diff --git a/[refs] b/[refs] index e3b77e61ee3c..0e37dd94e4e0 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: bcd7351e83728859833e3c5b8aae9a2816914e4b +refs/heads/master: 40cf48337cedc31d5c06255c25213136f7ffe324 diff --git a/trunk/Documentation/DocBook/media/v4l/dev-codec.xml b/trunk/Documentation/DocBook/media/v4l/dev-codec.xml index ff44c16fc080..dca0ecd54dc6 100644 --- a/trunk/Documentation/DocBook/media/v4l/dev-codec.xml +++ b/trunk/Documentation/DocBook/media/v4l/dev-codec.xml @@ -1,27 +1,18 @@ Codec Interface - A V4L2 codec can compress, decompress, transform, or otherwise -convert video data from one format into another format, in memory. Typically -such devices are memory-to-memory devices (i.e. devices with the -V4L2_CAP_VIDEO_M2M or V4L2_CAP_VIDEO_M2M_MPLANE -capability set). - + + Suspended - A memory-to-memory video node acts just like a normal video node, but it -supports both output (sending frames from memory to the codec hardware) and -capture (receiving the processed frames from the codec hardware into memory) -stream I/O. An application will have to setup the stream -I/O for both sides and finally call &VIDIOC-STREAMON; for both capture and output -to start the codec. + This interface has been be suspended from the V4L2 API +implemented in Linux 2.6 until we have more experience with codec +device interfaces. + - Video compression codecs use the MPEG controls to setup their codec parameters -(note that the MPEG controls actually support many more codecs than just MPEG). -See . + A V4L2 codec can compress, decompress, transform, or otherwise +convert video data from one format into another format, in memory. +Applications send data to be converted to the driver through a +&func-write; call, and receive the converted data through a +&func-read; call. For efficiency a driver may also support streaming +I/O. - Memory-to-memory devices can often be used as a shared resource: you can -open the video node multiple times, each application setting up their own codec properties -that are local to the file handle, and each can use it independently from the others. -The driver will arbitrate access to the codec and reprogram it whenever another file -handler gets access. This is different from the usual video node behavior where the video properties -are global to the device (i.e. changing something through one file handle is visible -through another file handle). + [to do] diff --git a/trunk/Documentation/DocBook/media/v4l/v4l2.xml b/trunk/Documentation/DocBook/media/v4l/v4l2.xml index bfe823dd0f31..bfc93cdcf696 100644 --- a/trunk/Documentation/DocBook/media/v4l/v4l2.xml +++ b/trunk/Documentation/DocBook/media/v4l/v4l2.xml @@ -493,7 +493,7 @@ and discussions on the V4L mailing list. Video for Linux Two API Specification - Revision 3.10 + Revision 3.9 &sub-common; diff --git a/trunk/Documentation/bcache.txt b/trunk/Documentation/bcache.txt index b3a7e7d384f6..77db8809bd96 100644 --- a/trunk/Documentation/bcache.txt +++ b/trunk/Documentation/bcache.txt @@ -319,10 +319,7 @@ cache<0..n> Symlink to each of the cache devices comprising this cache set. cache_available_percent - Percentage of cache device which doesn't contain dirty data, and could - potentially be used for writeback. This doesn't mean this space isn't used - for clean cached data; the unused statistic (in priority_stats) is typically - much lower. + Percentage of cache device free. 
clear_stats Clears the statistics associated with this cache @@ -426,11 +423,8 @@ nbuckets Total buckets in this cache priority_stats - Statistics about how recently data in the cache has been accessed. - This can reveal your working set size. Unused is the percentage of - the cache that doesn't contain any data. Metadata is bcache's - metadata overhead. Average is the average priority of cache buckets. - Next is a list of quantiles with the priority threshold of each. + Statistics about how recently data in the cache has been accessed. This can + reveal your working set size. written Sum of all data that has been written to the cache; comparison with diff --git a/trunk/Documentation/devices.txt b/trunk/Documentation/devices.txt index b9015912bca6..08f01e79c41a 100644 --- a/trunk/Documentation/devices.txt +++ b/trunk/Documentation/devices.txt @@ -498,8 +498,12 @@ Your cooperation is appreciated. Each device type has 5 bits (32 minors). - 13 block Previously used for the XT disk (/dev/xdN) - Deleted in kernel v3.9. + 13 block 8-bit MFM/RLL/IDE controller + 0 = /dev/xda First XT disk whole disk + 64 = /dev/xdb Second XT disk whole disk + + Partitions are handled in the same way as IDE disks + (see major number 3). 14 char Open Sound System (OSS) 0 = /dev/mixer Mixer control diff --git a/trunk/Documentation/devicetree/bindings/video/exynos_hdmi.txt b/trunk/Documentation/devicetree/bindings/drm/exynos/hdmi.txt similarity index 100% rename from trunk/Documentation/devicetree/bindings/video/exynos_hdmi.txt rename to trunk/Documentation/devicetree/bindings/drm/exynos/hdmi.txt diff --git a/trunk/Documentation/devicetree/bindings/video/exynos_hdmiddc.txt b/trunk/Documentation/devicetree/bindings/drm/exynos/hdmiddc.txt similarity index 100% rename from trunk/Documentation/devicetree/bindings/video/exynos_hdmiddc.txt rename to trunk/Documentation/devicetree/bindings/drm/exynos/hdmiddc.txt diff --git a/trunk/Documentation/devicetree/bindings/video/exynos_hdmiphy.txt b/trunk/Documentation/devicetree/bindings/drm/exynos/hdmiphy.txt similarity index 100% rename from trunk/Documentation/devicetree/bindings/video/exynos_hdmiphy.txt rename to trunk/Documentation/devicetree/bindings/drm/exynos/hdmiphy.txt diff --git a/trunk/Documentation/devicetree/bindings/video/exynos_mixer.txt b/trunk/Documentation/devicetree/bindings/drm/exynos/mixer.txt similarity index 100% rename from trunk/Documentation/devicetree/bindings/video/exynos_mixer.txt rename to trunk/Documentation/devicetree/bindings/drm/exynos/mixer.txt diff --git a/trunk/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt b/trunk/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt index de9f6b78ee51..3f62adfb3e0b 100644 --- a/trunk/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt +++ b/trunk/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt @@ -2,7 +2,7 @@ Exynos4x12/Exynos5 SoC series camera host interface (FIMC-LITE) Required properties: -- compatible : should be "samsung,exynos4212-fimc-lite" for Exynos4212 and +- compatible : should be "samsung,exynos4212-fimc" for Exynos4212 and Exynos4412 SoCs; - reg : physical base address and size of the device memory mapped registers; diff --git a/trunk/Documentation/devicetree/bindings/net/macb.txt b/trunk/Documentation/devicetree/bindings/net/macb.txt index 4ff65047bb9a..44afa0e5057d 100644 --- a/trunk/Documentation/devicetree/bindings/net/macb.txt +++ b/trunk/Documentation/devicetree/bindings/net/macb.txt @@ -4,7 +4,7 @@ Required properties: - compatible: Should be 
"cdns,[-]{macb|gem}" Use "cdns,at91sam9260-macb" Atmel at91sam9260 and at91sam9263 SoCs. Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb". - Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on + Use "cnds,pc302-gem" for Picochip picoXcell pc302 and later devices based on the Cadence GEM, or the generic form: "cdns,gem". - reg: Address and length of the register set for the device - interrupts: Should contain macb interrupt diff --git a/trunk/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt b/trunk/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt index 34c1505774bf..2a3feabd3b22 100644 --- a/trunk/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt +++ b/trunk/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt @@ -1,7 +1,7 @@ Atmel AT91RM9200 Real Time Clock Required properties: -- compatible: should be: "atmel,at91rm9200-rtc" or "atmel,at91sam9x5-rtc" +- compatible: should be: "atmel,at91rm9200-rtc" - reg: physical base address of the controller and length of memory mapped region. - interrupts: rtc alarm/event interrupt diff --git a/trunk/Documentation/devicetree/bindings/video/simple-framebuffer.txt b/trunk/Documentation/devicetree/bindings/video/simple-framebuffer.txt deleted file mode 100644 index 3ea460583111..000000000000 --- a/trunk/Documentation/devicetree/bindings/video/simple-framebuffer.txt +++ /dev/null @@ -1,25 +0,0 @@ -Simple Framebuffer - -A simple frame-buffer describes a raw memory region that may be rendered to, -with the assumption that the display hardware has already been set up to scan -out from that buffer. - -Required properties: -- compatible: "simple-framebuffer" -- reg: Should contain the location and size of the framebuffer memory. -- width: The width of the framebuffer in pixels. -- height: The height of the framebuffer in pixels. -- stride: The number of bytes in each line of the framebuffer. -- format: The format of the framebuffer surface. Valid values are: - - r5g6b5 (16-bit pixels, d[15:11]=r, d[10:5]=g, d[4:0]=b). - -Example: - - framebuffer { - compatible = "simple-framebuffer"; - reg = <0x1d385000 (1600 * 1200 * 2)>; - width = <1600>; - height = <1200>; - stride = <(1600 * 2)>; - format = "r5g6b5"; - }; diff --git a/trunk/Documentation/devicetree/usage-model.txt b/trunk/Documentation/devicetree/usage-model.txt index 0efedaad5165..ef9d06c9f8fd 100644 --- a/trunk/Documentation/devicetree/usage-model.txt +++ b/trunk/Documentation/devicetree/usage-model.txt @@ -191,11 +191,9 @@ Linux it will look something like this: }; The bootargs property contains the kernel arguments, and the initrd-* -properties define the address and size of an initrd blob. Note that -initrd-end is the first address after the initrd image, so this doesn't -match the usual semantic of struct resource. The chosen node may also -optionally contain an arbitrary number of additional properties for -platform-specific configuration data. +properties define the address and size of an initrd blob. The +chosen node may also optionally contain an arbitrary number of +additional properties for platform-specific configuration data. 
During early boot, the architecture setup code calls of_scan_flat_dt() several times with different helper callbacks to parse device tree diff --git a/trunk/Documentation/dmatest.txt b/trunk/Documentation/dmatest.txt index 132a094c7bc3..279ac0a8c5b1 100644 --- a/trunk/Documentation/dmatest.txt +++ b/trunk/Documentation/dmatest.txt @@ -34,7 +34,7 @@ command: After a while you will start to get messages about current status or error like in the original code. -Note that running a new test will not stop any in progress test. +Note that running a new test will stop any in progress test. The following command should return actual state of the test. % cat /sys/kernel/debug/dmatest/run @@ -52,8 +52,8 @@ To wait for test done the user may perform a busy loop that checks the state. The module parameters that is supplied to the kernel command line will be used for the first performed test. After user gets a control, the test could be -re-run with the same or different parameters. For the details see the above -section "Part 2 - When dmatest is built as a module..." +interrupted or re-run with same or different parameters. For the details see +the above section "Part 2 - When dmatest is built as a module..." In both cases the module parameters are used as initial values for the test case. You always could check them at run-time by running diff --git a/trunk/Documentation/filesystems/Locking b/trunk/Documentation/filesystems/Locking index 9858f337529c..0706d32a61e6 100644 --- a/trunk/Documentation/filesystems/Locking +++ b/trunk/Documentation/filesystems/Locking @@ -189,7 +189,7 @@ prototypes: loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); sector_t (*bmap)(struct address_space *, sector_t); - void (*invalidatepage) (struct page *, unsigned int, unsigned int); + int (*invalidatepage) (struct page *, unsigned long); int (*releasepage) (struct page *, int); void (*freepage)(struct page *); int (*direct_IO)(int, struct kiocb *, const struct iovec *iov, @@ -310,8 +310,8 @@ filesystems and by the swapper. The latter will eventually go away. Please, keep it that way and don't breed new callers. ->invalidatepage() is called when the filesystem must attempt to drop -some or all of the buffers from the page when it is being truncated. It -returns zero on success. If ->invalidatepage is zero, the kernel uses +some or all of the buffers from the page when it is being truncated. It +returns zero on success. If ->invalidatepage is zero, the kernel uses block_invalidatepage() instead. 
->releasepage() is called when the kernel is about to try to drop the @@ -414,7 +414,7 @@ prototypes: ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t); ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t); - int (*iterate) (struct file *, struct dir_context *); + int (*readdir) (struct file *, void *, filldir_t); unsigned int (*poll) (struct file *, struct poll_table_struct *); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); diff --git a/trunk/Documentation/filesystems/f2fs.txt b/trunk/Documentation/filesystems/f2fs.txt index b91e2f26b672..bd3c56c67380 100644 --- a/trunk/Documentation/filesystems/f2fs.txt +++ b/trunk/Documentation/filesystems/f2fs.txt @@ -98,13 +98,8 @@ Cleaning Overhead MOUNT OPTIONS ================================================================================ -background_gc=%s Turn on/off cleaning operations, namely garbage - collection, triggered in background when I/O subsystem is - idle. If background_gc=on, it will turn on the garbage - collection and if background_gc=off, garbage collection - will be truned off. - Default value for this option is on. So garbage - collection is on by default. +background_gc_off Turn off cleaning operations, namely garbage collection, + triggered in background when I/O subsystem is idle. disable_roll_forward Disable the roll-forward recovery routine discard Issue discard/TRIM commands when a segment is cleaned. no_heap Disable heap-style segment allocation which finds free diff --git a/trunk/Documentation/filesystems/porting b/trunk/Documentation/filesystems/porting index 206a1bdc7321..4db22f6491e0 100644 --- a/trunk/Documentation/filesystems/porting +++ b/trunk/Documentation/filesystems/porting @@ -445,9 +445,3 @@ object doesn't exist. It's remote/distributed ones that might care... [mandatory] FS_REVAL_DOT is gone; if you used to have it, add ->d_weak_revalidate() in your dentry operations instead. --- -[mandatory] - vfs_readdir() is gone; switch to iterate_dir() instead --- -[mandatory] - ->readdir() is gone now; switch to ->iterate() diff --git a/trunk/Documentation/filesystems/vfs.txt b/trunk/Documentation/filesystems/vfs.txt index e6bd1ffd821e..bc4b06b3160a 100644 --- a/trunk/Documentation/filesystems/vfs.txt +++ b/trunk/Documentation/filesystems/vfs.txt @@ -549,7 +549,7 @@ struct address_space_operations ------------------------------- This describes how the VFS can manipulate mapping of a file to page cache in -your filesystem. The following members are defined: +your filesystem. As of kernel 2.6.22, the following members are defined: struct address_space_operations { int (*writepage)(struct page *page, struct writeback_control *wbc); @@ -566,7 +566,7 @@ struct address_space_operations { loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); sector_t (*bmap)(struct address_space *, sector_t); - void (*invalidatepage) (struct page *, unsigned int, unsigned int); + int (*invalidatepage) (struct page *, unsigned long); int (*releasepage) (struct page *, int); void (*freepage)(struct page *); ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, @@ -685,14 +685,14 @@ struct address_space_operations { invalidatepage: If a page has PagePrivate set, then invalidatepage will be called when part or all of the page is to be removed from the address space. 
This generally corresponds to either a - truncation, punch hole or a complete invalidation of the address - space (in the latter case 'offset' will always be 0 and 'length' - will be PAGE_CACHE_SIZE). Any private data associated with the page - should be updated to reflect this truncation. If offset is 0 and - length is PAGE_CACHE_SIZE, then the private data should be released, - because the page must be able to be completely discarded. This may - be done by calling the ->releasepage function, but in this case the - release MUST succeed. + truncation or a complete invalidation of the address space + (in the latter case 'offset' will always be 0). + Any private data associated with the page should be updated + to reflect this truncation. If offset is 0, then + the private data should be released, because the page + must be able to be completely discarded. This may be done by + calling the ->releasepage function, but in this case the + release MUST succeed. releasepage: releasepage is called on PagePrivate pages to indicate that the page should be freed if possible. ->releasepage @@ -777,7 +777,7 @@ struct file_operations { ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t); ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t); - int (*iterate) (struct file *, struct dir_context *); + int (*readdir) (struct file *, void *, filldir_t); unsigned int (*poll) (struct file *, struct poll_table_struct *); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); @@ -815,7 +815,7 @@ otherwise noted. aio_write: called by io_submit(2) and other asynchronous I/O operations - iterate: called when the VFS needs to read the directory contents + readdir: called when the VFS needs to read the directory contents poll: called by the VFS when a process wants to check if there is activity on this file and (optionally) go to sleep until there diff --git a/trunk/Documentation/filesystems/xfs.txt b/trunk/Documentation/filesystems/xfs.txt index 83577f0232a0..3e4b3dd1e046 100644 --- a/trunk/Documentation/filesystems/xfs.txt +++ b/trunk/Documentation/filesystems/xfs.txt @@ -33,9 +33,6 @@ When mounting an XFS filesystem, the following options are accepted. removing extended attributes) the on-disk superblock feature bit field will be updated to reflect this format being in use. - CRC enabled filesystems always use the attr2 format, and so - will reject the noattr2 mount option if it is set. - barrier Enables the use of block layer write barriers for writes into the journal and unwritten extent conversion. This allows for diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index 2fe6e767b3d6..c3bfacb92910 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -3005,27 +3005,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted. Force threading of all interrupt handlers except those marked explicitly IRQF_NO_THREAD. - tmem [KNL,XEN] - Enable the Transcendent memory driver if built-in. - - tmem.cleancache=0|1 [KNL, XEN] - Default is on (1). Disable the usage of the cleancache - API to send anonymous pages to the hypervisor. - - tmem.frontswap=0|1 [KNL, XEN] - Default is on (1). Disable the usage of the frontswap - API to send swap pages to the hypervisor. 
If disabled - the selfballooning and selfshrinking are force disabled. - - tmem.selfballooning=0|1 [KNL, XEN] - Default is on (1). Disable the driving of swap pages - to the hypervisor. - - tmem.selfshrinking=0|1 [KNL, XEN] - Default is on (1). Partial swapoff that immediately - transfers pages from Xen hypervisor back to the - kernel based on different criteria. - topology= [S390] Format: {off | on} Specify if the kernel should make use of the cpu @@ -3351,6 +3330,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted. plus one apbt timer for broadcast timer. x86_mrst_timer=apbt_only | lapic_and_apbt + xd= [HW,XT] Original XT pre-IDE (RLL encoded) disks. + xd_geo= See header of drivers/block/xd.c. + xen_emul_unplug= [HW,X86,XEN] Unplug Xen emulated devices Format: [unplug0,][unplug1] diff --git a/trunk/Documentation/kernel-per-CPU-kthreads.txt b/trunk/Documentation/kernel-per-CPU-kthreads.txt deleted file mode 100644 index cbf7ae412da4..000000000000 --- a/trunk/Documentation/kernel-per-CPU-kthreads.txt +++ /dev/null @@ -1,202 +0,0 @@ -REDUCING OS JITTER DUE TO PER-CPU KTHREADS - -This document lists per-CPU kthreads in the Linux kernel and presents -options to control their OS jitter. Note that non-per-CPU kthreads are -not listed here. To reduce OS jitter from non-per-CPU kthreads, bind -them to a "housekeeping" CPU dedicated to such work. - - -REFERENCES - -o Documentation/IRQ-affinity.txt: Binding interrupts to sets of CPUs. - -o Documentation/cgroups: Using cgroups to bind tasks to sets of CPUs. - -o man taskset: Using the taskset command to bind tasks to sets - of CPUs. - -o man sched_setaffinity: Using the sched_setaffinity() system - call to bind tasks to sets of CPUs. - -o /sys/devices/system/cpu/cpuN/online: Control CPU N's hotplug state, - writing "0" to offline and "1" to online. - -o In order to locate kernel-generated OS jitter on CPU N: - - cd /sys/kernel/debug/tracing - echo 1 > max_graph_depth # Increase the "1" for more detail - echo function_graph > current_tracer - # run workload - cat per_cpu/cpuN/trace - - -KTHREADS - -Name: ehca_comp/%u -Purpose: Periodically process Infiniband-related work. -To reduce its OS jitter, do any of the following: -1. Don't use eHCA Infiniband hardware, instead choosing hardware - that does not require per-CPU kthreads. This will prevent these - kthreads from being created in the first place. (This will - work for most people, as this hardware, though important, is - relatively old and is produced in relatively low unit volumes.) -2. Do all eHCA-Infiniband-related work on other CPUs, including - interrupts. -3. Rework the eHCA driver so that its per-CPU kthreads are - provisioned only on selected CPUs. - - -Name: irq/%d-%s -Purpose: Handle threaded interrupts. -To reduce its OS jitter, do the following: -1. Use irq affinity to force the irq threads to execute on - some other CPU. - -Name: kcmtpd_ctr_%d -Purpose: Handle Bluetooth work. -To reduce its OS jitter, do one of the following: -1. Don't use Bluetooth, in which case these kthreads won't be - created in the first place. -2. Use irq affinity to force Bluetooth-related interrupts to - occur on some other CPU and furthermore initiate all - Bluetooth activity on some other CPU. - -Name: ksoftirqd/%u -Purpose: Execute softirq handlers when threaded or when under heavy load. -To reduce its OS jitter, each softirq vector must be handled -separately as follows: -TIMER_SOFTIRQ: Do all of the following: -1. 
To the extent possible, keep the CPU out of the kernel when it - is non-idle, for example, by avoiding system calls and by forcing - both kernel threads and interrupts to execute elsewhere. -2. Build with CONFIG_HOTPLUG_CPU=y. After boot completes, force - the CPU offline, then bring it back online. This forces - recurring timers to migrate elsewhere. If you are concerned - with multiple CPUs, force them all offline before bringing the - first one back online. Once you have onlined the CPUs in question, - do not offline any other CPUs, because doing so could force the - timer back onto one of the CPUs in question. -NET_TX_SOFTIRQ and NET_RX_SOFTIRQ: Do all of the following: -1. Force networking interrupts onto other CPUs. -2. Initiate any network I/O on other CPUs. -3. Once your application has started, prevent CPU-hotplug operations - from being initiated from tasks that might run on the CPU to - be de-jittered. (It is OK to force this CPU offline and then - bring it back online before you start your application.) -BLOCK_SOFTIRQ: Do all of the following: -1. Force block-device interrupts onto some other CPU. -2. Initiate any block I/O on other CPUs. -3. Once your application has started, prevent CPU-hotplug operations - from being initiated from tasks that might run on the CPU to - be de-jittered. (It is OK to force this CPU offline and then - bring it back online before you start your application.) -BLOCK_IOPOLL_SOFTIRQ: Do all of the following: -1. Force block-device interrupts onto some other CPU. -2. Initiate any block I/O and block-I/O polling on other CPUs. -3. Once your application has started, prevent CPU-hotplug operations - from being initiated from tasks that might run on the CPU to - be de-jittered. (It is OK to force this CPU offline and then - bring it back online before you start your application.) -TASKLET_SOFTIRQ: Do one or more of the following: -1. Avoid use of drivers that use tasklets. (Such drivers will contain - calls to things like tasklet_schedule().) -2. Convert all drivers that you must use from tasklets to workqueues. -3. Force interrupts for drivers using tasklets onto other CPUs, - and also do I/O involving these drivers on other CPUs. -SCHED_SOFTIRQ: Do all of the following: -1. Avoid sending scheduler IPIs to the CPU to be de-jittered, - for example, ensure that at most one runnable kthread is present - on that CPU. If a thread that expects to run on the de-jittered - CPU awakens, the scheduler will send an IPI that can result in - a subsequent SCHED_SOFTIRQ. -2. Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y, - CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU - to be de-jittered is marked as an adaptive-ticks CPU using the - "nohz_full=" boot parameter. This reduces the number of - scheduler-clock interrupts that the de-jittered CPU receives, - minimizing its chances of being selected to do the load balancing - work that runs in SCHED_SOFTIRQ context. -3. To the extent possible, keep the CPU out of the kernel when it - is non-idle, for example, by avoiding system calls and by - forcing both kernel threads and interrupts to execute elsewhere. - This further reduces the number of scheduler-clock interrupts - received by the de-jittered CPU. -HRTIMER_SOFTIRQ: Do all of the following: -1. To the extent possible, keep the CPU out of the kernel when it - is non-idle. For example, avoid system calls and force both - kernel threads and interrupts to execute elsewhere. -2. Build with CONFIG_HOTPLUG_CPU=y. 
Once boot completes, force the - CPU offline, then bring it back online. This forces recurring - timers to migrate elsewhere. If you are concerned with multiple - CPUs, force them all offline before bringing the first one - back online. Once you have onlined the CPUs in question, do not - offline any other CPUs, because doing so could force the timer - back onto one of the CPUs in question. -RCU_SOFTIRQ: Do at least one of the following: -1. Offload callbacks and keep the CPU in either dyntick-idle or - adaptive-ticks state by doing all of the following: - a. Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y, - CONFIG_NO_HZ_FULL=y, and, in addition ensure that the CPU - to be de-jittered is marked as an adaptive-ticks CPU using - the "nohz_full=" boot parameter. Bind the rcuo kthreads - to housekeeping CPUs, which can tolerate OS jitter. - b. To the extent possible, keep the CPU out of the kernel - when it is non-idle, for example, by avoiding system - calls and by forcing both kernel threads and interrupts - to execute elsewhere. -2. Enable RCU to do its processing remotely via dyntick-idle by - doing all of the following: - a. Build with CONFIG_NO_HZ=y and CONFIG_RCU_FAST_NO_HZ=y. - b. Ensure that the CPU goes idle frequently, allowing other - CPUs to detect that it has passed through an RCU quiescent - state. If the kernel is built with CONFIG_NO_HZ_FULL=y, - userspace execution also allows other CPUs to detect that - the CPU in question has passed through a quiescent state. - c. To the extent possible, keep the CPU out of the kernel - when it is non-idle, for example, by avoiding system - calls and by forcing both kernel threads and interrupts - to execute elsewhere. - -Name: rcuc/%u -Purpose: Execute RCU callbacks in CONFIG_RCU_BOOST=y kernels. -To reduce its OS jitter, do at least one of the following: -1. Build the kernel with CONFIG_PREEMPT=n. This prevents these - kthreads from being created in the first place, and also obviates - the need for RCU priority boosting. This approach is feasible - for workloads that do not require high degrees of responsiveness. -2. Build the kernel with CONFIG_RCU_BOOST=n. This prevents these - kthreads from being created in the first place. This approach - is feasible only if your workload never requires RCU priority - boosting, for example, if you ensure frequent idle time on all - CPUs that might execute within the kernel. -3. Build with CONFIG_RCU_NOCB_CPU=y and CONFIG_RCU_NOCB_CPU_ALL=y, - which offloads all RCU callbacks to kthreads that can be moved - off of CPUs susceptible to OS jitter. This approach prevents the - rcuc/%u kthreads from having any work to do, so that they are - never awakened. -4. Ensure that the CPU never enters the kernel, and, in particular, - avoid initiating any CPU hotplug operations on this CPU. This is - another way of preventing any callbacks from being queued on the - CPU, again preventing the rcuc/%u kthreads from having any work - to do. - -Name: rcuob/%d, rcuop/%d, and rcuos/%d -Purpose: Offload RCU callbacks from the corresponding CPU. -To reduce its OS jitter, do at least one of the following: -1. Use affinity, cgroups, or other mechanism to force these kthreads - to execute on some other CPU. -2. Build with CONFIG_RCU_NOCB_CPUS=n, which will prevent these - kthreads from being created in the first place. However, please - note that this will not eliminate OS jitter, but will instead - shift it to RCU_SOFTIRQ. - -Name: watchdog/%u -Purpose: Detect software lockups on each CPU. 
-To reduce its OS jitter, do at least one of the following: -1. Build with CONFIG_LOCKUP_DETECTOR=n, which will prevent these - kthreads from being created in the first place. -2. Echo a zero to /proc/sys/kernel/watchdog to disable the - watchdog timer. -3. Echo a large number of /proc/sys/kernel/watchdog_thresh in - order to reduce the frequency of OS jitter due to the watchdog - timer down to a level that is acceptable for your workload. diff --git a/trunk/Documentation/m68k/kernel-options.txt b/trunk/Documentation/m68k/kernel-options.txt index eaf32a1fd0b1..97d45f276fe6 100644 --- a/trunk/Documentation/m68k/kernel-options.txt +++ b/trunk/Documentation/m68k/kernel-options.txt @@ -80,6 +80,8 @@ Valid names are: /dev/sdd: -> 0x0830 (forth SCSI disk) /dev/sde: -> 0x0840 (fifth SCSI disk) /dev/fd : -> 0x0200 (floppy disk) + /dev/xda: -> 0x0c00 (first XT disk, unused in Linux/m68k) + /dev/xdb: -> 0x0c40 (second XT disk, unused in Linux/m68k) The name must be followed by a decimal number, that stands for the partition number. Internally, the value of the number is just diff --git a/trunk/Documentation/networking/ip-sysctl.txt b/trunk/Documentation/networking/ip-sysctl.txt index 3458d6343e01..f98ca633b528 100644 --- a/trunk/Documentation/networking/ip-sysctl.txt +++ b/trunk/Documentation/networking/ip-sysctl.txt @@ -420,10 +420,10 @@ tcp_synack_retries - INTEGER for a passive TCP connection will happen after 63seconds. tcp_syncookies - BOOLEAN - Only valid when the kernel was compiled with CONFIG_SYN_COOKIES + Only valid when the kernel was compiled with CONFIG_SYNCOOKIES Send out syncookies when the syn backlog queue of a socket overflows. This is to prevent against the common 'SYN flood attack' - Default: 1 + Default: FALSE Note, that syncookies is fallback facility. It MUST NOT be used to help highly loaded servers to stand diff --git a/trunk/Documentation/power/devices.txt b/trunk/Documentation/power/devices.txt index a66c9821b5ce..504dfe4d52eb 100644 --- a/trunk/Documentation/power/devices.txt +++ b/trunk/Documentation/power/devices.txt @@ -268,7 +268,7 @@ situations. System Power Management Phases ------------------------------ Suspending or resuming the system is done in several phases. Different phases -are used for freeze, standby, and memory sleep states ("suspend-to-RAM") and the +are used for standby or memory sleep states ("suspend-to-RAM") and the hibernation state ("suspend-to-disk"). Each phase involves executing callbacks for every device before the next phase begins. Not all busses or classes support all these callbacks and not all drivers use all the callbacks. The @@ -309,8 +309,7 @@ execute the corresponding method from dev->driver->pm instead if there is one. Entering System Suspend ----------------------- -When the system goes into the freeze, standby or memory sleep state, -the phases are: +When the system goes into the standby or memory sleep state, the phases are: prepare, suspend, suspend_late, suspend_noirq. @@ -369,7 +368,7 @@ the devices that were suspended. Leaving System Suspend ---------------------- -When resuming from freeze, standby or memory sleep, the phases are: +When resuming from standby or memory sleep, the phases are: resume_noirq, resume_early, resume, complete. @@ -434,8 +433,8 @@ the system log. Entering Hibernation -------------------- -Hibernating the system is more complicated than putting it into the other -sleep states, because it involves creating and saving a system image. 
+Hibernating the system is more complicated than putting it into the standby or +memory sleep state, because it involves creating and saving a system image. Therefore there are more phases for hibernation, with a different set of callbacks. These phases always run after tasks have been frozen and memory has been freed. @@ -486,8 +485,8 @@ image forms an atomic snapshot of the system state. At this point the system image is saved, and the devices then need to be prepared for the upcoming system shutdown. This is much like suspending them -before putting the system into the freeze, standby or memory sleep state, -and the phases are similar. +before putting the system into the standby or memory sleep state, and the phases +are similar. 9. The prepare phase is discussed above. diff --git a/trunk/Documentation/power/interface.txt b/trunk/Documentation/power/interface.txt index f1f0f59a7c47..c537834af005 100644 --- a/trunk/Documentation/power/interface.txt +++ b/trunk/Documentation/power/interface.txt @@ -7,8 +7,8 @@ running. The interface exists in /sys/power/ directory (assuming sysfs is mounted at /sys). /sys/power/state controls system power state. Reading from this file -returns what states are supported, which is hard-coded to 'freeze', -'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk' +returns what states are supported, which is hard-coded to 'standby' +(Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk' (Suspend-to-Disk). Writing to this file one of those strings causes the system to diff --git a/trunk/Documentation/power/notifiers.txt b/trunk/Documentation/power/notifiers.txt index a81fa254303d..c2a4a346c0d9 100644 --- a/trunk/Documentation/power/notifiers.txt +++ b/trunk/Documentation/power/notifiers.txt @@ -15,10 +15,8 @@ A suspend/hibernation notifier may be used for this purpose. The subsystems or drivers having such needs can register suspend notifiers that will be called upon the following events by the PM core: -PM_HIBERNATION_PREPARE The system is going to hibernate, tasks will be frozen - immediately. This is different from PM_SUSPEND_PREPARE - below because here we do additional work between notifiers - and drivers freezing. +PM_HIBERNATION_PREPARE The system is going to hibernate or suspend, tasks will + be frozen immediately. PM_POST_HIBERNATION The system memory state has been restored from a hibernation image or an error occurred during diff --git a/trunk/Documentation/power/states.txt b/trunk/Documentation/power/states.txt index 442d43df9b25..4416b28630df 100644 --- a/trunk/Documentation/power/states.txt +++ b/trunk/Documentation/power/states.txt @@ -2,26 +2,12 @@ System Power Management States -The kernel supports four power management states generically, though -one is generic and the other three are dependent on platform support -code to implement the low-level details for each state. -This file describes each state, what they are +The kernel supports three power management states generically, though +each is dependent on platform support code to implement the low-level +details for each state. This file describes each state, what they are commonly called, what ACPI state they map to, and what string to write to /sys/power/state to enter that state -state: Freeze / Low-Power Idle -ACPI state: S0 -String: "freeze" - -This state is a generic, pure software, light-weight, low-power state. 
-It allows more energy to be saved relative to idle by freezing user -space and putting all I/O devices into low-power states (possibly -lower-power than available at run time), such that the processors can -spend more time in their idle states. -This state can be used for platforms without Standby/Suspend-to-RAM -support, or it can be used in addition to Suspend-to-RAM (memory sleep) -to provide reduced resume latency. - State: Standby / Power-On Suspend ACPI State: S1 @@ -36,6 +22,9 @@ We try to put devices in a low-power state equivalent to D1, which also offers low power savings, but low resume latency. Not all devices support D1, and those that don't are left on. +A transition from Standby to the On state should take about 1-2 +seconds. + State: Suspend-to-RAM ACPI State: S3 @@ -53,6 +42,9 @@ transition back to the On state. For at least ACPI, STR requires some minimal boot-strapping code to resume the system from STR. This may be true on other platforms. +A transition from Suspend-to-RAM to the On state should take about +3-5 seconds. + State: Suspend-to-disk ACPI State: S4 @@ -82,3 +74,7 @@ low-power state (like ACPI S4), or it may simply power down. Powering down offers greater savings, and allows this mechanism to work on any system. However, entering a real low-power state allows the user to trigger wake up events (e.g. pressing a key or opening a laptop lid). + +A transition from Suspend-to-Disk to the On state should take about 30 +seconds, though it's typically a bit more with the current +implementation. diff --git a/trunk/Documentation/powerpc/transactional_memory.txt b/trunk/Documentation/powerpc/transactional_memory.txt index dc23e58ae264..c907be41d60f 100644 --- a/trunk/Documentation/powerpc/transactional_memory.txt +++ b/trunk/Documentation/powerpc/transactional_memory.txt @@ -147,25 +147,6 @@ Example signal handler: fix_the_problem(ucp->dar); } -When in an active transaction that takes a signal, we need to be careful with -the stack. It's possible that the stack has moved back up after the tbegin. -The obvious case here is when the tbegin is called inside a function that -returns before a tend. In this case, the stack is part of the checkpointed -transactional memory state. If we write over this non transactionally or in -suspend, we are in trouble because if we get a tm abort, the program counter and -stack pointer will be back at the tbegin but our in memory stack won't be valid -anymore. - -To avoid this, when taking a signal in an active transaction, we need to use -the stack pointer from the checkpointed state, rather than the speculated -state. This ensures that the signal context (written tm suspended) will be -written below the stack required for the rollback. The transaction is aborted -becuase of the treclaim, so any memory written between the tbegin and the -signal will be rolled back anyway. - -For signals taken in non-TM or suspended mode, we use the -normal/non-checkpointed stack pointer. - Failure cause codes used by kernel ================================== @@ -174,18 +155,14 @@ These are defined in , and distinguish different reasons why the kernel aborted a transaction: TM_CAUSE_RESCHED Thread was rescheduled. - TM_CAUSE_TLBI Software TLB invalide. TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap. TM_CAUSE_SYSCALL Currently unused; future syscalls that must abort transactions for consistency will use this. TM_CAUSE_SIGNAL Signal delivered. TM_CAUSE_MISC Currently unused. - TM_CAUSE_ALIGNMENT Alignment fault. 
- TM_CAUSE_EMULATE Emulation that touched memory. -These can be checked by the user program's abort handler as TEXASR[0:7]. If -bit 7 is set, it indicates that the error is consider persistent. For example -a TM_CAUSE_ALIGNMENT will be persistent while a TM_CAUSE_RESCHED will not.q +These can be checked by the user program's abort handler as TEXASR[0:7]. + GDB === diff --git a/trunk/Documentation/rapidio/rapidio.txt b/trunk/Documentation/rapidio/rapidio.txt index a9c16c979da2..c75694b35d08 100644 --- a/trunk/Documentation/rapidio/rapidio.txt +++ b/trunk/Documentation/rapidio/rapidio.txt @@ -79,63 +79,20 @@ master port that is used to communicate with devices within the network. In order to initialize the RapidIO subsystem, a platform must initialize and register at least one master port within the RapidIO network. To register mport within the subsystem controller driver initialization code calls function -rio_register_mport() for each available master port. +rio_register_mport() for each available master port. After all active master +ports are registered with a RapidIO subsystem, the rio_init_mports() routine +is called to perform enumeration and discovery. -RapidIO subsystem uses subsys_initcall() or device_initcall() to perform -controller initialization (depending on controller device type). - -After all active master ports are registered with a RapidIO subsystem, -an enumeration and/or discovery routine may be called automatically or -by user-space command. +In the current PowerPC-based implementation a subsys_initcall() is specified to +perform controller initialization and mport registration. At the end it directly +calls rio_init_mports() to execute RapidIO enumeration and discovery. 4. Enumeration and Discovery ---------------------------- -4.1 Overview ------------- - -RapidIO subsystem configuration options allow users to specify enumeration and -discovery methods as statically linked components or loadable modules. -An enumeration/discovery method implementation and available input parameters -define how any given method can be attached to available RapidIO mports: -simply to all available mports OR individually to the specified mport device. - -Depending on selected enumeration/discovery build configuration, there are -several methods to initiate an enumeration and/or discovery process: - - (a) Statically linked enumeration and discovery process can be started - automatically during kernel initialization time using corresponding module - parameters. This was the original method used since introduction of RapidIO - subsystem. Now this method relies on enumerator module parameter which is - 'rio-scan.scan' for existing basic enumeration/discovery method. - When automatic start of enumeration/discovery is used a user has to ensure - that all discovering endpoints are started before the enumerating endpoint - and are waiting for enumeration to be completed. - Configuration option CONFIG_RAPIDIO_DISC_TIMEOUT defines time that discovering - endpoint waits for enumeration to be completed. If the specified timeout - expires the discovery process is terminated without obtaining RapidIO network - information. NOTE: a timed out discovery process may be restarted later using - a user-space command as it is described later if the given endpoint was - enumerated successfully. - - (b) Statically linked enumeration and discovery process can be started by - a command from user space. This initiation method provides more flexibility - for a system startup compared to the option (a) above. 
After all participating - endpoints have been successfully booted, an enumeration process shall be - started first by issuing a user-space command, after an enumeration is - completed a discovery process can be started on all remaining endpoints. - - (c) Modular enumeration and discovery process can be started by a command from - user space. After an enumeration/discovery module is loaded, a network scan - process can be started by issuing a user-space command. - Similar to the option (b) above, an enumerator has to be started first. - - (d) Modular enumeration and discovery process can be started by a module - initialization routine. In this case an enumerating module shall be loaded - first. - -When a network scan process is started it calls an enumeration or discovery -routine depending on the configured role of a master port: host or agent. +When rio_init_mports() is called it scans a list of registered master ports and +calls an enumeration or discovery routine depending on the configured role of a +master port: host or agent. Enumeration is performed by a master port if it is configured as a host port by assigning a host device ID greater than or equal to zero. A host device ID is @@ -147,58 +104,8 @@ for it. The enumeration and discovery routines use RapidIO maintenance transactions to access the configuration space of devices. -4.2 Automatic Start of Enumeration and Discovery ------------------------------------------------- - -Automatic enumeration/discovery start method is applicable only to built-in -enumeration/discovery RapidIO configuration selection. To enable automatic -enumeration/discovery start by existing basic enumerator method set use boot -command line parameter "rio-scan.scan=1". - -This configuration requires synchronized start of all RapidIO endpoints that -form a network which will be enumerated/discovered. Discovering endpoints have -to be started before an enumeration starts to ensure that all RapidIO -controllers have been initialized and are ready to be discovered. Configuration -parameter CONFIG_RAPIDIO_DISC_TIMEOUT defines time (in seconds) which -a discovering endpoint will wait for enumeration to be completed. - -When automatic enumeration/discovery start is selected, basic method's -initialization routine calls rio_init_mports() to perform enumeration or -discovery for all known mport devices. - -Depending on RapidIO network size and configuration this automatic -enumeration/discovery start method may be difficult to use due to the -requirement for synchronized start of all endpoints. - -4.3 User-space Start of Enumeration and Discovery -------------------------------------------------- - -User-space start of enumeration and discovery can be used with built-in and -modular build configurations. For user-space controlled start RapidIO subsystem -creates the sysfs write-only attribute file '/sys/bus/rapidio/scan'. To initiate -an enumeration or discovery process on specific mport device, a user needs to -write mport_ID (not RapidIO destination ID) into that file. The mport_ID is a -sequential number (0 ... RIO_MAX_MPORTS) assigned during mport device -registration. For example for machine with single RapidIO controller, mport_ID -for that controller always will be 0. - -To initiate RapidIO enumeration/discovery on all available mports a user may -write '-1' (or RIO_MPORT_ANY) into the scan attribute file. 
- -4.4 Basic Enumeration Method ----------------------------- - -This is an original enumeration/discovery method which is available since -first release of RapidIO subsystem code. The enumeration process is -implemented according to the enumeration algorithm outlined in the RapidIO -Interconnect Specification: Annex I [1]. - -This method can be configured as statically linked or loadable module. -The method's single parameter "scan" allows to trigger the enumeration/discovery -process from module initialization routine. - -This enumeration/discovery method can be started only once and does not support -unloading if it is built as a module. +The enumeration process is implemented according to the enumeration algorithm +outlined in the RapidIO Interconnect Specification: Annex I [1]. The enumeration process traverses the network using a recursive depth-first algorithm. When a new device is found, the enumerator takes ownership of that @@ -253,19 +160,6 @@ time period. If this wait time period expires before enumeration is completed, an agent skips RapidIO discovery and continues with remaining kernel initialization. -4.5 Adding New Enumeration/Discovery Method -------------------------------------------- - -RapidIO subsystem code organization allows addition of new enumeration/discovery -methods as new configuration options without significant impact to to the core -RapidIO code. - -A new enumeration/discovery method has to be attached to one or more mport -devices before an enumeration/discovery process can be started. Normally, -method's module initialization routine calls rio_register_scan() to attach -an enumerator to a specified mport device (or devices). The basic enumerator -implementation demonstrates this process. - 5. References ------------- diff --git a/trunk/Documentation/rapidio/sysfs.txt b/trunk/Documentation/rapidio/sysfs.txt index 19878179da4c..97f71ce575d6 100644 --- a/trunk/Documentation/rapidio/sysfs.txt +++ b/trunk/Documentation/rapidio/sysfs.txt @@ -88,20 +88,3 @@ that exports additional attributes. IDT_GEN2: errlog - reads contents of device error log until it is empty. - - -5. RapidIO Bus Attributes -------------------------- - -RapidIO bus subdirectory /sys/bus/rapidio implements the following bus-specific -attribute: - - scan - allows to trigger enumeration discovery process from user space. This - is a write-only attribute. To initiate an enumeration or discovery - process on specific mport device, a user needs to write mport_ID (not - RapidIO destination ID) into this file. The mport_ID is a sequential - number (0 ... RIO_MAX_MPORTS) assigned to the mport device. - For example, for a machine with a single RapidIO controller, mport_ID - for that controller always will be 0. - To initiate RapidIO enumeration/discovery on all available mports - a user must write '-1' (or RIO_MPORT_ANY) into this attribute file. 
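For reference, the scan attribute described in the removed rapidio/sysfs.txt text above is driven from user space by writing an mport_ID into /sys/bus/rapidio/scan. A minimal user-space sketch of that how-to follows; it assumes a kernel that still exposes the write-only scan attribute and a single-controller machine where mport_ID 0 is valid (both taken from the removed text, not from this patch's resulting tree).

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Path and semantics taken from the removed sysfs.txt text;
	 * requires root and a kernel providing the scan attribute. */
	int fd = open("/sys/bus/rapidio/scan", O_WRONLY);
	if (fd < 0) {
		perror("open /sys/bus/rapidio/scan");
		return 1;
	}

	/* Write the mport_ID (not a RapidIO destination ID). Writing
	 * "-1" instead would request a scan on all available mports
	 * (RIO_MPORT_ANY), per the removed documentation. */
	const char *mport_id = "0";
	if (write(fd, mport_id, strlen(mport_id)) < 0) {
		perror("write mport_ID");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}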
diff --git a/trunk/Documentation/sound/alsa/HD-Audio-Models.txt b/trunk/Documentation/sound/alsa/HD-Audio-Models.txt index 77d68e23b247..bb8b0dc532b8 100644 --- a/trunk/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/trunk/Documentation/sound/alsa/HD-Audio-Models.txt @@ -29,8 +29,6 @@ ALC269/270/275/276/280/282 alc271-dmic Enable ALC271X digital mic workaround inv-dmic Inverted internal mic workaround lenovo-dock Enables docking station I/O for some Lenovos - dell-headset-multi Headset jack, which can also be used as mic-in - dell-headset-dock Headset jack (without mic-in), and also dock I/O ALC662/663/272 ============== @@ -44,7 +42,6 @@ ALC662/663/272 asus-mode7 ASUS asus-mode8 ASUS inv-dmic Inverted internal mic workaround - dell-headset-multi Headset jack, which can also be used as mic-in ALC680 ====== diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index ad7e322ad17b..3d7782b9f90d 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -2890,8 +2890,8 @@ F: drivers/media/dvb-frontends/ec100* ECRYPT FILE SYSTEM M: Tyler Hicks +M: Dustin Kirkland L: ecryptfs@vger.kernel.org -W: http://ecryptfs.org W: https://launchpad.net/ecryptfs S: Supported F: Documentation/filesystems/ecryptfs.txt @@ -3220,7 +3220,7 @@ F: lib/fault-inject.c FCOE SUBSYSTEM (libfc, libfcoe, fcoe) M: Robert Love -L: fcoe-devel@open-fcoe.org +L: devel@open-fcoe.org W: www.Open-FCoE.org S: Supported F: drivers/scsi/libfc/ @@ -3322,12 +3322,11 @@ F: drivers/net/wan/dlci.c F: drivers/net/wan/sdla.c FRAMEBUFFER LAYER -M: Jean-Christophe Plagniol-Villard -M: Tomi Valkeinen +M: Florian Tobias Schandinat L: linux-fbdev@vger.kernel.org W: http://linux-fbdev.sourceforge.net/ Q: http://patchwork.kernel.org/project/linux-fbdev/list/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/plagnioj/linux-fbdev.git +T: git git://github.com/schandinat/linux-2.6.git fbdev-next S: Maintained F: Documentation/fb/ F: Documentation/devicetree/bindings/fb/ @@ -3866,16 +3865,9 @@ M: K. Y. 
Srinivasan M: Haiyang Zhang L: devel@linuxdriverproject.org S: Maintained -F: arch/x86/include/asm/mshyperv.h -F: arch/x86/include/uapi/asm/hyperv.h -F: arch/x86/kernel/cpu/mshyperv.c -F: drivers/hid/hid-hyperv.c F: drivers/hv/ +F: drivers/hid/hid-hyperv.c F: drivers/net/hyperv/ -F: drivers/scsi/storvsc_drv.c -F: drivers/video/hyperv_fb.c -F: include/linux/hyperv.h -F: tools/hv/ I2C OVER PARALLEL PORT M: Jean Delvare @@ -4448,16 +4440,6 @@ S: Maintained F: drivers/scsi/*iscsi* F: include/scsi/*iscsi* -ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR -M: Or Gerlitz -M: Roi Dayan -L: linux-rdma@vger.kernel.org -S: Supported -W: http://www.openfabrics.org -W: www.open-iscsi.org -Q: http://patchwork.kernel.org/project/linux-rdma/list/ -F: drivers/infiniband/ulp/iser - ISDN SUBSYSTEM M: Karsten Keil L: isdn4linux@listserv.isdn4linux.de (subscribers-only) @@ -4659,13 +4641,12 @@ F: include/linux/sunrpc/ F: include/uapi/linux/sunrpc/ KERNEL VIRTUAL MACHINE (KVM) +M: Marcelo Tosatti M: Gleb Natapov -M: Paolo Bonzini L: kvm@vger.kernel.org -W: http://linux-kvm.org +W: http://kvm.qumranet.com S: Supported -F: Documentation/*/kvm*.txt -F: Documentation/virtual/kvm/ +F: Documentation/*/kvm.txt F: arch/*/kvm/ F: arch/*/include/asm/kvm* F: include/linux/kvm* @@ -4995,13 +4976,6 @@ S: Maintained F: Documentation/hwmon/lm90 F: drivers/hwmon/lm90.c -LM95234 HARDWARE MONITOR DRIVER -M: Guenter Roeck -L: lm-sensors@lm-sensors.org -S: Maintained -F: Documentation/hwmon/lm95234 -F: drivers/hwmon/lm95234.c - LME2510 MEDIA DRIVER M: Malcolm Priestley L: linux-media@vger.kernel.org @@ -5535,18 +5509,18 @@ F: Documentation/networking/s2io.txt F: Documentation/networking/vxge.txt F: drivers/net/ethernet/neterion/ -NETFILTER/IPTABLES +NETFILTER/IPTABLES/IPCHAINS +P: Harald Welte +P: Jozsef Kadlecsik M: Pablo Neira Ayuso M: Patrick McHardy -M: Jozsef Kadlecsik L: netfilter-devel@vger.kernel.org L: netfilter@vger.kernel.org L: coreteam@netfilter.org W: http://www.netfilter.org/ W: http://www.iptables.org/ -Q: http://patchwork.ozlabs.org/project/netfilter-devel/list/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git -T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git +T: git git://1984.lsi.us.es/nf +T: git git://1984.lsi.us.es/nf-next S: Supported F: include/linux/netfilter* F: include/linux/netfilter/ @@ -5766,7 +5740,7 @@ M: Matthew Wilcox L: linux-nvme@lists.infradead.org T: git git://git.infradead.org/users/willy/linux-nvme.git S: Supported -F: drivers/block/nvme* +F: drivers/block/nvme.c F: include/linux/nvme.h OMAP SUPPORT @@ -6095,18 +6069,9 @@ L: linux-parisc@vger.kernel.org W: http://www.parisc-linux.org/ Q: http://patchwork.kernel.org/project/linux-parisc/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6.git -T: git git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git S: Maintained F: arch/parisc/ -F: Documentation/parisc/ F: drivers/parisc/ -F: drivers/char/agp/parisc-agp.c -F: drivers/input/serio/gscps2.c -F: drivers/parport/parport_gsc.* -F: drivers/tty/serial/8250/8250_gsc.c -F: drivers/video/sti* -F: drivers/video/console/sti* -F: drivers/video/logo/logo_parisc* PC87360 HARDWARE MONITORING DRIVER M: Jim Cromie @@ -7624,7 +7589,7 @@ F: drivers/clk/spear/ SPI SUBSYSTEM M: Mark Brown M: Grant Likely -L: linux-spi@vger.kernel.org +L: spi-devel-general@lists.sourceforge.net T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git Q: http://patchwork.kernel.org/project/spi-devel-general/list/ S: Maintained @@ -7889,7 +7854,7 
@@ L: linux-scsi@vger.kernel.org L: target-devel@vger.kernel.org L: http://groups.google.com/group/linux-iscsi-target-dev W: http://www.linux-iscsi.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master +T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core.git master S: Supported F: drivers/target/ F: include/target/ @@ -8217,13 +8182,6 @@ F: drivers/mmc/host/sh_mobile_sdhi.c F: include/linux/mmc/tmio.h F: include/linux/mmc/sh_mobile_sdhi.h -TMP401 HARDWARE MONITOR DRIVER -M: Guenter Roeck -L: lm-sensors@lm-sensors.org -S: Maintained -F: Documentation/hwmon/tmp401 -F: drivers/hwmon/tmp401.c - TMPFS (SHMEM FILESYSTEM) M: Hugh Dickins L: linux-mm@kvack.org @@ -9004,7 +8962,7 @@ S: Maintained F: drivers/net/wireless/wl3501* WM97XX TOUCHSCREEN DRIVERS -M: Mark Brown +M: Mark Brown M: Liam Girdwood L: linux-input@vger.kernel.org T: git git://opensource.wolfsonmicro.com/linux-2.6-touch @@ -9014,6 +8972,7 @@ F: drivers/input/touchscreen/*wm97* F: include/linux/wm97xx.h WOLFSON MICROELECTRONICS DRIVERS +M: Mark Brown L: patches@opensource.wolfsonmicro.com T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus diff --git a/trunk/Makefile b/trunk/Makefile index e5e3ba085191..cd11e8857604 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 10 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc1 NAME = Unicycling Gorilla # *DOCUMENTATION* diff --git a/trunk/arch/Kconfig b/trunk/arch/Kconfig index a4429bcd609e..dd0e8eb8042f 100644 --- a/trunk/arch/Kconfig +++ b/trunk/arch/Kconfig @@ -213,9 +213,6 @@ config USE_GENERIC_SMP_HELPERS config GENERIC_SMP_IDLE_THREAD bool -config GENERIC_IDLE_POLL_SETUP - bool - # Select if arch init_task initializer is different to init/init_task.c config ARCH_INIT_TASK bool diff --git a/trunk/arch/alpha/include/asm/pgtable.h b/trunk/arch/alpha/include/asm/pgtable.h index d8f9b7e89234..81a4342d5a3f 100644 --- a/trunk/arch/alpha/include/asm/pgtable.h +++ b/trunk/arch/alpha/include/asm/pgtable.h @@ -354,6 +354,9 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) #define kern_addr_valid(addr) (1) #endif +#define io_remap_pfn_range(vma, start, pfn, size, prot) \ + remap_pfn_range(vma, start, pfn, size, prot) + #define pte_ERROR(e) \ printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) #define pmd_ERROR(e) \ diff --git a/trunk/arch/alpha/kernel/osf_sys.c b/trunk/arch/alpha/kernel/osf_sys.c index 1402fcc11c2c..b9e37ad6fa19 100644 --- a/trunk/arch/alpha/kernel/osf_sys.c +++ b/trunk/arch/alpha/kernel/osf_sys.c @@ -96,7 +96,6 @@ struct osf_dirent { }; struct osf_dirent_callback { - struct dir_context ctx; struct osf_dirent __user *dirent; long __user *basep; unsigned int count; @@ -147,17 +146,17 @@ SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd, { int error; struct fd arg = fdget(fd); - struct osf_dirent_callback buf = { - .ctx.actor = osf_filldir, - .dirent = dirent, - .basep = basep, - .count = count - }; + struct osf_dirent_callback buf; if (!arg.file) return -EBADF; - error = iterate_dir(arg.file, &buf.ctx); + buf.dirent = dirent; + buf.basep = basep; + buf.count = count; + buf.error = 0; + + error = vfs_readdir(arg.file, osf_filldir, &buf); if (error >= 0) error = buf.error; if (count != buf.count) diff --git a/trunk/arch/alpha/kernel/pci-sysfs.c b/trunk/arch/alpha/kernel/pci-sysfs.c index 2b183b0d3207..b51f7b4818cd 100644 --- a/trunk/arch/alpha/kernel/pci-sysfs.c +++ 
b/trunk/arch/alpha/kernel/pci-sysfs.c @@ -26,6 +26,7 @@ static int hose_mmap_page_range(struct pci_controller *hose, base = sparse ? hose->sparse_io_base : hose->dense_io_base; vma->vm_pgoff += base >> PAGE_SHIFT; + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, diff --git a/trunk/arch/arc/boot/dts/abilis_tb100_dvk.dts b/trunk/arch/arc/boot/dts/abilis_tb100_dvk.dts index 0fa0d4abe795..c0fd3623c393 100644 --- a/trunk/arch/arc/boot/dts/abilis_tb100_dvk.dts +++ b/trunk/arch/arc/boot/dts/abilis_tb100_dvk.dts @@ -37,7 +37,7 @@ soc100 { uart@FF100000 { - pinctrl-names = "default"; + pinctrl-names = "abilis,simple-default"; pinctrl-0 = <&pctl_uart0>; }; ethernet@FE100000 { diff --git a/trunk/arch/arc/boot/dts/abilis_tb101_dvk.dts b/trunk/arch/arc/boot/dts/abilis_tb101_dvk.dts index a4d80ce283ae..6f8c381f6268 100644 --- a/trunk/arch/arc/boot/dts/abilis_tb101_dvk.dts +++ b/trunk/arch/arc/boot/dts/abilis_tb101_dvk.dts @@ -37,7 +37,7 @@ soc100 { uart@FF100000 { - pinctrl-names = "default"; + pinctrl-names = "abilis,simple-default"; pinctrl-0 = <&pctl_uart0>; }; ethernet@FE100000 { diff --git a/trunk/arch/arc/boot/dts/abilis_tb10x.dtsi b/trunk/arch/arc/boot/dts/abilis_tb10x.dtsi index b97e3051ba4b..a6139fc5aaa3 100644 --- a/trunk/arch/arc/boot/dts/abilis_tb10x.dtsi +++ b/trunk/arch/arc/boot/dts/abilis_tb10x.dtsi @@ -88,7 +88,8 @@ }; uart@FF100000 { - compatible = "snps,dw-apb-uart"; + compatible = "snps,dw-apb-uart", + "abilis,simple-pinctrl"; reg = <0xFF100000 0x100>; clock-frequency = <166666666>; interrupts = <25 1>; @@ -183,7 +184,8 @@ #address-cells = <1>; #size-cells = <0>; cell-index = <1>; - compatible = "abilis,tb100-spi"; + compatible = "abilis,tb100-spi", + "abilis,simple-pinctrl"; num-cs = <2>; reg = <0xFE011000 0x20>; interrupt-parent = <&tb10x_ictl>; diff --git a/trunk/arch/arc/include/asm/cacheflush.h b/trunk/arch/arc/include/asm/cacheflush.h index ef62682e8d95..9f841af41092 100644 --- a/trunk/arch/arc/include/asm/cacheflush.h +++ b/trunk/arch/arc/include/asm/cacheflush.h @@ -93,16 +93,14 @@ static inline int cache_is_vipt_aliasing(void) #endif } -#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1) +#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 3) /* * checks if two addresses (after page aligning) index into same cache set */ #define addr_not_cache_congruent(addr1, addr2) \ -({ \ cache_is_vipt_aliasing() ? 
\ - (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \ -}) + (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0 \ #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ do { \ diff --git a/trunk/arch/arc/include/asm/page.h b/trunk/arch/arc/include/asm/page.h index ab84bf131fe1..374a35514116 100644 --- a/trunk/arch/arc/include/asm/page.h +++ b/trunk/arch/arc/include/asm/page.h @@ -19,6 +19,13 @@ #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE) #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) +#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING + +#define clear_user_page(addr, vaddr, pg) clear_page(addr) +#define copy_user_page(vto, vfrom, vaddr, pg) copy_page(vto, vfrom) + +#else /* VIPT aliasing dcache */ + struct vm_area_struct; struct page; @@ -28,6 +35,8 @@ void copy_user_highpage(struct page *to, struct page *from, unsigned long u_vaddr, struct vm_area_struct *vma); void clear_user_page(void *to, unsigned long u_vaddr, struct page *page); +#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */ + #undef STRICT_MM_TYPECHECKS #ifdef STRICT_MM_TYPECHECKS diff --git a/trunk/arch/arc/include/asm/pgtable.h b/trunk/arch/arc/include/asm/pgtable.h index c110ac87d22b..1cc4720faccb 100644 --- a/trunk/arch/arc/include/asm/pgtable.h +++ b/trunk/arch/arc/include/asm/pgtable.h @@ -57,9 +57,9 @@ #define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */ #define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */ -#define _PAGE_U_EXECUTE (1<<3) /* Page has user execute perm (H) */ -#define _PAGE_U_WRITE (1<<4) /* Page has user write perm (H) */ -#define _PAGE_U_READ (1<<5) /* Page has user read perm (H) */ +#define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */ +#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */ +#define _PAGE_READ (1<<5) /* Page has user read perm (H) */ #define _PAGE_K_EXECUTE (1<<6) /* Page has kernel execute perm (H) */ #define _PAGE_K_WRITE (1<<7) /* Page has kernel write perm (H) */ #define _PAGE_K_READ (1<<8) /* Page has kernel perm (H) */ @@ -72,9 +72,9 @@ /* PD1 */ #define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */ -#define _PAGE_U_EXECUTE (1<<1) /* Page has user execute perm (H) */ -#define _PAGE_U_WRITE (1<<2) /* Page has user write perm (H) */ -#define _PAGE_U_READ (1<<3) /* Page has user read perm (H) */ +#define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */ +#define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */ +#define _PAGE_READ (1<<3) /* Page has user read perm (H) */ #define _PAGE_K_EXECUTE (1<<4) /* Page has kernel execute perm (H) */ #define _PAGE_K_WRITE (1<<5) /* Page has kernel write perm (H) */ #define _PAGE_K_READ (1<<6) /* Page has kernel perm (H) */ @@ -93,8 +93,7 @@ #endif /* Kernel allowed all permissions for all pages */ -#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ | \ - _PAGE_GLOBAL | _PAGE_PRESENT) +#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ) #ifdef CONFIG_ARC_CACHE_PAGES #define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE @@ -107,11 +106,7 @@ * -by default cached, unless config otherwise * -present in memory */ -#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE) - -#define _PAGE_READ (_PAGE_U_READ | _PAGE_K_READ) -#define _PAGE_WRITE (_PAGE_U_WRITE | _PAGE_K_WRITE) -#define _PAGE_EXECUTE (_PAGE_U_EXECUTE | _PAGE_K_EXECUTE) +#define ___DEF (_PAGE_PRESENT | _K_PAGE_PERMS | _PAGE_DEF_CACHEABLE) /* Set of bits not changed in pte_modify */ #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED) @@ -130,10 +125,11 @@ * kernel vaddr space - visible in all addr 
spaces, but kernel mode only * Thus Global, all-kernel-access, no-user-access, cached */ -#define PAGE_KERNEL __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE) +#define PAGE_KERNEL __pgprot(___DEF | _PAGE_GLOBAL) /* ioremap */ -#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS) +#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_PRESENT | _K_PAGE_PERMS | \ + _PAGE_GLOBAL) /************************************************************************** * Mapping of vm_flags (Generic VM) to PTE flags (arch specific) @@ -394,6 +390,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, * remap a physical page `pfn' of size `size' with page protection `prot' * into virtual address `from' */ +#define io_remap_pfn_range(vma, from, pfn, size, prot) \ + remap_pfn_range(vma, from, pfn, size, prot) + #include /* to cope with aliasing VIPT cache */ diff --git a/trunk/arch/arc/include/asm/tlb.h b/trunk/arch/arc/include/asm/tlb.h index cb0c708ca665..85b6df839bd7 100644 --- a/trunk/arch/arc/include/asm/tlb.h +++ b/trunk/arch/arc/include/asm/tlb.h @@ -16,7 +16,7 @@ /* Masks for actual TLB "PD"s */ #define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT) #define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \ - _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \ + _PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \ _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ) #ifndef __ASSEMBLY__ diff --git a/trunk/arch/arc/mm/cache_arc700.c b/trunk/arch/arc/mm/cache_arc700.c index aedce1905441..2f12bca8aef3 100644 --- a/trunk/arch/arc/mm/cache_arc700.c +++ b/trunk/arch/arc/mm/cache_arc700.c @@ -610,7 +610,7 @@ void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len) local_irq_save(flags); __ic_line_inv_vaddr(paddr, vaddr, len); - __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV); + __dc_line_op(paddr, vaddr, len, OP_FLUSH); local_irq_restore(flags); } @@ -676,17 +676,6 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, flush_cache_all(); } -void flush_anon_page(struct vm_area_struct *vma, struct page *page, - unsigned long u_vaddr) -{ - /* TBD: do we really need to clear the kernel mapping */ - __flush_dcache_page(page_address(page), u_vaddr); - __flush_dcache_page(page_address(page), page_address(page)); - -} - -#endif - void copy_user_highpage(struct page *to, struct page *from, unsigned long u_vaddr, struct vm_area_struct *vma) { @@ -736,6 +725,16 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page) set_bit(PG_arch_1, &page->flags); } +void flush_anon_page(struct vm_area_struct *vma, struct page *page, + unsigned long u_vaddr) +{ + /* TBD: do we really need to clear the kernel mapping */ + __flush_dcache_page(page_address(page), u_vaddr); + __flush_dcache_page(page_address(page), page_address(page)); + +} + +#endif /********************************************************************** * Explicit Cache flush request from user space via syscall diff --git a/trunk/arch/arc/mm/tlb.c b/trunk/arch/arc/mm/tlb.c index fe1c5a073afe..066145b5f348 100644 --- a/trunk/arch/arc/mm/tlb.c +++ b/trunk/arch/arc/mm/tlb.c @@ -444,8 +444,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, * so userspace sees the right data. 
* (Avoids the flush for Non-exec + congruent mapping case) */ - if ((vma->vm_flags & VM_EXEC) || - addr_not_cache_congruent(paddr, vaddr)) { + if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) { struct page *page = pfn_to_page(pte_pfn(*ptep)); int dirty = test_and_clear_bit(PG_arch_1, &page->flags); diff --git a/trunk/arch/arc/mm/tlbex.S b/trunk/arch/arc/mm/tlbex.S index 3357d26ffe54..9df765dc7c3a 100644 --- a/trunk/arch/arc/mm/tlbex.S +++ b/trunk/arch/arc/mm/tlbex.S @@ -277,7 +277,7 @@ ARC_ENTRY EV_TLBMissI ;---------------------------------------------------------------- ; VERIFY_PTE: Check if PTE permissions approp for executing code cmp_s r2, VMALLOC_START - mov.lo r2, (_PAGE_PRESENT | _PAGE_U_READ | _PAGE_U_EXECUTE) + mov.lo r2, (_PAGE_PRESENT | _PAGE_READ | _PAGE_EXECUTE) mov.hs r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE) and r3, r0, r2 ; Mask out NON Flag bits from PTE @@ -320,9 +320,9 @@ ARC_ENTRY EV_TLBMissD mov_s r2, 0 lr r3, [ecr] btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access - or.nz r2, r2, _PAGE_U_READ ; chk for Read flag in PTE + or.nz r2, r2, _PAGE_READ ; chk for Read flag in PTE btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access - or.nz r2, r2, _PAGE_U_WRITE ; chk for Write flag in PTE + or.nz r2, r2, _PAGE_WRITE ; chk for Write flag in PTE ; Above laddering takes care of XCHG access ; which is both Read and Write diff --git a/trunk/arch/arc/plat-tb10x/tb10x.c b/trunk/arch/arc/plat-tb10x/tb10x.c index 06cb30929460..d3567691c7e1 100644 --- a/trunk/arch/arc/plat-tb10x/tb10x.c +++ b/trunk/arch/arc/plat-tb10x/tb10x.c @@ -34,6 +34,31 @@ static void __init tb10x_platform_init(void) of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } +static void __init tb10x_platform_late_init(void) +{ + struct device_node *dn; + + /* + * Pinctrl documentation recommends setting up the iomux here for + * all modules which don't require control over the pins themselves. + * Modules which need this kind of assistance are compatible with + * "abilis,simple-pinctrl", i.e. we can easily iterate over them. + * TODO: Does this recommended method work cleanly with pins required + * by modules? 
+ */ + for_each_compatible_node(dn, NULL, "abilis,simple-pinctrl") { + struct platform_device *pd = of_find_device_by_node(dn); + struct pinctrl *pctl; + + pctl = pinctrl_get_select(&pd->dev, "abilis,simple-default"); + if (IS_ERR(pctl)) { + int ret = PTR_ERR(pctl); + dev_err(&pd->dev, "Could not set up pinctrl: %d\n", + ret); + } + } +} + static const char *tb10x_compat[] __initdata = { "abilis,arc-tb10x", NULL, @@ -42,4 +67,5 @@ static const char *tb10x_compat[] __initdata = { MACHINE_START(TB10x, "tb10x") .dt_compat = tb10x_compat, .init_machine = tb10x_platform_init, + .init_late = tb10x_platform_late_init, MACHINE_END diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig index 136f263ed47b..d423d58f938d 100644 --- a/trunk/arch/arm/Kconfig +++ b/trunk/arch/arm/Kconfig @@ -38,7 +38,6 @@ config ARM select HAVE_GENERIC_HARDIRQS select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)) select HAVE_IDE if PCI || ISA || PCMCIA - select HAVE_IRQ_TIME_ACCOUNTING select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZMA select HAVE_KERNEL_LZO @@ -489,7 +488,7 @@ config ARCH_IXP4XX config ARCH_DOVE bool "Marvell Dove" select ARCH_REQUIRE_GPIOLIB - select CPU_PJ4 + select CPU_V7 select GENERIC_CLOCKEVENTS select MIGHT_HAVE_PCI select PINCTRL @@ -1087,20 +1086,6 @@ if !MMU source "arch/arm/Kconfig-nommu" endif -config PJ4B_ERRATA_4742 - bool "PJ4B Errata 4742: IDLE Wake Up Commands can Cause the CPU Core to Cease Operation" - depends on CPU_PJ4B && MACH_ARMADA_370 - default y - help - When coming out of either a Wait for Interrupt (WFI) or a Wait for - Event (WFE) IDLE states, a specific timing sensitivity exists between - the retiring WFI/WFE instructions and the newly issued subsequent - instructions. This sensitivity can result in a CPU hang scenario. - Workaround: - The software must insert either a Data Synchronization Barrier (DSB) - or Data Memory Barrier (DMB) command immediately after the WFI/WFE - instruction - config ARM_ERRATA_326103 bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory" depends on CPU_V6 @@ -1203,16 +1188,6 @@ config PL310_ERRATA_588369 is not correctly implemented in PL310 as clean lines are not invalidated as a result of these operations. -config ARM_ERRATA_643719 - bool "ARM errata: LoUIS bit field in CLIDR register is incorrect" - depends on CPU_V7 && SMP - help - This option enables the workaround for the 643719 Cortex-A9 (prior to - r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR - register returns zero when it should return one. The workaround - corrects this value, ensuring cache maintenance operations which use - it behave as intended and avoiding data corruption. - config ARM_ERRATA_720789 bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID" depends on CPU_V7 @@ -2030,7 +2005,7 @@ config XIP_PHYS_ADDR config KEXEC bool "Kexec system call (EXPERIMENTAL)" - depends on (!SMP || PM_SLEEP_SMP) + depends on (!SMP || HOTPLUG_CPU) help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. 
It is like a reboot diff --git a/trunk/arch/arm/Makefile b/trunk/arch/arm/Makefile index 1ba358ba16b8..47374085befd 100644 --- a/trunk/arch/arm/Makefile +++ b/trunk/arch/arm/Makefile @@ -309,7 +309,7 @@ define archhelp echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' echo '* xipImage - XIP kernel image, if configured (arch/$(ARCH)/boot/xipImage)' echo ' uImage - U-Boot wrapped zImage' - echo ' bootpImage - Combined zImage and initial RAM disk' + echo ' bootpImage - Combined zImage and initial RAM disk' echo ' (supply initrd image via make variable INITRD=)' echo '* dtbs - Build device tree blobs for enabled boards' echo ' install - Install uncompressed kernel' diff --git a/trunk/arch/arm/boot/compressed/Makefile b/trunk/arch/arm/boot/compressed/Makefile index 120b83bfde20..3580d57ea218 100644 --- a/trunk/arch/arm/boot/compressed/Makefile +++ b/trunk/arch/arm/boot/compressed/Makefile @@ -116,8 +116,7 @@ targets := vmlinux vmlinux.lds \ # Make sure files are removed during clean extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \ - lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \ - hyp-stub.S + lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) ifeq ($(CONFIG_FUNCTION_TRACER),y) ORIG_CFLAGS := $(KBUILD_CFLAGS) @@ -125,7 +124,7 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) endif ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj) -asflags-y := -DZIMAGE +asflags-y := -Wa,-march=all -DZIMAGE # Supply kernel BSS size to the decompressor via a linker symbol. KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \ diff --git a/trunk/arch/arm/boot/compressed/debug.S b/trunk/arch/arm/boot/compressed/debug.S index 5392ee63338f..6e8382d5b7a4 100644 --- a/trunk/arch/arm/boot/compressed/debug.S +++ b/trunk/arch/arm/boot/compressed/debug.S @@ -1,8 +1,6 @@ #include #include -#ifndef CONFIG_DEBUG_SEMIHOSTING - #include CONFIG_DEBUG_LL_INCLUDE ENTRY(putc) @@ -12,29 +10,3 @@ ENTRY(putc) busyuart r3, r1 mov pc, lr ENDPROC(putc) - -#else - -ENTRY(putc) - adr r1, 1f - ldmia r1, {r2, r3} - add r2, r2, r1 - ldr r1, [r2, r3] - strb r0, [r1] - mov r0, #0x03 @ SYS_WRITEC - ARM( svc #0x123456 ) - THUMB( svc #0xab ) - mov pc, lr - .align 2 -1: .word _GLOBAL_OFFSET_TABLE_ - . 
- .word semi_writec_buf(GOT) -ENDPROC(putc) - - .bss - .global semi_writec_buf - .type semi_writec_buf, %object -semi_writec_buf: - .space 4 - .size semi_writec_buf, 4 - -#endif diff --git a/trunk/arch/arm/boot/compressed/head-sa1100.S b/trunk/arch/arm/boot/compressed/head-sa1100.S index 3115e313d9f6..6179d94dd5c6 100644 --- a/trunk/arch/arm/boot/compressed/head-sa1100.S +++ b/trunk/arch/arm/boot/compressed/head-sa1100.S @@ -11,7 +11,6 @@ #include .section ".start", "ax" - .arch armv4 __SA1100_start: diff --git a/trunk/arch/arm/boot/compressed/head-shark.S b/trunk/arch/arm/boot/compressed/head-shark.S index 92b56897ed64..089c560e07f1 100644 --- a/trunk/arch/arm/boot/compressed/head-shark.S +++ b/trunk/arch/arm/boot/compressed/head-shark.S @@ -18,7 +18,6 @@ .section ".start", "ax" - .arch armv4 b __beginning __ofw_data: .long 0 @ the number of memory blocks diff --git a/trunk/arch/arm/boot/compressed/head.S b/trunk/arch/arm/boot/compressed/head.S index 032a8d987148..fe4d9c3ad761 100644 --- a/trunk/arch/arm/boot/compressed/head.S +++ b/trunk/arch/arm/boot/compressed/head.S @@ -11,7 +11,6 @@ #include #include - .arch armv7-a /* * Debugging stuff * @@ -806,8 +805,8 @@ call_cache_fn: adr r12, proc_types .align 2 .type proc_types,#object proc_types: - .word 0x41000000 @ old ARM ID - .word 0xff00f000 + .word 0x00000000 @ old ARM ID + .word 0x0000f000 mov pc, lr THUMB( nop ) mov pc, lr diff --git a/trunk/arch/arm/boot/dts/Makefile b/trunk/arch/arm/boot/dts/Makefile index f0895c581a89..b9f7121e6ecf 100644 --- a/trunk/arch/arm/boot/dts/Makefile +++ b/trunk/arch/arm/boot/dts/Makefile @@ -177,9 +177,7 @@ dtb-$(CONFIG_ARCH_SPEAR3XX)+= spear300-evb.dtb \ spear320-evb.dtb \ spear320-hmi.dtb dtb-$(CONFIG_ARCH_SPEAR6XX)+= spear600-evb.dtb -dtb-$(CONFIG_ARCH_SUNXI) += \ - sun4i-a10-cubieboard.dtb \ - sun4i-a10-mini-xplus.dtb \ +dtb-$(CONFIG_ARCH_SUNXI) += sun4i-a10-cubieboard.dtb \ sun4i-a10-hackberry.dtb \ sun5i-a13-olinuxino.dtb dtb-$(CONFIG_ARCH_TEGRA) += tegra20-harmony.dtb \ diff --git a/trunk/arch/arm/boot/dts/am33xx.dtsi b/trunk/arch/arm/boot/dts/am33xx.dtsi index 8e1248f01fab..1460d9b88adf 100644 --- a/trunk/arch/arm/boot/dts/am33xx.dtsi +++ b/trunk/arch/arm/boot/dts/am33xx.dtsi @@ -409,8 +409,8 @@ ti,hwmods = "gpmc"; reg = <0x50000000 0x2000>; interrupts = <100>; - gpmc,num-cs = <7>; - gpmc,num-waitpins = <2>; + num-cs = <7>; + num-waitpins = <2>; #address-cells = <2>; #size-cells = <1>; status = "disabled"; diff --git a/trunk/arch/arm/boot/dts/armada-370-xp.dtsi b/trunk/arch/arm/boot/dts/armada-370-xp.dtsi index 550eb772c30e..272bbc65fab0 100644 --- a/trunk/arch/arm/boot/dts/armada-370-xp.dtsi +++ b/trunk/arch/arm/boot/dts/armada-370-xp.dtsi @@ -33,8 +33,7 @@ #size-cells = <1>; compatible = "simple-bus"; interrupt-parent = <&mpic>; - ranges = <0 0 0xd0000000 0x0100000 /* internal registers */ - 0xe0000000 0 0xe0000000 0x8100000 /* PCIe */>; + ranges = <0 0 0xd0000000 0x100000>; internal-regs { compatible = "simple-bus"; diff --git a/trunk/arch/arm/boot/dts/armada-370.dtsi b/trunk/arch/arm/boot/dts/armada-370.dtsi index aee2b1866ce2..b2c1b5af9749 100644 --- a/trunk/arch/arm/boot/dts/armada-370.dtsi +++ b/trunk/arch/arm/boot/dts/armada-370.dtsi @@ -29,8 +29,7 @@ }; soc { - ranges = <0 0xd0000000 0x0100000 /* internal registers */ - 0xe0000000 0xe0000000 0x8100000 /* PCIe */>; + ranges = <0 0xd0000000 0x100000>; internal-regs { system-controller@18200 { compatible = "marvell,armada-370-xp-system-controller"; @@ -39,12 +38,12 @@ L2: l2-cache { compatible = "marvell,aurora-outer-cache"; - reg = 
<0x08000 0x1000>; + reg = <0xd0008000 0x1000>; cache-id-part = <0x100>; wt-override; }; - interrupt-controller@20000 { + mpic: interrupt-controller@20000 { reg = <0x20a00 0x1d0>, <0x21870 0x58>; }; diff --git a/trunk/arch/arm/boot/dts/armada-xp-gp.dts b/trunk/arch/arm/boot/dts/armada-xp-gp.dts index 76db557adbe7..26ad06fc147e 100644 --- a/trunk/arch/arm/boot/dts/armada-xp-gp.dts +++ b/trunk/arch/arm/boot/dts/armada-xp-gp.dts @@ -39,10 +39,6 @@ }; soc { - ranges = <0 0 0xd0000000 0x100000 /* Internal registers 1MiB */ - 0xe0000000 0 0xe0000000 0x8100000 /* PCIe */ - 0xf0000000 0 0xf0000000 0x1000000 /* Device Bus, NOR 16MiB */>; - internal-regs { serial@12000 { clock-frequency = <250000000>; diff --git a/trunk/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/trunk/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts index fdea75c73411..f14d36c46159 100644 --- a/trunk/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts +++ b/trunk/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts @@ -27,10 +27,6 @@ }; soc { - ranges = <0 0 0xd0000000 0x100000 /* Internal registers 1MiB */ - 0xe0000000 0 0xe0000000 0x8100000 /* PCIe */ - 0xf0000000 0 0xf0000000 0x8000000 /* Device Bus, NOR 128MiB */>; - internal-regs { serial@12000 { clock-frequency = <250000000>; diff --git a/trunk/arch/arm/boot/dts/armada-xp.dtsi b/trunk/arch/arm/boot/dts/armada-xp.dtsi index 5b902f9a3af2..bacab11c10dc 100644 --- a/trunk/arch/arm/boot/dts/armada-xp.dtsi +++ b/trunk/arch/arm/boot/dts/armada-xp.dtsi @@ -31,7 +31,7 @@ wt-override; }; - interrupt-controller@20000 { + mpic: interrupt-controller@20000 { reg = <0x20a00 0x2d0>, <0x21070 0x58>; }; diff --git a/trunk/arch/arm/boot/dts/at91sam9260.dtsi b/trunk/arch/arm/boot/dts/at91sam9260.dtsi index 84c4bef2d726..70b5ccbac234 100644 --- a/trunk/arch/arm/boot/dts/at91sam9260.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9260.dtsi @@ -264,7 +264,7 @@ atmel,pins = <0 10 0x2 0x0 /* PA10 periph B */ 0 11 0x2 0x0 /* PA11 periph B */ - 0 22 0x2 0x0 /* PA22 periph B */ + 0 24 0x2 0x0 /* PA24 periph B */ 0 25 0x2 0x0 /* PA25 periph B */ 0 26 0x2 0x0 /* PA26 periph B */ 0 27 0x2 0x0 /* PA27 periph B */ diff --git a/trunk/arch/arm/boot/dts/at91sam9n12.dtsi b/trunk/arch/arm/boot/dts/at91sam9n12.dtsi index 8d25f889928e..3de8e6dfbcb1 100644 --- a/trunk/arch/arm/boot/dts/at91sam9n12.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9n12.dtsi @@ -57,7 +57,6 @@ compatible = "atmel,at91rm9200-aic"; interrupt-controller; reg = <0xfffff000 0x200>; - atmel,external-irqs = <31>; }; ramc0: ramc@ffffe800 { diff --git a/trunk/arch/arm/boot/dts/at91sam9x25ek.dts b/trunk/arch/arm/boot/dts/at91sam9x25ek.dts index 315250b4995e..3b40d11d65e7 100644 --- a/trunk/arch/arm/boot/dts/at91sam9x25ek.dts +++ b/trunk/arch/arm/boot/dts/at91sam9x25ek.dts @@ -11,7 +11,7 @@ /include/ "at91sam9x5ek.dtsi" / { - model = "Atmel AT91SAM9X25-EK"; + model = "Atmel AT91SAM9G25-EK"; compatible = "atmel,at91sam9x25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9"; ahb { diff --git a/trunk/arch/arm/boot/dts/bcm2835.dtsi b/trunk/arch/arm/boot/dts/bcm2835.dtsi index 1e12aeff403b..f0052dccf9a8 100644 --- a/trunk/arch/arm/boot/dts/bcm2835.dtsi +++ b/trunk/arch/arm/boot/dts/bcm2835.dtsi @@ -44,7 +44,6 @@ reg = <0x7e201000 0x1000>; interrupts = <2 25>; clock-frequency = <3000000>; - arm,primecell-periphid = <0x00241011>; }; gpio: gpio { diff --git a/trunk/arch/arm/boot/dts/exynos5250-pinctrl.dtsi b/trunk/arch/arm/boot/dts/exynos5250-pinctrl.dtsi index ded558bb0f3b..d1650fb34c0a 100644 --- a/trunk/arch/arm/boot/dts/exynos5250-pinctrl.dtsi +++ 
b/trunk/arch/arm/boot/dts/exynos5250-pinctrl.dtsi @@ -763,7 +763,7 @@ }; }; - pinctrl@03860000 { + pinctrl@03680000 { gpz: gpz { gpio-controller; #gpio-cells = <2>; diff --git a/trunk/arch/arm/boot/dts/exynos5250.dtsi b/trunk/arch/arm/boot/dts/exynos5250.dtsi index fc9fb3d526e2..98dfc3ea5c0b 100644 --- a/trunk/arch/arm/boot/dts/exynos5250.dtsi +++ b/trunk/arch/arm/boot/dts/exynos5250.dtsi @@ -161,9 +161,9 @@ interrupts = <0 50 0>; }; - pinctrl_3: pinctrl@03860000 { + pinctrl_3: pinctrl@03680000 { compatible = "samsung,exynos5250-pinctrl"; - reg = <0x03860000 0x1000>; + reg = <0x0368000 0x1000>; interrupts = <0 47 0>; }; @@ -497,21 +497,6 @@ clock-names = "usbhost"; }; - usbphy@12130000 { - compatible = "samsung,exynos5250-usb2phy"; - reg = <0x12130000 0x100>; - clocks = <&clock 1>, <&clock 285>; - clock-names = "ext_xtal", "usbhost"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - - usbphy-sys { - reg = <0x10040704 0x8>, - <0x10050230 0x4>; - }; - }; - amba { #address-cells = <1>; #size-cells = <1>; diff --git a/trunk/arch/arm/boot/dts/imx25.dtsi b/trunk/arch/arm/boot/dts/imx25.dtsi index 701153992c69..d2550e0bca24 100644 --- a/trunk/arch/arm/boot/dts/imx25.dtsi +++ b/trunk/arch/arm/boot/dts/imx25.dtsi @@ -141,8 +141,8 @@ #size-cells = <0>; compatible = "fsl,imx25-cspi", "fsl,imx35-cspi"; reg = <0x43fa4000 0x4000>; - clocks = <&clks 62>, <&clks 62>; - clock-names = "ipg", "per"; + clocks = <&clks 62>; + clock-names = "ipg"; interrupts = <14>; status = "disabled"; }; @@ -182,8 +182,8 @@ compatible = "fsl,imx25-cspi", "fsl,imx35-cspi"; reg = <0x50004000 0x4000>; interrupts = <0>; - clocks = <&clks 80>, <&clks 80>; - clock-names = "ipg", "per"; + clocks = <&clks 80>; + clock-names = "ipg"; status = "disabled"; }; @@ -210,8 +210,8 @@ #size-cells = <0>; compatible = "fsl,imx25-cspi", "fsl,imx35-cspi"; reg = <0x50010000 0x4000>; - clocks = <&clks 79>, <&clks 79>; - clock-names = "ipg", "per"; + clocks = <&clks 79>; + clock-names = "ipg"; interrupts = <13>; status = "disabled"; }; diff --git a/trunk/arch/arm/boot/dts/imx27.dtsi b/trunk/arch/arm/boot/dts/imx27.dtsi index 75bd11386516..ff4bd4873edf 100644 --- a/trunk/arch/arm/boot/dts/imx27.dtsi +++ b/trunk/arch/arm/boot/dts/imx27.dtsi @@ -131,7 +131,7 @@ compatible = "fsl,imx27-cspi"; reg = <0x1000e000 0x1000>; interrupts = <16>; - clocks = <&clks 53>, <&clks 53>; + clocks = <&clks 53>, <&clks 0>; clock-names = "ipg", "per"; status = "disabled"; }; @@ -142,7 +142,7 @@ compatible = "fsl,imx27-cspi"; reg = <0x1000f000 0x1000>; interrupts = <15>; - clocks = <&clks 52>, <&clks 52>; + clocks = <&clks 52>, <&clks 0>; clock-names = "ipg", "per"; status = "disabled"; }; @@ -223,7 +223,7 @@ compatible = "fsl,imx27-cspi"; reg = <0x10017000 0x1000>; interrupts = <6>; - clocks = <&clks 51>, <&clks 51>; + clocks = <&clks 51>, <&clks 0>; clock-names = "ipg", "per"; status = "disabled"; }; diff --git a/trunk/arch/arm/boot/dts/imx51.dtsi b/trunk/arch/arm/boot/dts/imx51.dtsi index 53fdde69bbf4..21bb786c5b31 100644 --- a/trunk/arch/arm/boot/dts/imx51.dtsi +++ b/trunk/arch/arm/boot/dts/imx51.dtsi @@ -631,7 +631,7 @@ compatible = "fsl,imx51-cspi", "fsl,imx35-cspi"; reg = <0x83fc0000 0x4000>; interrupts = <38>; - clocks = <&clks 55>, <&clks 55>; + clocks = <&clks 55>, <&clks 0>; clock-names = "ipg", "per"; status = "disabled"; }; diff --git a/trunk/arch/arm/boot/dts/imx53.dtsi b/trunk/arch/arm/boot/dts/imx53.dtsi index eb83aa039b8b..845982eaac22 100644 --- a/trunk/arch/arm/boot/dts/imx53.dtsi +++ b/trunk/arch/arm/boot/dts/imx53.dtsi @@ -714,7 +714,7 @@ 
compatible = "fsl,imx53-cspi", "fsl,imx35-cspi"; reg = <0x63fc0000 0x4000>; interrupts = <38>; - clocks = <&clks 55>, <&clks 55>; + clocks = <&clks 55>, <&clks 0>; clock-names = "ipg", "per"; status = "disabled"; }; diff --git a/trunk/arch/arm/boot/dts/omap3.dtsi b/trunk/arch/arm/boot/dts/omap3.dtsi index 99ba6e14ebf3..82a404da1c0d 100644 --- a/trunk/arch/arm/boot/dts/omap3.dtsi +++ b/trunk/arch/arm/boot/dts/omap3.dtsi @@ -516,7 +516,7 @@ usb_otg_hs: usb_otg_hs@480ab000 { compatible = "ti,omap3-musb"; reg = <0x480ab000 0x1000>; - interrupts = <92>, <93>; + interrupts = <0 92 0x4>, <0 93 0x4>; interrupt-names = "mc", "dma"; ti,hwmods = "usb_otg_hs"; multipoint = <1>; diff --git a/trunk/arch/arm/boot/dts/omap4-panda-common.dtsi b/trunk/arch/arm/boot/dts/omap4-panda-common.dtsi index eeb734e25709..03bd60deb52b 100644 --- a/trunk/arch/arm/boot/dts/omap4-panda-common.dtsi +++ b/trunk/arch/arm/boot/dts/omap4-panda-common.dtsi @@ -56,23 +56,9 @@ }; }; -&omap4_pmx_wkup { - pinctrl-names = "default"; - pinctrl-0 = < - &twl6030_wkup_pins - >; - - twl6030_wkup_pins: pinmux_twl6030_wkup_pins { - pinctrl-single,pins = < - 0x14 0x2 /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */ - >; - }; -}; - &omap4_pmx_core { pinctrl-names = "default"; pinctrl-0 = < - &twl6030_pins &twl6040_pins &mcpdm_pins &mcbsp1_pins @@ -80,12 +66,6 @@ &tpd12s015_pins >; - twl6030_pins: pinmux_twl6030_pins { - pinctrl-single,pins = < - 0x15e 0x4118 /* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */ - >; - }; - twl6040_pins: pinmux_twl6040_pins { pinctrl-single,pins = < 0xe0 0x3 /* hdq_sio.gpio_127 OUTPUT | MODE3 */ diff --git a/trunk/arch/arm/boot/dts/omap4-sdp.dts b/trunk/arch/arm/boot/dts/omap4-sdp.dts index 98505a2ef162..a35d9cd58063 100644 --- a/trunk/arch/arm/boot/dts/omap4-sdp.dts +++ b/trunk/arch/arm/boot/dts/omap4-sdp.dts @@ -142,23 +142,9 @@ }; }; -&omap4_pmx_wkup { - pinctrl-names = "default"; - pinctrl-0 = < - &twl6030_wkup_pins - >; - - twl6030_wkup_pins: pinmux_twl6030_wkup_pins { - pinctrl-single,pins = < - 0x14 0x2 /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */ - >; - }; -}; - &omap4_pmx_core { pinctrl-names = "default"; pinctrl-0 = < - &twl6030_pins &twl6040_pins &mcpdm_pins &dmic_pins @@ -193,12 +179,6 @@ >; }; - twl6030_pins: pinmux_twl6030_pins { - pinctrl-single,pins = < - 0x15e 0x4118 /* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */ - >; - }; - twl6040_pins: pinmux_twl6040_pins { pinctrl-single,pins = < 0xe0 0x3 /* hdq_sio.gpio_127 OUTPUT | MODE3 */ diff --git a/trunk/arch/arm/boot/dts/omap5.dtsi b/trunk/arch/arm/boot/dts/omap5.dtsi index 635cae283011..3dd7ff825828 100644 --- a/trunk/arch/arm/boot/dts/omap5.dtsi +++ b/trunk/arch/arm/boot/dts/omap5.dtsi @@ -538,7 +538,6 @@ interrupts = <0 41 0x4>; ti,hwmods = "timer5"; ti,timer-dsp; - ti,timer-pwm; }; timer6: timer@4013a000 { @@ -575,7 +574,6 @@ reg = <0x4803e000 0x80>; interrupts = <0 45 0x4>; ti,hwmods = "timer9"; - ti,timer-pwm; }; timer10: timer@48086000 { @@ -583,7 +581,6 @@ reg = <0x48086000 0x80>; interrupts = <0 46 0x4>; ti,hwmods = "timer10"; - ti,timer-pwm; }; timer11: timer@48088000 { diff --git a/trunk/arch/arm/boot/dts/sama5d3.dtsi b/trunk/arch/arm/boot/dts/sama5d3.dtsi index 5000e0d42849..2e643ea51cce 100644 --- a/trunk/arch/arm/boot/dts/sama5d3.dtsi +++ b/trunk/arch/arm/boot/dts/sama5d3.dtsi @@ -75,6 +75,11 @@ compatible = "atmel,at91sam9x5-spi"; reg = <0xf0004000 0x100>; interrupts = <24 4 3>; + cs-gpios = <&pioD 13 0 + &pioD 14 0 /* conflicts with SCK0 and CANRX0 */ + &pioD 15 0 /* conflicts with CTS0 and 
CANTX0 */ + &pioD 16 0 /* conflicts with RTS0 and PWMFI3 */ + >; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_spi0>; status = "disabled"; @@ -151,7 +156,7 @@ }; macb0: ethernet@f0028000 { - compatible = "cdns,pc302-gem", "cdns,gem"; + compatible = "cnds,pc302-gem", "cdns,gem"; reg = <0xf0028000 0x100>; interrupts = <34 4 3>; pinctrl-names = "default"; @@ -198,6 +203,11 @@ compatible = "atmel,at91sam9x5-spi"; reg = <0xf8008000 0x100>; interrupts = <25 4 3>; + cs-gpios = <&pioC 25 0 + &pioC 26 0 /* conflitcs with TWD1 and ISI_D11 */ + &pioC 27 0 /* conflitcs with TWCK1 and ISI_D10 */ + &pioC 28 0 /* conflitcs with PWMFI0 and ISI_D9 */ + >; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_spi1>; status = "disabled"; diff --git a/trunk/arch/arm/boot/dts/sama5d3xcm.dtsi b/trunk/arch/arm/boot/dts/sama5d3xcm.dtsi index b336e7787cb3..1f8ed404626c 100644 --- a/trunk/arch/arm/boot/dts/sama5d3xcm.dtsi +++ b/trunk/arch/arm/boot/dts/sama5d3xcm.dtsi @@ -32,10 +32,6 @@ ahb { apb { - spi0: spi@f0004000 { - cs-gpios = <&pioD 13 0>, <0>, <0>, <0>; - }; - macb0: ethernet@f0028000 { phy-mode = "rgmii"; }; diff --git a/trunk/arch/arm/boot/dts/ste-nomadik-s8815.dts b/trunk/arch/arm/boot/dts/ste-nomadik-s8815.dts index 6f82d9368948..b28fbf3408e3 100644 --- a/trunk/arch/arm/boot/dts/ste-nomadik-s8815.dts +++ b/trunk/arch/arm/boot/dts/ste-nomadik-s8815.dts @@ -14,19 +14,13 @@ bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk"; }; - /* This is where the interrupt is routed on the S8815 board */ - external-bus@34000000 { - ethernet@300 { - interrupt-parent = <&gpio3>; - interrupts = <8 0x1>; - }; - }; - /* Custom board node with GPIO pins to active etc */ usb-s8815 { /* The S8815 is using this very GPIO pin for the SMSC91x IRQs */ ethernet-gpio { - gpios = <&gpio3 8 0x1>; + gpios = <&gpio3 19 0x1>; + interrupts = <19 0x1>; + interrupt-parent = <&gpio3>; }; /* This will bias the MMC/SD card detect line */ mmcsd-gpio { diff --git a/trunk/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts b/trunk/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts index 078ed7f618d7..4a7c35d6726a 100644 --- a/trunk/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts +++ b/trunk/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts @@ -22,8 +22,8 @@ bootargs = "earlyprintk console=ttyS0,115200"; }; - soc@01c20000 { - uart0: serial@01c28000 { + soc { + uart0: uart@01c28000 { pinctrl-names = "default"; pinctrl-0 = <&uart0_pins_a>; status = "okay"; diff --git a/trunk/arch/arm/common/mcpm_platsmp.c b/trunk/arch/arm/common/mcpm_platsmp.c index 3caed0db6986..52b88d81b7bb 100644 --- a/trunk/arch/arm/common/mcpm_platsmp.c +++ b/trunk/arch/arm/common/mcpm_platsmp.c @@ -15,6 +15,8 @@ #include #include +#include + #include #include #include @@ -47,6 +49,7 @@ static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *i static void __cpuinit mcpm_secondary_init(unsigned int cpu) { mcpm_cpu_powered_up(); + gic_secondary_init(0); } #ifdef CONFIG_HOTPLUG_CPU diff --git a/trunk/arch/arm/configs/exynos_defconfig b/trunk/arch/arm/configs/exynos_defconfig index 227abf9cc601..e40b435d204e 100644 --- a/trunk/arch/arm/configs/exynos_defconfig +++ b/trunk/arch/arm/configs/exynos_defconfig @@ -1,4 +1,4 @@ -CONFIG_SYSVIPC=y +CONFIG_EXPERIMENTAL=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y @@ -7,18 +7,17 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set CONFIG_PARTITION_ADVANCED=y +CONFIG_EFI_PARTITION=y CONFIG_ARCH_EXYNOS=y -CONFIG_S3C_LOWLEVEL_UART_PORT=3 +CONFIG_S3C_LOWLEVEL_UART_PORT=1 CONFIG_S3C24XX_PWM=y 
CONFIG_ARCH_EXYNOS5=y CONFIG_MACH_EXYNOS4_DT=y +CONFIG_MACH_EXYNOS5_DT=y CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_PREEMPT=y CONFIG_AEABI=y -CONFIG_HIGHMEM=y -CONFIG_ZBOOT_ROM_TEXT=0x0 -CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_ARM_APPENDED_DTB=y CONFIG_ARM_ATAG_DTB_COMPAT=y CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M" @@ -31,58 +30,35 @@ CONFIG_NET_KEY=y CONFIG_INET=y CONFIG_RFKILL_REGULATOR=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_CRYPTOLOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_SG=y -CONFIG_MD=y -CONFIG_BLK_DEV_DM=y -CONFIG_DM_CRYPT=m CONFIG_NETDEVICES=y CONFIG_SMSC911X=y CONFIG_USB_USBNET=y CONFIG_USB_NET_SMSC75XX=y CONFIG_USB_NET_SMSC95XX=y CONFIG_INPUT_EVDEV=y -CONFIG_KEYBOARD_CROS_EC=y -# CONFIG_MOUSE_PS2 is not set -CONFIG_MOUSE_CYAPA=y +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set CONFIG_INPUT_TOUCHSCREEN=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_SAMSUNG=y CONFIG_SERIAL_SAMSUNG_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_HW_RANDOM=y -CONFIG_TCG_TPM=y -CONFIG_TCG_TIS_I2C_INFINEON=y CONFIG_I2C=y -CONFIG_I2C_MUX=y -CONFIG_I2C_ARB_GPIO_CHALLENGE=y -CONFIG_I2C_S3C2410=y -CONFIG_DEBUG_GPIO=y # CONFIG_HWMON is not set -CONFIG_MFD_CROS_EC=y -CONFIG_MFD_CROS_EC_I2C=y -CONFIG_MFD_MAX77686=y -CONFIG_MFD_MAX8997=y -CONFIG_MFD_SEC_CORE=y CONFIG_MFD_TPS65090=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_GPIO=y -CONFIG_REGULATOR_MAX8997=y -CONFIG_REGULATOR_MAX77686=y -CONFIG_REGULATOR_S5M8767=y CONFIG_REGULATOR_TPS65090=y CONFIG_FB=y -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_SIMPLE=y CONFIG_EXYNOS_VIDEO=y CONFIG_EXYNOS_MIPI_DSI=y CONFIG_EXYNOS_DP=y @@ -91,20 +67,6 @@ CONFIG_FONTS=y CONFIG_FONT_7x14=y CONFIG_LOGO=y CONFIG_USB=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_S5P=y -CONFIG_USB_STORAGE=y -CONFIG_USB_DWC3=y -CONFIG_USB_PHY=y -CONFIG_SAMSUNG_USB2PHY=y -CONFIG_SAMSUNG_USB3PHY=y -CONFIG_MMC=y -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_S3C=y -CONFIG_MMC_DW=y -CONFIG_MMC_DW_IDMAC=y -CONFIG_MMC_DW_EXYNOS=y -CONFIG_COMMON_CLK_MAX77686=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y CONFIG_EXT4_FS=y @@ -117,7 +79,6 @@ CONFIG_ROMFS_FS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y -CONFIG_PRINTK_TIME=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y @@ -126,5 +87,6 @@ CONFIG_DEBUG_SPINLOCK=y CONFIG_DEBUG_MUTEXES=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_USER=y -CONFIG_CRYPTO_SHA256=y +CONFIG_DEBUG_LL=y +CONFIG_EARLY_PRINTK=y CONFIG_CRC_CCITT=y diff --git a/trunk/arch/arm/configs/omap1_defconfig b/trunk/arch/arm/configs/omap1_defconfig index 9940f7b4e438..7e0ebb64a7f9 100644 --- a/trunk/arch/arm/configs/omap1_defconfig +++ b/trunk/arch/arm/configs/omap1_defconfig @@ -199,6 +199,7 @@ CONFIG_USB_PHY=y CONFIG_USB_DEBUG=y CONFIG_USB_DEVICEFS=y # CONFIG_USB_DEVICE_CLASS is not set +CONFIG_USB_SUSPEND=y CONFIG_USB_MON=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_STORAGE=y diff --git a/trunk/arch/arm/configs/omap2plus_defconfig b/trunk/arch/arm/configs/omap2plus_defconfig index abbe31937c65..c1ef64bc5abd 100644 --- a/trunk/arch/arm/configs/omap2plus_defconfig +++ b/trunk/arch/arm/configs/omap2plus_defconfig @@ -20,7 +20,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y # CONFIG_BLK_DEV_BSG is not set -CONFIG_ARCH_MULTI_V6=y CONFIG_ARCH_OMAP2PLUS=y CONFIG_OMAP_RESET_CLOCKS=y CONFIG_OMAP_MUX_DEBUG=y @@ 
-205,6 +204,7 @@ CONFIG_USB=y CONFIG_USB_DEBUG=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_DEVICEFS=y +CONFIG_USB_SUSPEND=y CONFIG_USB_MON=y CONFIG_USB_WDM=y CONFIG_USB_STORAGE=y diff --git a/trunk/arch/arm/configs/tegra_defconfig b/trunk/arch/arm/configs/tegra_defconfig index f7ba316164d4..a5f0485133cf 100644 --- a/trunk/arch/arm/configs/tegra_defconfig +++ b/trunk/arch/arm/configs/tegra_defconfig @@ -153,7 +153,6 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y CONFIG_MEDIA_USB_SUPPORT=y CONFIG_USB_VIDEO_CLASS=m CONFIG_DRM=y -CONFIG_TEGRA_HOST1X=y CONFIG_DRM_TEGRA=y CONFIG_BACKLIGHT_LCD_SUPPORT=y # CONFIG_LCD_CLASS_DEVICE is not set @@ -203,7 +202,7 @@ CONFIG_TEGRA20_APB_DMA=y CONFIG_STAGING=y CONFIG_SENSORS_ISL29018=y CONFIG_SENSORS_ISL29028=y -CONFIG_AK8975=y +CONFIG_SENSORS_AK8975=y CONFIG_MFD_NVEC=y CONFIG_KEYBOARD_NVEC=y CONFIG_SERIO_NVEC_PS2=y diff --git a/trunk/arch/arm/crypto/sha1-armv4-large.S b/trunk/arch/arm/crypto/sha1-armv4-large.S index 99207c45ec10..92c6eed7aac9 100644 --- a/trunk/arch/arm/crypto/sha1-armv4-large.S +++ b/trunk/arch/arm/crypto/sha1-armv4-large.S @@ -195,7 +195,6 @@ ENTRY(sha1_block_data_order) add r3,r3,r10 @ E+=F_00_19(B,C,D) cmp r14,sp bne .L_00_15 @ [((11+4)*5+2)*3] - sub sp,sp,#25*4 #if __ARM_ARCH__<7 ldrb r10,[r1,#2] ldrb r9,[r1,#3] @@ -291,6 +290,7 @@ ENTRY(sha1_block_data_order) add r3,r3,r10 @ E+=F_00_19(B,C,D) ldr r8,.LK_20_39 @ [+15+16*4] + sub sp,sp,#25*4 cmn sp,#0 @ [+3], clear carry to denote 20_39 .L_20_39_or_60_79: ldr r9,[r14,#15*4] diff --git a/trunk/arch/arm/include/asm/cacheflush.h b/trunk/arch/arm/include/asm/cacheflush.h index 17d0ae8672fa..bff71388e72a 100644 --- a/trunk/arch/arm/include/asm/cacheflush.h +++ b/trunk/arch/arm/include/asm/cacheflush.h @@ -320,7 +320,9 @@ static inline void flush_anon_page(struct vm_area_struct *vma, } #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE -extern void flush_kernel_dcache_page(struct page *); +static inline void flush_kernel_dcache_page(struct page *page) +{ +} #define flush_dcache_mmap_lock(mapping) \ spin_lock_irq(&(mapping)->tree_lock) diff --git a/trunk/arch/arm/include/asm/cmpxchg.h b/trunk/arch/arm/include/asm/cmpxchg.h index 4f009c10540d..7eb18c1d8d6c 100644 --- a/trunk/arch/arm/include/asm/cmpxchg.h +++ b/trunk/arch/arm/include/asm/cmpxchg.h @@ -233,15 +233,15 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \ atomic64_t, \ counter), \ - (unsigned long long)(o), \ - (unsigned long long)(n))) + (unsigned long)(o), \ + (unsigned long)(n))) #define cmpxchg64_local(ptr, o, n) \ ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \ local64_t, \ a), \ - (unsigned long long)(o), \ - (unsigned long long)(n))) + (unsigned long)(o), \ + (unsigned long)(n))) #endif /* __LINUX_ARM_ARCH__ >= 6 */ diff --git a/trunk/arch/arm/include/asm/cputype.h b/trunk/arch/arm/include/asm/cputype.h index dba62cb1ad08..7652712d1d14 100644 --- a/trunk/arch/arm/include/asm/cputype.h +++ b/trunk/arch/arm/include/asm/cputype.h @@ -32,8 +32,6 @@ #define MPIDR_HWID_BITMASK 0xFFFFFF -#define MPIDR_INVALID (~MPIDR_HWID_BITMASK) - #define MPIDR_LEVEL_BITS 8 #define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1) diff --git a/trunk/arch/arm/include/asm/glue-proc.h b/trunk/arch/arm/include/asm/glue-proc.h index 8017e94acc5e..ac1dd54724b6 100644 --- a/trunk/arch/arm/include/asm/glue-proc.h +++ b/trunk/arch/arm/include/asm/glue-proc.h @@ -230,15 +230,6 @@ # endif #endif -#ifdef CONFIG_CPU_PJ4B -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define 
CPU_NAME cpu_pj4b -# endif -#endif - #ifndef MULTI_CPU #define cpu_proc_init __glue(CPU_NAME,_proc_init) #define cpu_proc_fin __glue(CPU_NAME,_proc_fin) diff --git a/trunk/arch/arm/include/asm/percpu.h b/trunk/arch/arm/include/asm/percpu.h index 209e6504922e..968c0a14e0a3 100644 --- a/trunk/arch/arm/include/asm/percpu.h +++ b/trunk/arch/arm/include/asm/percpu.h @@ -30,15 +30,8 @@ static inline void set_my_cpu_offset(unsigned long off) static inline unsigned long __my_cpu_offset(void) { unsigned long off; - register unsigned long *sp asm ("sp"); - - /* - * Read TPIDRPRW. - * We want to allow caching the value, so avoid using volatile and - * instead use a fake stack read to hazard against barrier(). - */ - asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp)); - + /* Read TPIDRPRW */ + asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory"); return off; } #define __my_cpu_offset __my_cpu_offset() diff --git a/trunk/arch/arm/include/asm/pgtable-nommu.h b/trunk/arch/arm/include/asm/pgtable-nommu.h index 0642228ff785..7ec60d6075bf 100644 --- a/trunk/arch/arm/include/asm/pgtable-nommu.h +++ b/trunk/arch/arm/include/asm/pgtable-nommu.h @@ -79,6 +79,8 @@ extern unsigned int kobjsize(const void *objp); * No page table caches to initialise. */ #define pgtable_cache_init() do { } while (0) +#define io_remap_pfn_range remap_pfn_range + /* * All 32bit addresses are effectively valid for vmalloc... diff --git a/trunk/arch/arm/include/asm/pgtable.h b/trunk/arch/arm/include/asm/pgtable.h index 229e0dde9c71..9bcd262a9008 100644 --- a/trunk/arch/arm/include/asm/pgtable.h +++ b/trunk/arch/arm/include/asm/pgtable.h @@ -318,6 +318,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) #define HAVE_ARCH_UNMAPPED_AREA #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN +/* + * remap a physical page `pfn' of size `size' with page protection `prot' + * into virtual address `from' + */ +#define io_remap_pfn_range(vma,from,pfn,size,prot) \ + remap_pfn_range(vma, from, pfn, size, prot) + #define pgtable_cache_init() do { } while (0) #endif /* !__ASSEMBLY__ */ diff --git a/trunk/arch/arm/include/asm/smp_plat.h b/trunk/arch/arm/include/asm/smp_plat.h index e78983202737..aaa61b6f50ff 100644 --- a/trunk/arch/arm/include/asm/smp_plat.h +++ b/trunk/arch/arm/include/asm/smp_plat.h @@ -49,7 +49,7 @@ static inline int cache_ops_need_broadcast(void) /* * Logical CPU mapping. */ -extern u32 __cpu_logical_map[]; +extern int __cpu_logical_map[]; #define cpu_logical_map(cpu) __cpu_logical_map[cpu] /* * Retrieve logical cpu index corresponding to a given MPIDR[23:0] diff --git a/trunk/arch/arm/include/asm/tlb.h b/trunk/arch/arm/include/asm/tlb.h index bdf2b8458ec1..99a19512ee26 100644 --- a/trunk/arch/arm/include/asm/tlb.h +++ b/trunk/arch/arm/include/asm/tlb.h @@ -33,6 +33,18 @@ #include #include +/* + * We need to delay page freeing for SMP as other CPUs can access pages + * which have been removed but not yet had their TLB entries invalidated. + * Also, as ARMv7 speculative prefetch can drag new entries into the TLB, + * we need to apply this same delaying tactic to ensure correct operation. 
+ */ +#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7) +#define tlb_fast_mode(tlb) 0 +#else +#define tlb_fast_mode(tlb) 1 +#endif + #define MMU_GATHER_BUNDLE 8 /* @@ -100,10 +112,12 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb) static inline void tlb_flush_mmu(struct mmu_gather *tlb) { tlb_flush(tlb); - free_pages_and_swap_cache(tlb->pages, tlb->nr); - tlb->nr = 0; - if (tlb->pages == tlb->local) - __tlb_alloc_page(tlb); + if (!tlb_fast_mode(tlb)) { + free_pages_and_swap_cache(tlb->pages, tlb->nr); + tlb->nr = 0; + if (tlb->pages == tlb->local) + __tlb_alloc_page(tlb); + } } static inline void @@ -164,6 +178,11 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) { + if (tlb_fast_mode(tlb)) { + free_page_and_swap_cache(page); + return 1; /* avoid calling tlb_flush_mmu */ + } + tlb->pages[tlb->nr++] = page; VM_BUG_ON(tlb->nr > tlb->max); return tlb->max - tlb->nr; diff --git a/trunk/arch/arm/include/debug/ux500.S b/trunk/arch/arm/include/debug/ux500.S index fbd24beeb1fa..2848857f5b62 100644 --- a/trunk/arch/arm/include/debug/ux500.S +++ b/trunk/arch/arm/include/debug/ux500.S @@ -24,9 +24,9 @@ #define U8500_UART0_PHYS_BASE (0x80120000) #define U8500_UART1_PHYS_BASE (0x80121000) #define U8500_UART2_PHYS_BASE (0x80007000) -#define U8500_UART0_VIRT_BASE (0xf8120000) -#define U8500_UART1_VIRT_BASE (0xf8121000) -#define U8500_UART2_VIRT_BASE (0xf8007000) +#define U8500_UART0_VIRT_BASE (0xa8120000) +#define U8500_UART1_VIRT_BASE (0xa8121000) +#define U8500_UART2_VIRT_BASE (0xa8007000) #define __UX500_PHYS_UART(n) U8500_UART##n##_PHYS_BASE #define __UX500_VIRT_UART(n) U8500_UART##n##_VIRT_BASE #endif diff --git a/trunk/arch/arm/kernel/devtree.c b/trunk/arch/arm/kernel/devtree.c index 5859c8bc727c..5af04f6daa33 100644 --- a/trunk/arch/arm/kernel/devtree.c +++ b/trunk/arch/arm/kernel/devtree.c @@ -82,7 +82,7 @@ void __init arm_dt_init_cpu_maps(void) u32 i, j, cpuidx = 1; u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0; - u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID }; + u32 tmp_map[NR_CPUS] = { [0 ... 
NR_CPUS-1] = UINT_MAX }; bool bootcpu_valid = false; cpus = of_find_node_by_path("/cpus"); @@ -92,9 +92,6 @@ void __init arm_dt_init_cpu_maps(void) for_each_child_of_node(cpus, cpu) { u32 hwid; - if (of_node_cmp(cpu->type, "cpu")) - continue; - pr_debug(" * %s...\n", cpu->full_name); /* * A device tree containing CPU nodes with missing "reg" @@ -152,10 +149,9 @@ void __init arm_dt_init_cpu_maps(void) tmp_map[i] = hwid; } - if (!bootcpu_valid) { - pr_warn("DT missing boot CPU MPIDR[23:0], fall back to default cpu_logical_map\n"); + if (WARN(!bootcpu_valid, "DT missing boot CPU MPIDR[23:0], " + "fall back to default cpu_logical_map\n")) return; - } /* * Since the boot CPU node contains proper data, and all nodes have diff --git a/trunk/arch/arm/kernel/machine_kexec.c b/trunk/arch/arm/kernel/machine_kexec.c index 4fb074c446bf..8ef8c9337809 100644 --- a/trunk/arch/arm/kernel/machine_kexec.c +++ b/trunk/arch/arm/kernel/machine_kexec.c @@ -134,10 +134,6 @@ void machine_kexec(struct kimage *image) unsigned long reboot_code_buffer_phys; void *reboot_code_buffer; - if (num_online_cpus() > 1) { - pr_err("kexec: error: multiple CPUs still online\n"); - return; - } page_list = image->head & PAGE_MASK; diff --git a/trunk/arch/arm/kernel/process.c b/trunk/arch/arm/kernel/process.c index 6e8931ccf13e..f21970316836 100644 --- a/trunk/arch/arm/kernel/process.c +++ b/trunk/arch/arm/kernel/process.c @@ -184,61 +184,30 @@ int __init reboot_setup(char *str) __setup("reboot=", reboot_setup); -/* - * Called by kexec, immediately prior to machine_kexec(). - * - * This must completely disable all secondary CPUs; simply causing those CPUs - * to execute e.g. a RAM-based pin loop is not sufficient. This allows the - * kexec'd kernel to use any and all RAM as it sees fit, without having to - * avoid any code or data used by any SW CPU pin loop. The CPU hotplug - * functionality embodied in disable_nonboot_cpus() to achieve this. - */ void machine_shutdown(void) { - disable_nonboot_cpus(); +#ifdef CONFIG_SMP + smp_send_stop(); +#endif } -/* - * Halting simply requires that the secondary CPUs stop performing any - * activity (executing tasks, handling interrupts). smp_send_stop() - * achieves this. - */ void machine_halt(void) { - smp_send_stop(); - + machine_shutdown(); local_irq_disable(); while (1); } -/* - * Power-off simply requires that the secondary CPUs stop performing any - * activity (executing tasks, handling interrupts). smp_send_stop() - * achieves this. When the system power is turned off, it will take all CPUs - * with it. - */ void machine_power_off(void) { - smp_send_stop(); - + machine_shutdown(); if (pm_power_off) pm_power_off(); } -/* - * Restart requires that the secondary CPUs stop performing any activity - * while the primary CPU resets the system. Systems with a single CPU can - * use soft_restart() as their machine descriptor's .restart hook, since that - * will cause the only available CPU to reset. Systems with multiple CPUs must - * provide a HW restart implementation, to ensure that all CPUs reset at once. - * This is required so that any code running after reset on the primary CPU - * doesn't have to co-ordinate with other CPUs to ensure they aren't still - * executing pre-reset code, and using RAM that the primary CPU's code wishes - * to use. Implementing such co-ordination would be essentially impossible. 
- */ void machine_restart(char *cmd) { - smp_send_stop(); + machine_shutdown(); arm_pm_restart(reboot_mode, cmd); @@ -442,6 +411,7 @@ static struct vm_area_struct gate_vma = { .vm_start = 0xffff0000, .vm_end = 0xffff0000 + PAGE_SIZE, .vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC, + .vm_mm = &init_mm, }; static int __init gate_vma_init(void) diff --git a/trunk/arch/arm/kernel/setup.c b/trunk/arch/arm/kernel/setup.c index b4b1d397592b..1522c7ae31b0 100644 --- a/trunk/arch/arm/kernel/setup.c +++ b/trunk/arch/arm/kernel/setup.c @@ -444,7 +444,7 @@ void notrace cpu_init(void) : "r14"); } -u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID }; +int __cpu_logical_map[NR_CPUS]; void __init smp_setup_processor_id(void) { diff --git a/trunk/arch/arm/kernel/smp.c b/trunk/arch/arm/kernel/smp.c index 5919eb451bb9..47ab90563bf4 100644 --- a/trunk/arch/arm/kernel/smp.c +++ b/trunk/arch/arm/kernel/smp.c @@ -251,7 +251,7 @@ void __ref cpu_die(void) * this returns, power and/or clocks can be removed at any point * from this CPU and its cache by platform_cpu_kill(). */ - complete(&cpu_died); + RCU_NONIDLE(complete(&cpu_died)); /* * Ensure that the cache lines associated with that completion are @@ -651,6 +651,17 @@ void smp_send_reschedule(int cpu) smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); } +#ifdef CONFIG_HOTPLUG_CPU +static void smp_kill_cpus(cpumask_t *mask) +{ + unsigned int cpu; + for_each_cpu(cpu, mask) + platform_cpu_kill(cpu); +} +#else +static void smp_kill_cpus(cpumask_t *mask) { } +#endif + void smp_send_stop(void) { unsigned long timeout; @@ -668,6 +679,8 @@ void smp_send_stop(void) if (num_online_cpus() > 1) pr_warning("SMP: failed to stop secondary CPUs\n"); + + smp_kill_cpus(&mask); } /* diff --git a/trunk/arch/arm/kernel/topology.c b/trunk/arch/arm/kernel/topology.c index c5a59546a256..f10316b4ecdc 100644 --- a/trunk/arch/arm/kernel/topology.c +++ b/trunk/arch/arm/kernel/topology.c @@ -13,7 +13,6 @@ #include #include -#include #include #include #include @@ -201,7 +200,6 @@ static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {} * cpu topology table */ struct cputopo_arm cpu_topology[NR_CPUS]; -EXPORT_SYMBOL_GPL(cpu_topology); const struct cpumask *cpu_coregroup_mask(int cpu) { diff --git a/trunk/arch/arm/kvm/arm.c b/trunk/arch/arm/kvm/arm.c index ef1703b9587b..37d216d814cd 100644 --- a/trunk/arch/arm/kvm/arm.c +++ b/trunk/arch/arm/kvm/arm.c @@ -492,11 +492,6 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } -static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.target >= 0; -} - /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -513,7 +508,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - if (unlikely(!kvm_vcpu_initialized(vcpu))) + /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ + if (unlikely(vcpu->arch.target < 0)) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -714,10 +710,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; - - if (unlikely(!kvm_vcpu_initialized(vcpu))) - return -ENOEXEC; - if (copy_from_user(®, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -730,9 +722,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; - if (unlikely(!kvm_vcpu_initialized(vcpu))) - return -ENOEXEC; - if 
(copy_from_user(®_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n; diff --git a/trunk/arch/arm/kvm/mmu.c b/trunk/arch/arm/kvm/mmu.c index 84ba67b982c0..965706578f13 100644 --- a/trunk/arch/arm/kvm/mmu.c +++ b/trunk/arch/arm/kvm/mmu.c @@ -43,14 +43,7 @@ static phys_addr_t hyp_idmap_vector; static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) { - /* - * This function also gets called when dealing with HYP page - * tables. As HYP doesn't have an associated struct kvm (and - * the HYP page tables are fairly static), we don't do - * anything there. - */ - if (kvm) - kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); + kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); } static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, @@ -85,20 +78,18 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) return p; } -static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) +static void clear_pud_entry(pud_t *pud) { pmd_t *pmd_table = pmd_offset(pud, 0); pud_clear(pud); - kvm_tlb_flush_vmid_ipa(kvm, addr); pmd_free(NULL, pmd_table); put_page(virt_to_page(pud)); } -static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) +static void clear_pmd_entry(pmd_t *pmd) { pte_t *pte_table = pte_offset_kernel(pmd, 0); pmd_clear(pmd); - kvm_tlb_flush_vmid_ipa(kvm, addr); pte_free_kernel(NULL, pte_table); put_page(virt_to_page(pmd)); } @@ -109,12 +100,11 @@ static bool pmd_empty(pmd_t *pmd) return page_count(pmd_page) == 1; } -static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) +static void clear_pte_entry(pte_t *pte) { if (pte_present(*pte)) { kvm_set_pte(pte, __pte(0)); put_page(virt_to_page(pte)); - kvm_tlb_flush_vmid_ipa(kvm, addr); } } @@ -124,8 +114,7 @@ static bool pte_empty(pte_t *pte) return page_count(pte_page) == 1; } -static void unmap_range(struct kvm *kvm, pgd_t *pgdp, - unsigned long long start, u64 size) +static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size) { pgd_t *pgd; pud_t *pud; @@ -149,15 +138,15 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, } pte = pte_offset_kernel(pmd, addr); - clear_pte_entry(kvm, pte, addr); + clear_pte_entry(pte); range = PAGE_SIZE; /* If we emptied the pte, walk back up the ladder */ if (pte_empty(pte)) { - clear_pmd_entry(kvm, pmd, addr); + clear_pmd_entry(pmd); range = PMD_SIZE; if (pmd_empty(pmd)) { - clear_pud_entry(kvm, pud, addr); + clear_pud_entry(pud); range = PUD_SIZE; } } @@ -176,14 +165,14 @@ void free_boot_hyp_pgd(void) mutex_lock(&kvm_hyp_pgd_mutex); if (boot_hyp_pgd) { - unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); - unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); + unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); + unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); kfree(boot_hyp_pgd); boot_hyp_pgd = NULL; } if (hyp_pgd) - unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); + unmap_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); kfree(init_bounce_page); init_bounce_page = NULL; @@ -211,10 +200,9 @@ void free_hyp_pgds(void) if (hyp_pgd) { for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) - unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); + unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) - unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); - + unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); kfree(hyp_pgd); hyp_pgd = NULL; } @@ -405,7 +393,7 @@ int 
kvm_alloc_stage2_pgd(struct kvm *kvm) */ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) { - unmap_range(kvm, kvm->arch.pgd, start, size); + unmap_range(kvm->arch.pgd, start, size); } /** @@ -687,6 +675,7 @@ static void handle_hva_to_gpa(struct kvm *kvm, static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) { unmap_stage2_range(kvm, gpa, PAGE_SIZE); + kvm_tlb_flush_vmid_ipa(kvm, gpa); } int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) diff --git a/trunk/arch/arm/mach-at91/at91rm9200_time.c b/trunk/arch/arm/mach-at91/at91rm9200_time.c index 180b3024bec3..2acdff4c1dfe 100644 --- a/trunk/arch/arm/mach-at91/at91rm9200_time.c +++ b/trunk/arch/arm/mach-at91/at91rm9200_time.c @@ -174,7 +174,6 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev) static struct clock_event_device clkevt = { .name = "at91_tick", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .shift = 32, .rating = 150, .set_next_event = clkevt32k_next_event, .set_mode = clkevt32k_mode, @@ -265,11 +264,9 @@ void __init at91rm9200_timer_init(void) at91_st_write(AT91_ST_RTMR, 1); /* Setup timer clockevent, with minimum of two ticks (important!!) */ - clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift); - clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt); - clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1; clkevt.cpumask = cpumask_of(0); - clockevents_register_device(&clkevt); + clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK, + 2, AT91_ST_ALMV); /* register clocksource */ clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK); diff --git a/trunk/arch/arm/mach-at91/at91sam9n12.c b/trunk/arch/arm/mach-at91/at91sam9n12.c index c7d670d11802..13cdbcd48f51 100644 --- a/trunk/arch/arm/mach-at91/at91sam9n12.c +++ b/trunk/arch/arm/mach-at91/at91sam9n12.c @@ -223,7 +223,13 @@ static void __init at91sam9n12_map_io(void) at91_init_sram(0, AT91SAM9N12_SRAM_BASE, AT91SAM9N12_SRAM_SIZE); } +void __init at91sam9n12_initialize(void) +{ + at91_extern_irq = (1 << AT91SAM9N12_ID_IRQ0); +} + AT91_SOC_START(at91sam9n12) .map_io = at91sam9n12_map_io, .register_clocks = at91sam9n12_register_clocks, + .init = at91sam9n12_initialize, AT91_SOC_END diff --git a/trunk/arch/arm/mach-at91/include/mach/at91_pmc.h b/trunk/arch/arm/mach-at91/include/mach/at91_pmc.h index 2bd7f51b0b82..31df12029c4e 100644 --- a/trunk/arch/arm/mach-at91/include/mach/at91_pmc.h +++ b/trunk/arch/arm/mach-at91/include/mach/at91_pmc.h @@ -179,9 +179,9 @@ extern void __iomem *at91_pmc_base; #define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */ #define AT91_PMC_PCR_DIV(n) ((n) << 16) /* Divisor Value */ #define AT91_PMC_PCR_DIV0 0x0 /* Peripheral clock is MCK */ -#define AT91_PMC_PCR_DIV2 0x1 /* Peripheral clock is MCK/2 */ -#define AT91_PMC_PCR_DIV4 0x2 /* Peripheral clock is MCK/4 */ -#define AT91_PMC_PCR_DIV8 0x3 /* Peripheral clock is MCK/8 */ +#define AT91_PMC_PCR_DIV2 0x2 /* Peripheral clock is MCK/2 */ +#define AT91_PMC_PCR_DIV4 0x4 /* Peripheral clock is MCK/4 */ +#define AT91_PMC_PCR_DIV8 0x8 /* Peripheral clock is MCK/8 */ #define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */ #endif diff --git a/trunk/arch/arm/mach-exynos/Kconfig b/trunk/arch/arm/mach-exynos/Kconfig index ff18fc2ea46f..d19edff0ea6e 100644 --- a/trunk/arch/arm/mach-exynos/Kconfig +++ b/trunk/arch/arm/mach-exynos/Kconfig @@ -250,7 +250,6 @@ config MACH_ARMLEX4210 config MACH_UNIVERSAL_C210 bool "Mobile UNIVERSAL_C210 Board" select CLKSRC_MMIO - select CLKSRC_SAMSUNG_PWM 
select CPU_EXYNOS4210 select EXYNOS4_SETUP_FIMC select EXYNOS4_SETUP_FIMD0 @@ -282,6 +281,7 @@ config MACH_UNIVERSAL_C210 select S5P_DEV_TV select S5P_GPIO_INT select S5P_SETUP_MIPIPHY + select SAMSUNG_HRT help Machine support for Samsung Mobile Universal S5PC210 Reference Board. @@ -410,7 +410,6 @@ config MACH_EXYNOS4_DT depends on ARCH_EXYNOS4 select ARM_AMBA select CLKSRC_OF - select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210 select CPU_EXYNOS4210 select KEYBOARD_SAMSUNG if INPUT_KEYBOARD select PINCTRL diff --git a/trunk/arch/arm/mach-exynos/common.c b/trunk/arch/arm/mach-exynos/common.c index f7e504b7874d..745e304ad0de 100644 --- a/trunk/arch/arm/mach-exynos/common.c +++ b/trunk/arch/arm/mach-exynos/common.c @@ -10,14 +10,12 @@ */ #include -#include #include #include #include #include #include #include -#include #include #include #include @@ -304,13 +302,6 @@ static struct map_desc exynos5440_iodesc0[] __initdata = { }, }; -static struct samsung_pwm_variant exynos4_pwm_variant = { - .bits = 32, - .div_base = 0, - .has_tint_cstat = true, - .tclk_mask = 0, -}; - void exynos4_restart(char mode, const char *cmd) { __raw_writel(0x1, S5P_SWRESET); @@ -326,16 +317,9 @@ void exynos5_restart(char mode, const char *cmd) val = 0x1; addr = EXYNOS_SWRESET; } else if (of_machine_is_compatible("samsung,exynos5440")) { - u32 status; np = of_find_compatible_node(NULL, NULL, "samsung,exynos5440-clock"); - - addr = of_iomap(np, 0) + 0xbc; - status = __raw_readl(addr); - addr = of_iomap(np, 0) + 0xcc; - val = __raw_readl(addr); - - val = (val & 0xffff0000) | (status & 0xffff); + val = (0xfff << 20) | (0x1 << 16); } else { pr_err("%s: cannot support non-DT\n", __func__); return; @@ -386,8 +370,6 @@ int __init exynos_fdt_map_chipid(unsigned long node, const char *uname, void __init exynos_init_io(struct map_desc *mach_desc, int size) { - debug_ll_io_init(); - #ifdef CONFIG_OF if (initial_boot_params) of_scan_flat_dt(exynos_fdt_map_chipid, NULL); @@ -460,20 +442,8 @@ static void __init exynos5440_map_io(void) iotable_init(exynos5440_iodesc0, ARRAY_SIZE(exynos5440_iodesc0)); } -void __init exynos_set_timer_source(u8 channels) -{ - exynos4_pwm_variant.output_mask = BIT(SAMSUNG_PWM_NUM) - 1; - exynos4_pwm_variant.output_mask &= ~channels; -} - void __init exynos_init_time(void) { - unsigned int timer_irqs[SAMSUNG_PWM_NUM] = { - EXYNOS4_IRQ_TIMER0_VIC, EXYNOS4_IRQ_TIMER1_VIC, - EXYNOS4_IRQ_TIMER2_VIC, EXYNOS4_IRQ_TIMER3_VIC, - EXYNOS4_IRQ_TIMER4_VIC, - }; - if (of_have_populated_dt()) { #ifdef CONFIG_OF of_clk_init(NULL); @@ -485,14 +455,7 @@ void __init exynos_init_time(void) exynos4_clk_init(NULL, !soc_is_exynos4210(), S5P_VA_CMU, readl(S5P_VA_CHIPID + 8) & 1); exynos4_clk_register_fixed_ext(xxti_f, xusbxti_f); #endif -#ifdef CONFIG_CLKSRC_SAMSUNG_PWM - if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0) - samsung_pwm_clocksource_init(S3C_VA_TIMER, - timer_irqs, &exynos4_pwm_variant); - else -#endif - mct_init(S5P_VA_SYSTIMER, EXYNOS4_IRQ_MCT_G0, - EXYNOS4_IRQ_MCT_L0, EXYNOS4_IRQ_MCT_L1); + mct_init(S5P_VA_SYSTIMER, EXYNOS4_IRQ_MCT_G0, EXYNOS4_IRQ_MCT_L0, EXYNOS4_IRQ_MCT_L1); } } diff --git a/trunk/arch/arm/mach-exynos/common.h b/trunk/arch/arm/mach-exynos/common.h index 11fc1e29819b..60dd35cc01a6 100644 --- a/trunk/arch/arm/mach-exynos/common.h +++ b/trunk/arch/arm/mach-exynos/common.h @@ -32,8 +32,6 @@ void exynos4_clk_register_fixed_ext(unsigned long, unsigned long); void exynos_firmware_init(void); -void exynos_set_timer_source(u8 channels); - #ifdef CONFIG_PM_GENERIC_DOMAINS int 
exynos_pm_late_initcall(void); #else diff --git a/trunk/arch/arm/mach-exynos/include/mach/pm-core.h b/trunk/arch/arm/mach-exynos/include/mach/pm-core.h index 296090e7f423..7dbbfec13ea5 100644 --- a/trunk/arch/arm/mach-exynos/include/mach/pm-core.h +++ b/trunk/arch/arm/mach-exynos/include/mach/pm-core.h @@ -18,15 +18,8 @@ #ifndef __ASM_ARCH_PM_CORE_H #define __ASM_ARCH_PM_CORE_H __FILE__ -#include #include -#ifdef CONFIG_PINCTRL_EXYNOS -extern u32 exynos_get_eint_wake_mask(void); -#else -static inline u32 exynos_get_eint_wake_mask(void) { return 0xffffffff; } -#endif - static inline void s3c_pm_debug_init_uart(void) { /* nothing here yet */ @@ -34,12 +27,7 @@ static inline void s3c_pm_debug_init_uart(void) static inline void s3c_pm_arch_prepare_irqs(void) { - u32 eintmask = s3c_irqwake_eintmask; - - if (of_have_populated_dt()) - eintmask = exynos_get_eint_wake_mask(); - - __raw_writel(eintmask, S5P_EINT_WAKEUP_MASK); + __raw_writel(s3c_irqwake_eintmask, S5P_EINT_WAKEUP_MASK); __raw_writel(s3c_irqwake_intmask & ~(1 << 31), S5P_WAKEUP_MASK); } diff --git a/trunk/arch/arm/mach-exynos/mach-universal_c210.c b/trunk/arch/arm/mach-exynos/mach-universal_c210.c index 74ddb2b55614..327d50d4681d 100644 --- a/trunk/arch/arm/mach-exynos/mach-universal_c210.c +++ b/trunk/arch/arm/mach-exynos/mach-universal_c210.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -1093,7 +1094,7 @@ static void __init universal_map_io(void) { exynos_init_io(NULL, 0); s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs)); - exynos_set_timer_source(BIT(2) | BIT(4)); + samsung_set_timer_source(SAMSUNG_PWM2, SAMSUNG_PWM4); xxti_f = 0; xusbxti_f = 24000000; } @@ -1153,7 +1154,7 @@ MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210") .map_io = universal_map_io, .init_machine = universal_machine_init, .init_late = exynos_init_late, - .init_time = exynos_init_time, + .init_time = samsung_timer_init, .reserve = &universal_reserve, .restart = exynos4_restart, MACHINE_END diff --git a/trunk/arch/arm/mach-imx/clk-imx6q.c b/trunk/arch/arm/mach-imx/clk-imx6q.c index 4e3148ce852d..151259003086 100644 --- a/trunk/arch/arm/mach-imx/clk-imx6q.c +++ b/trunk/arch/arm/mach-imx/clk-imx6q.c @@ -177,18 +177,17 @@ int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode) static const char *step_sels[] = { "osc", "pll2_pfd2_396m", }; static const char *pll1_sw_sels[] = { "pll1_sys", "step", }; static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", }; -static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", "dummy", }; -static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "pll2_bus", }; +static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", }; static const char *periph_sels[] = { "periph_pre", "periph_clk2", }; static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", }; -static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "periph", "pll3_pfd1_540m", }; +static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "pll3_pfd1_540m", }; static const char *audio_sels[] = { "pll4_post_div", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", }; static const char *gpu_axi_sels[] = { "axi", "ahb", }; static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", }; static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; -static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", }; 
+static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", }; static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; -static const char *ldb_di_sels[] = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", }; +static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", }; static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; @@ -370,8 +369,8 @@ int __init mx6q_clocks_init(void) clk[pll1_sw] = imx_clk_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels)); clk[periph_pre] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); clk[periph2_pre] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); - clk[periph_clk2_sel] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); - clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); + clk[periph_clk2_sel] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 1, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); + clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); clk[axi_sel] = imx_clk_mux("axi_sel", base + 0x14, 6, 2, axi_sels, ARRAY_SIZE(axi_sels)); clk[esai_sel] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); clk[asrc_sel] = imx_clk_mux("asrc_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); @@ -499,7 +498,7 @@ int __init mx6q_clocks_init(void) clk[ldb_di1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14); clk[ipu2_di1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10); clk[hsi_tx] = imx_clk_gate2("hsi_tx", "hsi_tx_podf", base + 0x74, 16); - clk[mlb] = imx_clk_gate2("mlb", "axi", base + 0x74, 18); + clk[mlb] = imx_clk_gate2("mlb", "pll8_mlb", base + 0x74, 18); clk[mmdc_ch0_axi] = imx_clk_gate2("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20); clk[mmdc_ch1_axi] = imx_clk_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22); clk[ocram] = imx_clk_gate2("ocram", "ahb", base + 0x74, 28); diff --git a/trunk/arch/arm/mach-imx/headsmp.S b/trunk/arch/arm/mach-imx/headsmp.S index 627f16f0e9d1..67b9c48dcafe 100644 --- a/trunk/arch/arm/mach-imx/headsmp.S +++ b/trunk/arch/arm/mach-imx/headsmp.S @@ -18,20 +18,8 @@ .section ".text.head", "ax" #ifdef CONFIG_SMP -diag_reg_offset: - .word g_diag_reg - . 
- - .macro set_diag_reg - adr r0, diag_reg_offset - ldr r1, [r0] - add r1, r1, r0 @ r1 = physical &g_diag_reg - ldr r0, [r1] - mcr p15, 0, r0, c15, c0, 1 @ write diagnostic register - .endm - ENTRY(v7_secondary_startup) bl v7_invalidate_l1 - set_diag_reg b secondary_startup ENDPROC(v7_secondary_startup) #endif diff --git a/trunk/arch/arm/mach-imx/platsmp.c b/trunk/arch/arm/mach-imx/platsmp.c index c6e1ab544882..4a69305db65e 100644 --- a/trunk/arch/arm/mach-imx/platsmp.c +++ b/trunk/arch/arm/mach-imx/platsmp.c @@ -12,7 +12,6 @@ #include #include -#include #include #include #include @@ -22,7 +21,6 @@ #define SCU_STANDBY_ENABLE (1 << 5) -u32 g_diag_reg; static void __iomem *scu_base; static struct map_desc scu_io_desc __initdata = { @@ -82,18 +80,6 @@ void imx_smp_prepare(void) static void __init imx_smp_prepare_cpus(unsigned int max_cpus) { imx_smp_prepare(); - - /* - * The diagnostic register holds the errata bits. Mostly bootloader - * does not bring up secondary cores, so that when errata bits are set - * in bootloader, they are set only for boot cpu. But on a SMP - * configuration, it should be equally done on every single core. - * Read the register from boot cpu here, and will replicate it into - * secondary cores when booting them. - */ - asm("mrc p15, 0, %0, c15, c0, 1" : "=r" (g_diag_reg) : : "cc"); - __cpuc_flush_dcache_area(&g_diag_reg, sizeof(g_diag_reg)); - outer_clean_range(__pa(&g_diag_reg), __pa(&g_diag_reg + 1)); } struct smp_operations imx_smp_ops __initdata = { diff --git a/trunk/arch/arm/mach-kirkwood/board-ts219.c b/trunk/arch/arm/mach-kirkwood/board-ts219.c index 4695d5f35fc9..acb0187c7ee1 100644 --- a/trunk/arch/arm/mach-kirkwood/board-ts219.c +++ b/trunk/arch/arm/mach-kirkwood/board-ts219.c @@ -41,3 +41,13 @@ void __init qnap_dt_ts219_init(void) pm_power_off = qnap_tsx1x_power_off; } + +/* FIXME: Will not work with DT. Maybe use MPP40_GPIO? */ +static int __init ts219_pci_init(void) +{ + if (machine_is_ts219()) + kirkwood_pcie_init(KW_PCIE0); + + return 0; +} +subsys_initcall(ts219_pci_init); diff --git a/trunk/arch/arm/mach-kirkwood/common.c b/trunk/arch/arm/mach-kirkwood/common.c index f38922897563..c2cae69e6d2b 100644 --- a/trunk/arch/arm/mach-kirkwood/common.c +++ b/trunk/arch/arm/mach-kirkwood/common.c @@ -528,6 +528,12 @@ void __init kirkwood_init_early(void) { orion_time_set_base(TIMER_VIRT_BASE); + /* + * Some Kirkwood devices allocate their coherent buffers from atomic + * context. Increase size of atomic coherent pool to make sure such + * the allocations won't fail. 
+ */ + init_dma_coherent_pool_size(SZ_1M); mvebu_mbus_init("marvell,kirkwood-mbus", BRIDGE_WINS_BASE, BRIDGE_WINS_SZ, DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ); diff --git a/trunk/arch/arm/mach-kirkwood/mpp.c b/trunk/arch/arm/mach-kirkwood/mpp.c index e96fd71abd76..827cde42414f 100644 --- a/trunk/arch/arm/mach-kirkwood/mpp.c +++ b/trunk/arch/arm/mach-kirkwood/mpp.c @@ -22,10 +22,9 @@ static unsigned int __init kirkwood_variant(void) kirkwood_pcie_id(&dev, &rev); - if (dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0) + if ((dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0) || + (dev == MV88F6282_DEV_ID)) return MPP_F6281_MASK; - if (dev == MV88F6282_DEV_ID) - return MPP_F6282_MASK; if (dev == MV88F6192_DEV_ID && rev >= MV88F6192_REV_A0) return MPP_F6192_MASK; if (dev == MV88F6180_DEV_ID) diff --git a/trunk/arch/arm/mach-kirkwood/ts219-setup.c b/trunk/arch/arm/mach-kirkwood/ts219-setup.c index e1267d6b468f..283abff90228 100644 --- a/trunk/arch/arm/mach-kirkwood/ts219-setup.c +++ b/trunk/arch/arm/mach-kirkwood/ts219-setup.c @@ -124,7 +124,7 @@ static void __init qnap_ts219_init(void) static int __init ts219_pci_init(void) { if (machine_is_ts219()) - kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0); + kirkwood_pcie_init(KW_PCIE0); return 0; } diff --git a/trunk/arch/arm/mach-mvebu/Kconfig b/trunk/arch/arm/mach-mvebu/Kconfig index 80a8bcacd9d5..e11acbb0a46d 100644 --- a/trunk/arch/arm/mach-mvebu/Kconfig +++ b/trunk/arch/arm/mach-mvebu/Kconfig @@ -15,7 +15,6 @@ config ARCH_MVEBU select MVEBU_CLK_GATING select MVEBU_MBUS select ZONE_DMA if ARM_LPAE - select ARCH_REQUIRE_GPIOLIB if ARCH_MVEBU diff --git a/trunk/arch/arm/mach-mvebu/armada-370-xp.c b/trunk/arch/arm/mach-mvebu/armada-370-xp.c index 1c48890bb72b..42a4cb3087e2 100644 --- a/trunk/arch/arm/mach-mvebu/armada-370-xp.c +++ b/trunk/arch/arm/mach-mvebu/armada-370-xp.c @@ -53,6 +53,13 @@ void __init armada_370_xp_init_early(void) { char *mbus_soc_name; + /* + * Some Armada 370/XP devices allocate their coherent buffers + * from atomic context. Increase size of atomic coherent pool + * to make sure such the allocations won't fail. + */ + init_dma_coherent_pool_size(SZ_1M); + /* * This initialization will be replaced by a DT-based * initialization once the mvebu-mbus driver gains DT support. 
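Note: the coherency_ll.S hunk below trades an exclusive-monitor (ldrex/orr/strex) retry loop against a plain ldr/orr/str sequence when setting bits in the Armada XP coherency fabric registers. A minimal sketch of the same read-modify-write-with-retry idea, written in portable C11 atomics purely for illustration (not kernel code; the register pointer and mask here are placeholder parameters, not the real Armada XP layout):

#include <stdint.h>
#include <stdatomic.h>

/*
 * Illustration only: OR a mask into a shared word without losing a
 * concurrent update, the shape the ldrex/strex/cmp/bne loop expresses.
 */
static inline void set_bits_atomic(_Atomic uint32_t *reg, uint32_t mask)
{
	uint32_t old = atomic_load_explicit(reg, memory_order_relaxed);

	/* Retry until no other agent wrote the word between load and store,
	 * mirroring the strex result check and back-branch. */
	while (!atomic_compare_exchange_weak_explicit(reg, &old, old | mask,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;
}
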
diff --git a/trunk/arch/arm/mach-mvebu/coherency_ll.S b/trunk/arch/arm/mach-mvebu/coherency_ll.S index 5476669ba905..53e8391192cd 100644 --- a/trunk/arch/arm/mach-mvebu/coherency_ll.S +++ b/trunk/arch/arm/mach-mvebu/coherency_ll.S @@ -32,21 +32,15 @@ ENTRY(ll_set_cpu_coherent) /* Add CPU to SMP group - Atomic */ add r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET -1: - ldrex r2, [r3] + ldr r2, [r3] orr r2, r2, r1 - strex r0, r2, [r3] - cmp r0, #0 - bne 1b + str r2, [r3] /* Enable coherency on CPU - Atomic */ - add r3, r3, #ARMADA_XP_CFB_CFG_REG_OFFSET -1: - ldrex r2, [r3] + add r3, r0, #ARMADA_XP_CFB_CFG_REG_OFFSET + ldr r2, [r3] orr r2, r2, r1 - strex r0, r2, [r3] - cmp r0, #0 - bne 1b + str r2, [r3] dsb diff --git a/trunk/arch/arm/mach-omap1/dma.c b/trunk/arch/arm/mach-omap1/dma.c index a94b3a718d1a..68ab858e27b7 100644 --- a/trunk/arch/arm/mach-omap1/dma.c +++ b/trunk/arch/arm/mach-omap1/dma.c @@ -345,7 +345,6 @@ static int __init omap1_system_dma_init(void) dev_err(&pdev->dev, "%s: Memory allocation failed for d->chan!\n", __func__); - ret = -ENOMEM; goto exit_release_d; } diff --git a/trunk/arch/arm/mach-omap2/cclock33xx_data.c b/trunk/arch/arm/mach-omap2/cclock33xx_data.c index af3544ce4f02..6ebc7803bc3e 100644 --- a/trunk/arch/arm/mach-omap2/cclock33xx_data.c +++ b/trunk/arch/arm/mach-omap2/cclock33xx_data.c @@ -454,29 +454,9 @@ DEFINE_CLK_GATE(cefuse_fck, "sys_clkin_ck", &sys_clkin_ck, 0x0, */ DEFINE_CLK_FIXED_FACTOR(clkdiv32k_ck, "clk_24mhz", &clk_24mhz, 0x0, 1, 732); -static struct clk clkdiv32k_ick; - -static const char *clkdiv32k_ick_parent_names[] = { - "clkdiv32k_ck", -}; - -static const struct clk_ops clkdiv32k_ick_ops = { - .enable = &omap2_dflt_clk_enable, - .disable = &omap2_dflt_clk_disable, - .is_enabled = &omap2_dflt_clk_is_enabled, - .init = &omap2_init_clk_clkdm, -}; - -static struct clk_hw_omap clkdiv32k_ick_hw = { - .hw = { - .clk = &clkdiv32k_ick, - }, - .enable_reg = AM33XX_CM_PER_CLKDIV32K_CLKCTRL, - .enable_bit = AM33XX_MODULEMODE_SWCTRL_SHIFT, - .clkdm_name = "clk_24mhz_clkdm", -}; - -DEFINE_STRUCT_CLK(clkdiv32k_ick, clkdiv32k_ick_parent_names, clkdiv32k_ick_ops); +DEFINE_CLK_GATE(clkdiv32k_ick, "clkdiv32k_ck", &clkdiv32k_ck, 0x0, + AM33XX_CM_PER_CLKDIV32K_CLKCTRL, AM33XX_MODULEMODE_SWCTRL_SHIFT, + 0x0, NULL); /* "usbotg_fck" is an additional clock and not really a modulemode */ DEFINE_CLK_GATE(usbotg_fck, "dpll_per_ck", &dpll_per_ck, 0x0, diff --git a/trunk/arch/arm/mach-omap2/clock36xx.c b/trunk/arch/arm/mach-omap2/clock36xx.c index bbd6a3f717e6..8f3bf4e50908 100644 --- a/trunk/arch/arm/mach-omap2/clock36xx.c +++ b/trunk/arch/arm/mach-omap2/clock36xx.c @@ -20,12 +20,11 @@ #include #include -#include #include #include "clock.h" #include "clock36xx.h" -#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw) + /** * omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering @@ -40,28 +39,29 @@ */ int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk) { - struct clk_divider *parent; + struct clk_hw_omap *parent; struct clk_hw *parent_hw; - u32 dummy_v, orig_v; + u32 dummy_v, orig_v, clksel_shift; int ret; /* Clear PWRDN bit of HSDIVIDER */ ret = omap2_dflt_clk_enable(clk); parent_hw = __clk_get_hw(__clk_get_parent(clk->clk)); - parent = to_clk_divider(parent_hw); + parent = to_clk_hw_omap(parent_hw); /* Restore the dividers */ if (!ret) { - orig_v = __raw_readl(parent->reg); + clksel_shift = __ffs(parent->clksel_mask); + orig_v = __raw_readl(parent->clksel_reg); dummy_v = orig_v; /* Write any other value different from the Read 
value */ - dummy_v ^= (1 << parent->shift); - __raw_writel(dummy_v, parent->reg); + dummy_v ^= (1 << clksel_shift); + __raw_writel(dummy_v, parent->clksel_reg); /* Write the original divider */ - __raw_writel(orig_v, parent->reg); + __raw_writel(orig_v, parent->clksel_reg); } return ret; diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod.c b/trunk/arch/arm/mach-omap2/omap_hwmod.c index 7341eff63f56..d25a95fe9921 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod.c @@ -1356,27 +1356,13 @@ static void _enable_sysc(struct omap_hwmod *oh) clkdm = _get_clkdm(oh); if (sf & SYSC_HAS_SIDLEMODE) { - if (oh->flags & HWMOD_SWSUP_SIDLE || - oh->flags & HWMOD_SWSUP_SIDLE_ACT) { - idlemode = HWMOD_IDLEMODE_NO; - } else { - if (sf & SYSC_HAS_ENAWAKEUP) - _enable_wakeup(oh, &v); - if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) - idlemode = HWMOD_IDLEMODE_SMART_WKUP; - else - idlemode = HWMOD_IDLEMODE_SMART; - } - - /* - * This is special handling for some IPs like - * 32k sync timer. Force them to idle! - */ clkdm_act = (clkdm && clkdm->flags & CLKDM_ACTIVE_WITH_MPU); if (clkdm_act && !(oh->class->sysc->idlemodes & (SIDLE_SMART | SIDLE_SMART_WKUP))) idlemode = HWMOD_IDLEMODE_FORCE; - + else + idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ? + HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART; _set_slave_idlemode(oh, idlemode, &v); } @@ -1405,6 +1391,10 @@ static void _enable_sysc(struct omap_hwmod *oh) (sf & SYSC_HAS_CLOCKACTIVITY)) _set_clockactivity(oh, oh->class->sysc->clockact, &v); + /* If slave is in SMARTIDLE, also enable wakeup */ + if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE)) + _enable_wakeup(oh, &v); + _write_sysconfig(v, oh); /* @@ -1440,16 +1430,13 @@ static void _idle_sysc(struct omap_hwmod *oh) sf = oh->class->sysc->sysc_flags; if (sf & SYSC_HAS_SIDLEMODE) { - if (oh->flags & HWMOD_SWSUP_SIDLE) { + /* XXX What about HWMOD_IDLEMODE_SMART_WKUP? */ + if (oh->flags & HWMOD_SWSUP_SIDLE || + !(oh->class->sysc->idlemodes & + (SIDLE_SMART | SIDLE_SMART_WKUP))) idlemode = HWMOD_IDLEMODE_FORCE; - } else { - if (sf & SYSC_HAS_ENAWAKEUP) - _enable_wakeup(oh, &v); - if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) - idlemode = HWMOD_IDLEMODE_SMART_WKUP; - else - idlemode = HWMOD_IDLEMODE_SMART; - } + else + idlemode = HWMOD_IDLEMODE_SMART; _set_slave_idlemode(oh, idlemode, &v); } @@ -1468,6 +1455,10 @@ static void _idle_sysc(struct omap_hwmod *oh) _set_master_standbymode(oh, idlemode, &v); } + /* If slave is in SMARTIDLE, also enable wakeup */ + if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE)) + _enable_wakeup(oh, &v); + _write_sysconfig(v, oh); } @@ -2074,7 +2065,7 @@ static int _omap4_get_context_lost(struct omap_hwmod *oh) * do so is present in the hwmod data, then call it and pass along the * return value; otherwise, return 0. */ -static int _enable_preprogram(struct omap_hwmod *oh) +static int __init _enable_preprogram(struct omap_hwmod *oh) { if (!oh->class->enable_preprogram) return 0; @@ -2254,6 +2245,42 @@ static int _idle(struct omap_hwmod *oh) return 0; } +/** + * omap_hwmod_set_ocp_autoidle - set the hwmod's OCP autoidle bit + * @oh: struct omap_hwmod * + * @autoidle: desired AUTOIDLE bitfield value (0 or 1) + * + * Sets the IP block's OCP autoidle bit in hardware, and updates our + * local copy. Intended to be used by drivers that require + * direct manipulation of the AUTOIDLE bits. + * Returns -EINVAL if @oh is null or is not in the ENABLED state, or passes + * along the return value from _set_module_autoidle(). 
+ * + * Any users of this function should be scrutinized carefully. + */ +int omap_hwmod_set_ocp_autoidle(struct omap_hwmod *oh, u8 autoidle) +{ + u32 v; + int retval = 0; + unsigned long flags; + + if (!oh || oh->_state != _HWMOD_STATE_ENABLED) + return -EINVAL; + + spin_lock_irqsave(&oh->_lock, flags); + + v = oh->_sysc_cache; + + retval = _set_module_autoidle(oh, autoidle, &v); + + if (!retval) + _write_sysconfig(v, oh); + + spin_unlock_irqrestore(&oh->_lock, flags); + + return retval; +} + /** * _shutdown - shutdown an omap_hwmod * @oh: struct omap_hwmod * @@ -3152,6 +3179,38 @@ int omap_hwmod_softreset(struct omap_hwmod *oh) return ret; } +/** + * omap_hwmod_set_slave_idlemode - set the hwmod's OCP slave idlemode + * @oh: struct omap_hwmod * + * @idlemode: SIDLEMODE field bits (shifted to bit 0) + * + * Sets the IP block's OCP slave idlemode in hardware, and updates our + * local copy. Intended to be used by drivers that have some erratum + * that requires direct manipulation of the SIDLEMODE bits. Returns + * -EINVAL if @oh is null, or passes along the return value from + * _set_slave_idlemode(). + * + * XXX Does this function have any current users? If not, we should + * remove it; it is better to let the rest of the hwmod code handle this. + * Any users of this function should be scrutinized carefully. + */ +int omap_hwmod_set_slave_idlemode(struct omap_hwmod *oh, u8 idlemode) +{ + u32 v; + int retval = 0; + + if (!oh) + return -EINVAL; + + v = oh->_sysc_cache; + + retval = _set_slave_idlemode(oh, idlemode, &v); + if (!retval) + _write_sysconfig(v, oh); + + return retval; +} + /** * omap_hwmod_lookup - look up a registered omap_hwmod by name * @name: name of the omap_hwmod to look up diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod.h b/trunk/arch/arm/mach-omap2/omap_hwmod.h index 0c898f58ac9b..fe5962921f07 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod.h +++ b/trunk/arch/arm/mach-omap2/omap_hwmod.h @@ -463,9 +463,6 @@ struct omap_hwmod_omap4_prcm { * is kept in force-standby mode. Failing to do so causes PM problems * with musb on OMAP3630 at least. Note that musb has a dedicated register * to control MSTANDBY signal when MIDLEMODE is set to force-standby. 
- * HWMOD_SWSUP_SIDLE_ACT: omap_hwmod code should manually bring the module - * out of idle, but rely on smart-idle to the put it back in idle, - * so the wakeups are still functional (Only known case for now is UART) */ #define HWMOD_SWSUP_SIDLE (1 << 0) #define HWMOD_SWSUP_MSTANDBY (1 << 1) @@ -479,7 +476,6 @@ struct omap_hwmod_omap4_prcm { #define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) #define HWMOD_BLOCK_WFI (1 << 10) #define HWMOD_FORCE_MSTANDBY (1 << 11) -#define HWMOD_SWSUP_SIDLE_ACT (1 << 12) /* * omap_hwmod._int_flags definitions @@ -645,6 +641,9 @@ int omap_hwmod_read_hardreset(struct omap_hwmod *oh, const char *name); int omap_hwmod_enable_clocks(struct omap_hwmod *oh); int omap_hwmod_disable_clocks(struct omap_hwmod *oh); +int omap_hwmod_set_slave_idlemode(struct omap_hwmod *oh, u8 idlemode); +int omap_hwmod_set_ocp_autoidle(struct omap_hwmod *oh, u8 autoidle); + int omap_hwmod_reset(struct omap_hwmod *oh); void omap_hwmod_ocp_barrier(struct omap_hwmod *oh); diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c index d05fc7b54567..c8c64b3e1acc 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c @@ -512,7 +512,6 @@ struct omap_hwmod omap2xxx_uart1_hwmod = { .mpu_irqs = omap2_uart1_mpu_irqs, .sdma_reqs = omap2_uart1_sdma_reqs, .main_clk = "uart1_fck", - .flags = HWMOD_SWSUP_SIDLE_ACT, .prcm = { .omap2 = { .module_offs = CORE_MOD, @@ -532,7 +531,6 @@ struct omap_hwmod omap2xxx_uart2_hwmod = { .mpu_irqs = omap2_uart2_mpu_irqs, .sdma_reqs = omap2_uart2_sdma_reqs, .main_clk = "uart2_fck", - .flags = HWMOD_SWSUP_SIDLE_ACT, .prcm = { .omap2 = { .module_offs = CORE_MOD, @@ -552,7 +550,6 @@ struct omap_hwmod omap2xxx_uart3_hwmod = { .mpu_irqs = omap2_uart3_mpu_irqs, .sdma_reqs = omap2_uart3_sdma_reqs, .main_clk = "uart3_fck", - .flags = HWMOD_SWSUP_SIDLE_ACT, .prcm = { .omap2 = { .module_offs = CORE_MOD, diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_33xx_data.c index 69337af748cc..01d8f324450a 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_33xx_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_33xx_data.c @@ -1995,7 +1995,6 @@ static struct omap_hwmod am33xx_uart1_hwmod = { .name = "uart1", .class = &uart_class, .clkdm_name = "l4_wkup_clkdm", - .flags = HWMOD_SWSUP_SIDLE_ACT, .mpu_irqs = am33xx_uart1_irqs, .sdma_reqs = uart1_edma_reqs, .main_clk = "dpll_per_m2_div4_wkupdm_ck", @@ -2007,13 +2006,6 @@ static struct omap_hwmod am33xx_uart1_hwmod = { }, }; -/* uart2 */ -static struct omap_hwmod_dma_info uart2_edma_reqs[] = { - { .name = "tx", .dma_req = 28, }, - { .name = "rx", .dma_req = 29, }, - { .dma_req = -1 } -}; - static struct omap_hwmod_irq_info am33xx_uart2_irqs[] = { { .irq = 73 + OMAP_INTC_START, }, { .irq = -1 }, @@ -2023,9 +2015,8 @@ static struct omap_hwmod am33xx_uart2_hwmod = { .name = "uart2", .class = &uart_class, .clkdm_name = "l4ls_clkdm", - .flags = HWMOD_SWSUP_SIDLE_ACT, .mpu_irqs = am33xx_uart2_irqs, - .sdma_reqs = uart2_edma_reqs, + .sdma_reqs = uart1_edma_reqs, .main_clk = "dpll_per_m2_div4_ck", .prcm = { .omap4 = { @@ -2051,7 +2042,6 @@ static struct omap_hwmod am33xx_uart3_hwmod = { .name = "uart3", .class = &uart_class, .clkdm_name = "l4ls_clkdm", - .flags = HWMOD_SWSUP_SIDLE_ACT, .mpu_irqs = am33xx_uart3_irqs, .sdma_reqs = uart3_edma_reqs, .main_clk = "dpll_per_m2_div4_ck", @@ -2072,7 +2062,6 @@ static struct omap_hwmod am33xx_uart4_hwmod = { .name = "uart4", 
.class = &uart_class, .clkdm_name = "l4ls_clkdm", - .flags = HWMOD_SWSUP_SIDLE_ACT, .mpu_irqs = am33xx_uart4_irqs, .sdma_reqs = uart1_edma_reqs, .main_clk = "dpll_per_m2_div4_ck", @@ -2093,7 +2082,6 @@ static struct omap_hwmod am33xx_uart5_hwmod = { .name = "uart5", .class = &uart_class, .clkdm_name = "l4ls_clkdm", - .flags = HWMOD_SWSUP_SIDLE_ACT, .mpu_irqs = am33xx_uart5_irqs, .sdma_reqs = uart1_edma_reqs, .main_clk = "dpll_per_m2_div4_ck", @@ -2114,7 +2102,6 @@ static struct omap_hwmod am33xx_uart6_hwmod = { .name = "uart6", .class = &uart_class, .clkdm_name = "l4ls_clkdm", - .flags = HWMOD_SWSUP_SIDLE_ACT, .mpu_irqs = am33xx_uart6_irqs, .sdma_reqs = uart1_edma_reqs, .main_clk = "dpll_per_m2_div4_ck", diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 31c7126eb3bb..4083606ea1da 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -490,7 +490,6 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = { .mpu_irqs = omap2_uart1_mpu_irqs, .sdma_reqs = omap2_uart1_sdma_reqs, .main_clk = "uart1_fck", - .flags = HWMOD_SWSUP_SIDLE_ACT, .prcm = { .omap2 = { .module_offs = CORE_MOD, @@ -509,7 +508,6 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = { .mpu_irqs = omap2_uart2_mpu_irqs, .sdma_reqs = omap2_uart2_sdma_reqs, .main_clk = "uart2_fck", - .flags = HWMOD_SWSUP_SIDLE_ACT, .prcm = { .omap2 = { .module_offs = CORE_MOD, @@ -528,7 +526,6 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = { .mpu_irqs = omap2_uart3_mpu_irqs, .sdma_reqs = omap2_uart3_sdma_reqs, .main_clk = "uart3_fck", - .flags = HWMOD_SWSUP_SIDLE_ACT, .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, @@ -558,7 +555,6 @@ static struct omap_hwmod omap36xx_uart4_hwmod = { .mpu_irqs = uart4_mpu_irqs, .sdma_reqs = uart4_sdma_reqs, .main_clk = "uart4_fck", - .flags = HWMOD_SWSUP_SIDLE_ACT, .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 848b6dc67590..eaba9dc91a0d 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -3434,7 +3434,6 @@ static struct omap_hwmod omap44xx_uart1_hwmod = { .name = "uart1", .class = &omap44xx_uart_hwmod_class, .clkdm_name = "l4_per_clkdm", - .flags = HWMOD_SWSUP_SIDLE_ACT, .mpu_irqs = omap44xx_uart1_irqs, .sdma_reqs = omap44xx_uart1_sdma_reqs, .main_clk = "func_48m_fclk", @@ -3463,7 +3462,6 @@ static struct omap_hwmod omap44xx_uart2_hwmod = { .name = "uart2", .class = &omap44xx_uart_hwmod_class, .clkdm_name = "l4_per_clkdm", - .flags = HWMOD_SWSUP_SIDLE_ACT, .mpu_irqs = omap44xx_uart2_irqs, .sdma_reqs = omap44xx_uart2_sdma_reqs, .main_clk = "func_48m_fclk", @@ -3492,8 +3490,7 @@ static struct omap_hwmod omap44xx_uart3_hwmod = { .name = "uart3", .class = &omap44xx_uart_hwmod_class, .clkdm_name = "l4_per_clkdm", - .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET | - HWMOD_SWSUP_SIDLE_ACT, + .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET, .mpu_irqs = omap44xx_uart3_irqs, .sdma_reqs = omap44xx_uart3_sdma_reqs, .main_clk = "func_48m_fclk", @@ -3522,7 +3519,6 @@ static struct omap_hwmod omap44xx_uart4_hwmod = { .name = "uart4", .class = &omap44xx_uart_hwmod_class, .clkdm_name = "l4_per_clkdm", - .flags = HWMOD_SWSUP_SIDLE_ACT, .mpu_irqs = omap44xx_uart4_irqs, .sdma_reqs = omap44xx_uart4_sdma_reqs, .main_clk = "func_48m_fclk", diff --git a/trunk/arch/arm/mach-omap2/pm34xx.c 
b/trunk/arch/arm/mach-omap2/pm34xx.c index 5a2d8034c8de..c01859398b54 100644 --- a/trunk/arch/arm/mach-omap2/pm34xx.c +++ b/trunk/arch/arm/mach-omap2/pm34xx.c @@ -546,10 +546,8 @@ static void __init prcm_setup_regs(void) /* Clear any pending PRCM interrupts */ omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); - /* - * We need to idle iva2_pwrdm even on am3703 with no iva2. - */ - omap3_iva_idle(); + if (omap3_has_iva()) + omap3_iva_idle(); omap3_d2d_idle(); } diff --git a/trunk/arch/arm/mach-omap2/serial.c b/trunk/arch/arm/mach-omap2/serial.c index f6601563aa69..8396b5b7e912 100644 --- a/trunk/arch/arm/mach-omap2/serial.c +++ b/trunk/arch/arm/mach-omap2/serial.c @@ -95,9 +95,38 @@ static void omap_uart_enable_wakeup(struct device *dev, bool enable) omap_hwmod_disable_wakeup(od->hwmods[0]); } +/* + * Errata i291: [UART]:Cannot Acknowledge Idle Requests + * in Smartidle Mode When Configured for DMA Operations. + * WA: configure uart in force idle mode. + */ +static void omap_uart_set_noidle(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct omap_device *od = to_omap_device(pdev); + + omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO); +} + +static void omap_uart_set_smartidle(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct omap_device *od = to_omap_device(pdev); + u8 idlemode; + + if (od->hwmods[0]->class->sysc->idlemodes & SIDLE_SMART_WKUP) + idlemode = HWMOD_IDLEMODE_SMART_WKUP; + else + idlemode = HWMOD_IDLEMODE_SMART; + + omap_hwmod_set_slave_idlemode(od->hwmods[0], idlemode); +} + #else static void omap_uart_enable_wakeup(struct device *dev, bool enable) {} +static void omap_uart_set_noidle(struct device *dev) {} +static void omap_uart_set_smartidle(struct device *dev) {} #endif /* CONFIG_PM */ #ifdef CONFIG_OMAP_MUX @@ -270,6 +299,8 @@ void __init omap_serial_init_port(struct omap_board_data *bdata, omap_up.uartclk = OMAP24XX_BASE_BAUD * 16; omap_up.flags = UPF_BOOT_AUTOCONF; omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count; + omap_up.set_forceidle = omap_uart_set_smartidle; + omap_up.set_noidle = omap_uart_set_noidle; omap_up.enable_wakeup = omap_uart_enable_wakeup; omap_up.dma_rx_buf_size = info->dma_rx_buf_size; omap_up.dma_rx_timeout = info->dma_rx_timeout; diff --git a/trunk/arch/arm/mach-orion5x/common.c b/trunk/arch/arm/mach-orion5x/common.c index f8a6db9239bf..b97fd672e89d 100644 --- a/trunk/arch/arm/mach-orion5x/common.c +++ b/trunk/arch/arm/mach-orion5x/common.c @@ -199,6 +199,13 @@ void __init orion5x_init_early(void) orion_time_set_base(TIMER_VIRT_BASE); + /* + * Some Orion5x devices allocate their coherent buffers from atomic + * context. Increase size of atomic coherent pool to make sure such + * the allocations won't fail. 
+ */ + init_dma_coherent_pool_size(SZ_1M); + /* Initialize the MBUS driver */ orion5x_pcie_id(&dev, &rev); if (dev == MV88F5281_DEV_ID) diff --git a/trunk/arch/arm/mach-prima2/pm.c b/trunk/arch/arm/mach-prima2/pm.c index 8f595c0cc8d9..9936c180bf01 100644 --- a/trunk/arch/arm/mach-prima2/pm.c +++ b/trunk/arch/arm/mach-prima2/pm.c @@ -101,10 +101,8 @@ static int __init sirfsoc_of_pwrc_init(void) struct device_node *np; np = of_find_matching_node(NULL, pwrc_ids); - if (!np) { - pr_err("unable to find compatible sirf pwrc node in dtb\n"); - return -ENOENT; - } + if (!np) + panic("unable to find compatible pwrc node in dtb\n"); /* * pwrc behind rtciobrg is not located in memory space diff --git a/trunk/arch/arm/mach-prima2/rstc.c b/trunk/arch/arm/mach-prima2/rstc.c index d5e0cbc934c0..435019ca0a48 100644 --- a/trunk/arch/arm/mach-prima2/rstc.c +++ b/trunk/arch/arm/mach-prima2/rstc.c @@ -28,10 +28,8 @@ static int __init sirfsoc_of_rstc_init(void) struct device_node *np; np = of_find_matching_node(NULL, rstc_ids); - if (!np) { - pr_err("unable to find compatible sirf rstc node in dtb\n"); - return -ENOENT; - } + if (!np) + panic("unable to find compatible rstc node in dtb\n"); sirfsoc_rstc_base = of_iomap(np, 0); if (!sirfsoc_rstc_base) diff --git a/trunk/arch/arm/mach-shmobile/board-marzen.c b/trunk/arch/arm/mach-shmobile/board-marzen.c index b9594e911ce7..91052855cc12 100644 --- a/trunk/arch/arm/mach-shmobile/board-marzen.c +++ b/trunk/arch/arm/mach-shmobile/board-marzen.c @@ -212,8 +212,8 @@ static struct platform_device *marzen_devices[] __initdata = { static struct usb_phy *phy; static int usb_power_on(struct platform_device *pdev) { - if (IS_ERR(phy)) - return PTR_ERR(phy); + if (!phy) + return -EIO; pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); @@ -225,7 +225,7 @@ static int usb_power_on(struct platform_device *pdev) static void usb_power_off(struct platform_device *pdev) { - if (IS_ERR(phy)) + if (!phy) return; usb_phy_shutdown(phy); diff --git a/trunk/arch/arm/mach-shmobile/setup-sh73a0.c b/trunk/arch/arm/mach-shmobile/setup-sh73a0.c index 9696f3646864..fdf3894b1cc3 100644 --- a/trunk/arch/arm/mach-shmobile/setup-sh73a0.c +++ b/trunk/arch/arm/mach-shmobile/setup-sh73a0.c @@ -252,7 +252,7 @@ static struct sh_timer_config cmt10_platform_data = { .name = "CMT10", .channel_offset = 0x10, .timer_bit = 0, - .clockevent_rating = 80, + .clockevent_rating = 125, .clocksource_rating = 125, }; diff --git a/trunk/arch/arm/mach-sunxi/Kconfig b/trunk/arch/arm/mach-sunxi/Kconfig index 5b045e302b43..d259c782d742 100644 --- a/trunk/arch/arm/mach-sunxi/Kconfig +++ b/trunk/arch/arm/mach-sunxi/Kconfig @@ -1,6 +1,5 @@ config ARCH_SUNXI bool "Allwinner A1X SOCs" if ARCH_MULTI_V7 - select ARCH_REQUIRE_GPIOLIB select CLKSRC_MMIO select CLKSRC_OF select COMMON_CLK diff --git a/trunk/arch/arm/mach-tegra/tegra2_emc.c b/trunk/arch/arm/mach-tegra/tegra2_emc.c index 31e69a019bdd..9e8bdfa2b369 100644 --- a/trunk/arch/arm/mach-tegra/tegra2_emc.c +++ b/trunk/arch/arm/mach-tegra/tegra2_emc.c @@ -307,6 +307,11 @@ static int tegra_emc_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "missing register base\n"); + return -ENOMEM; + } + emc_regbase = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(emc_regbase)) return PTR_ERR(emc_regbase); diff --git a/trunk/arch/arm/mach-ux500/Kconfig b/trunk/arch/arm/mach-ux500/Kconfig index b19b07204aaf..6a4387e39df8 100644 --- a/trunk/arch/arm/mach-ux500/Kconfig +++ 
b/trunk/arch/arm/mach-ux500/Kconfig @@ -51,7 +51,6 @@ config MACH_MOP500 bool "U8500 Development platform, MOP500 versions" select I2C select I2C_NOMADIK - select REGULATOR select REGULATOR_FIXED_VOLTAGE select SOC_BUS select UX500_SOC_DB8500 diff --git a/trunk/arch/arm/mach-ux500/board-mop500-regulators.c b/trunk/arch/arm/mach-ux500/board-mop500-regulators.c index d6b7c8556fa1..33c353bc1c4a 100644 --- a/trunk/arch/arm/mach-ux500/board-mop500-regulators.c +++ b/trunk/arch/arm/mach-ux500/board-mop500-regulators.c @@ -374,7 +374,6 @@ static struct ab8500_regulator_reg_init ab8500_reg_init[] = { static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { /* supplies to the display/camera */ [AB8500_LDO_AUX1] = { - .supply_regulator = "ab8500-ext-supply3", .constraints = { .name = "V-DISPLAY", .min_uV = 2800000, @@ -388,7 +387,6 @@ static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { }, /* supplies to the on-board eMMC */ [AB8500_LDO_AUX2] = { - .supply_regulator = "ab8500-ext-supply3", .constraints = { .name = "V-eMMC1", .min_uV = 1100000, @@ -404,7 +402,6 @@ static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { }, /* supply for VAUX3, supplies to SDcard slots */ [AB8500_LDO_AUX3] = { - .supply_regulator = "ab8500-ext-supply3", .constraints = { .name = "V-MMC-SD", .min_uV = 1100000, diff --git a/trunk/arch/arm/mach-ux500/board-mop500.c b/trunk/arch/arm/mach-ux500/board-mop500.c index 78389de94dde..3cd555ac6d0a 100644 --- a/trunk/arch/arm/mach-ux500/board-mop500.c +++ b/trunk/arch/arm/mach-ux500/board-mop500.c @@ -623,7 +623,7 @@ static void __init mop500_init_machine(void) sdi0_reg_info.gpios[0].gpio = GPIO_SDMMC_1V8_3V_SEL; mop500_pinmaps_init(); - parent = u8500_init_devices(); + parent = u8500_init_devices(&ab8500_platdata); for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) mop500_platform_devs[i]->dev.parent = parent; @@ -660,7 +660,7 @@ static void __init snowball_init_machine(void) sdi0_reg_info.gpios[0].gpio = SNOWBALL_SDMMC_1V8_3V_GPIO; snowball_pinmaps_init(); - parent = u8500_init_devices(); + parent = u8500_init_devices(&ab8500_platdata); for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++) snowball_platform_devs[i]->dev.parent = parent; @@ -698,7 +698,7 @@ static void __init hrefv60_init_machine(void) sdi0_reg_info.gpios[0].gpio = HREFV60_SDMMC_1V8_3V_GPIO; hrefv60_pinmaps_init(); - parent = u8500_init_devices(); + parent = u8500_init_devices(&ab8500_platdata); for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) mop500_platform_devs[i]->dev.parent = parent; diff --git a/trunk/arch/arm/mach-ux500/cpu-db8500.c b/trunk/arch/arm/mach-ux500/cpu-db8500.c index 46cca52890bc..e90b5ab23b6d 100644 --- a/trunk/arch/arm/mach-ux500/cpu-db8500.c +++ b/trunk/arch/arm/mach-ux500/cpu-db8500.c @@ -206,7 +206,7 @@ static struct device * __init db8500_soc_device_init(void) /* * This function is called from the board init */ -struct device * __init u8500_init_devices(void) +struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500) { struct device *parent; int i; @@ -220,6 +220,8 @@ struct device * __init u8500_init_devices(void) for (i = 0; i < ARRAY_SIZE(platform_devs); i++) platform_devs[i]->dev.parent = parent; + db8500_prcmu_device.dev.platform_data = ab8500; + platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs)); return parent; @@ -276,7 +278,7 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = { OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL), 
OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu", &db8500_prcmu_pdata), - OF_DEV_AUXDATA("smsc,lan9115", 0x50000000, "smsc911x.0", NULL), + OF_DEV_AUXDATA("smsc,lan9115", 0x50000000, "smsc911x", NULL), /* Requires device name bindings. */ OF_DEV_AUXDATA("stericsson,nmk-pinctrl", U8500_PRCMU_BASE, "pinctrl-db8500", NULL), diff --git a/trunk/arch/arm/mach-ux500/cpuidle.c b/trunk/arch/arm/mach-ux500/cpuidle.c index a45dd09daed9..317a2be129fb 100644 --- a/trunk/arch/arm/mach-ux500/cpuidle.c +++ b/trunk/arch/arm/mach-ux500/cpuidle.c @@ -21,7 +21,6 @@ #include #include "db8500-regs.h" -#include "id.h" static atomic_t master = ATOMIC_INIT(0); static DEFINE_SPINLOCK(master_lock); @@ -115,9 +114,6 @@ static struct cpuidle_driver ux500_idle_driver = { int __init ux500_idle_init(void) { - if (!(cpu_is_u8500_family() || cpu_is_ux540_family())) - return -ENODEV; - /* Configure wake up reasons */ prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) | PRCMU_WAKEUP(ABB)); diff --git a/trunk/arch/arm/mach-ux500/setup.h b/trunk/arch/arm/mach-ux500/setup.h index cad3ca86c540..bddce2b49372 100644 --- a/trunk/arch/arm/mach-ux500/setup.h +++ b/trunk/arch/arm/mach-ux500/setup.h @@ -18,7 +18,7 @@ void __init ux500_map_io(void); extern void __init u8500_map_io(void); -extern struct device * __init u8500_init_devices(void); +extern struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500); extern void __init ux500_init_irq(void); extern void __init ux500_init_late(void); diff --git a/trunk/arch/arm/mach-vt8500/vt8500.c b/trunk/arch/arm/mach-vt8500/vt8500.c index f5c33df7a597..1dd281efc020 100644 --- a/trunk/arch/arm/mach-vt8500/vt8500.c +++ b/trunk/arch/arm/mach-vt8500/vt8500.c @@ -173,7 +173,6 @@ static const char * const vt8500_dt_compat[] = { "wm,wm8505", "wm,wm8750", "wm,wm8850", - NULL }; DT_MACHINE_START(WMT_DT, "VIA/Wondermedia SoC (Device Tree Support)") diff --git a/trunk/arch/arm/mm/cache-v7.S b/trunk/arch/arm/mm/cache-v7.S index 515b00064da8..15451ee4acc8 100644 --- a/trunk/arch/arm/mm/cache-v7.S +++ b/trunk/arch/arm/mm/cache-v7.S @@ -92,14 +92,6 @@ ENTRY(v7_flush_dcache_louis) mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr -#ifdef CONFIG_ARM_ERRATA_643719 - ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register - ALT_UP(moveq pc, lr) @ LoUU is zero, so nothing to do - ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p? - biceq r2, r2, #0x0000000f @ clear minor revision number - teqeq r2, r1 @ test for errata affected core and if so... - orreqs r3, #(1 << 21) @ fix LoUIS value (and set flags state to 'ne') -#endif ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2 ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2 moveq pc, lr @ return if level == 0 diff --git a/trunk/arch/arm/mm/flush.c b/trunk/arch/arm/mm/flush.c index 32aa5861119f..0d473cce501c 100644 --- a/trunk/arch/arm/mm/flush.c +++ b/trunk/arch/arm/mm/flush.c @@ -300,39 +300,6 @@ void flush_dcache_page(struct page *page) } EXPORT_SYMBOL(flush_dcache_page); -/* - * Ensure cache coherency for the kernel mapping of this page. We can - * assume that the page is pinned via kmap. - * - * If the page only exists in the page cache and there are no user - * space mappings, this is a no-op since the page was already marked - * dirty at creation. Otherwise, we need to flush the dirty kernel - * cache lines directly. 
- */ -void flush_kernel_dcache_page(struct page *page) -{ - if (cache_is_vivt() || cache_is_vipt_aliasing()) { - struct address_space *mapping; - - mapping = page_mapping(page); - - if (!mapping || mapping_mapped(mapping)) { - void *addr; - - addr = page_address(page); - /* - * kmap_atomic() doesn't set the page virtual - * address for highmem pages, and - * kunmap_atomic() takes care of cache - * flushing already. - */ - if (!IS_ENABLED(CONFIG_HIGHMEM) || addr) - __cpuc_flush_dcache_area(addr, PAGE_SIZE); - } - } -} -EXPORT_SYMBOL(flush_kernel_dcache_page); - /* * Flush an anonymous page so that users of get_user_pages() * can safely access the data. The expected sequence is: diff --git a/trunk/arch/arm/mm/mmu.c b/trunk/arch/arm/mm/mmu.c index 4d409e6a552d..e0d8565671a6 100644 --- a/trunk/arch/arm/mm/mmu.c +++ b/trunk/arch/arm/mm/mmu.c @@ -616,12 +616,10 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, } while (pte++, addr += PAGE_SIZE, addr != end); } -static void __init __map_init_section(pmd_t *pmd, unsigned long addr, +static void __init map_init_section(pmd_t *pmd, unsigned long addr, unsigned long end, phys_addr_t phys, const struct mem_type *type) { - pmd_t *p = pmd; - #ifndef CONFIG_ARM_LPAE /* * In classic MMU format, puds and pmds are folded in to @@ -640,7 +638,7 @@ static void __init __map_init_section(pmd_t *pmd, unsigned long addr, phys += SECTION_SIZE; } while (pmd++, addr += SECTION_SIZE, addr != end); - flush_pmd_entry(p); + flush_pmd_entry(pmd); } static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, @@ -663,7 +661,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, */ if (type->prot_sect && ((addr | next | phys) & ~SECTION_MASK) == 0) { - __map_init_section(pmd, addr, next, phys, type); + map_init_section(pmd, addr, next, phys, type); } else { alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), type); diff --git a/trunk/arch/arm/mm/nommu.c b/trunk/arch/arm/mm/nommu.c index eb5293a69a84..d51225f90ae2 100644 --- a/trunk/arch/arm/mm/nommu.c +++ b/trunk/arch/arm/mm/nommu.c @@ -57,12 +57,6 @@ void flush_dcache_page(struct page *page) } EXPORT_SYMBOL(flush_dcache_page); -void flush_kernel_dcache_page(struct page *page) -{ - __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); -} -EXPORT_SYMBOL(flush_kernel_dcache_page); - void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long uaddr, void *dst, const void *src, unsigned long len) diff --git a/trunk/arch/arm/mm/proc-fa526.S b/trunk/arch/arm/mm/proc-fa526.S index aaeb6c127c7a..d217e9795d74 100644 --- a/trunk/arch/arm/mm/proc-fa526.S +++ b/trunk/arch/arm/mm/proc-fa526.S @@ -81,6 +81,7 @@ ENDPROC(cpu_fa526_reset) */ .align 4 ENTRY(cpu_fa526_do_idle) + mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt mov pc, lr diff --git a/trunk/arch/arm/mm/proc-macros.S b/trunk/arch/arm/mm/proc-macros.S index e3c48a3fe063..f9a0aa725ea9 100644 --- a/trunk/arch/arm/mm/proc-macros.S +++ b/trunk/arch/arm/mm/proc-macros.S @@ -333,8 +333,3 @@ ENTRY(\name\()_tlb_fns) .endif .size \name\()_tlb_fns, . 
- \name\()_tlb_fns .endm - -.macro globl_equ x, y - .globl \x - .equ \x, \y -.endm diff --git a/trunk/arch/arm/mm/proc-v7.S b/trunk/arch/arm/mm/proc-v7.S index e35fec34453e..2c73a7301ff7 100644 --- a/trunk/arch/arm/mm/proc-v7.S +++ b/trunk/arch/arm/mm/proc-v7.S @@ -138,29 +138,6 @@ ENTRY(cpu_v7_do_resume) mov r0, r8 @ control register b cpu_resume_mmu ENDPROC(cpu_v7_do_resume) -#endif - -#ifdef CONFIG_CPU_PJ4B - globl_equ cpu_pj4b_switch_mm, cpu_v7_switch_mm - globl_equ cpu_pj4b_set_pte_ext, cpu_v7_set_pte_ext - globl_equ cpu_pj4b_proc_init, cpu_v7_proc_init - globl_equ cpu_pj4b_proc_fin, cpu_v7_proc_fin - globl_equ cpu_pj4b_reset, cpu_v7_reset -#ifdef CONFIG_PJ4B_ERRATA_4742 -ENTRY(cpu_pj4b_do_idle) - dsb @ WFI may enter a low-power mode - wfi - dsb @barrier - mov pc, lr -ENDPROC(cpu_pj4b_do_idle) -#else - globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle -#endif - globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area - globl_equ cpu_pj4b_do_suspend, cpu_v7_do_suspend - globl_equ cpu_pj4b_do_resume, cpu_v7_do_resume - globl_equ cpu_pj4b_suspend_size, cpu_v7_suspend_size - #endif __CPUINIT @@ -373,9 +350,6 @@ __v7_setup_stack: @ define struct processor (see and proc-macros.S) define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 -#ifdef CONFIG_CPU_PJ4B - define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 -#endif .section ".rodata" @@ -388,7 +362,7 @@ __v7_setup_stack: /* * Standard v7 proc info content */ -.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions +.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0 ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags) ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ @@ -401,7 +375,7 @@ __v7_setup_stack: .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \ HWCAP_EDSP | HWCAP_TLS | \hwcaps .long cpu_v7_name - .long \proc_fns + .long v7_processor_functions .long v7wbi_tlb_fns .long v6_user_fns .long v7_cache_fns @@ -433,14 +407,12 @@ __v7_ca9mp_proc_info: /* * Marvell PJ4B processor. */ -#ifdef CONFIG_CPU_PJ4B .type __v7_pj4b_proc_info, #object __v7_pj4b_proc_info: - .long 0x560f5800 - .long 0xff0fff00 - __v7_proc __v7_pj4b_setup, proc_fns = pj4b_processor_functions + .long 0x562f5840 + .long 0xfffffff0 + __v7_proc __v7_pj4b_setup .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info -#endif /* * ARM Ltd. Cortex A7 processor. 
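Note: the plat-orion/common.c hunk below adjusts the .id and .num_resources values of the ge10/ge11 Ethernet platform devices. As a minimal sketch of the pattern involved (illustration only; the base address, size and IRQ number are placeholders, not real Orion values), an additional port of the same controller is registered as another platform_device instance whose .id tells the instances apart in the resulting device name:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Placeholder register window and interrupt for a hypothetical second port. */
static struct resource example_ge_resources[] = {
	DEFINE_RES_MEM(0xf1074000, 0x4000),
	DEFINE_RES_IRQ(33),
};

/* .id = 1 yields a device named "mv643xx_eth.1", distinct from instance 0. */
static struct platform_device example_ge_port = {
	.name		= "mv643xx_eth",
	.id		= 1,
	.num_resources	= ARRAY_SIZE(example_ge_resources),
	.resource	= example_ge_resources,
};
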
diff --git a/trunk/arch/arm/plat-orion/common.c b/trunk/arch/arm/plat-orion/common.c index c019b7aaf776..251f827271e9 100644 --- a/trunk/arch/arm/plat-orion/common.c +++ b/trunk/arch/arm/plat-orion/common.c @@ -383,7 +383,7 @@ static struct resource orion_ge10_shared_resources[] = { static struct platform_device orion_ge10_shared = { .name = MV643XX_ETH_SHARED_NAME, - .id = 2, + .id = 1, .dev = { .platform_data = &orion_ge10_shared_data, }, @@ -398,8 +398,8 @@ static struct resource orion_ge10_resources[] = { static struct platform_device orion_ge10 = { .name = MV643XX_ETH_NAME, - .id = 2, - .num_resources = 1, + .id = 1, + .num_resources = 2, .resource = orion_ge10_resources, .dev = { .coherent_dma_mask = DMA_BIT_MASK(32), @@ -432,7 +432,7 @@ static struct resource orion_ge11_shared_resources[] = { static struct platform_device orion_ge11_shared = { .name = MV643XX_ETH_SHARED_NAME, - .id = 3, + .id = 1, .dev = { .platform_data = &orion_ge11_shared_data, }, @@ -447,8 +447,8 @@ static struct resource orion_ge11_resources[] = { static struct platform_device orion_ge11 = { .name = MV643XX_ETH_NAME, - .id = 3, - .num_resources = 1, + .id = 1, + .num_resources = 2, .resource = orion_ge11_resources, .dev = { .coherent_dma_mask = DMA_BIT_MASK(32), diff --git a/trunk/arch/arm/plat-orion/include/plat/common.h b/trunk/arch/arm/plat-orion/include/plat/common.h index d9a24f605a2b..e06fc5fefa14 100644 --- a/trunk/arch/arm/plat-orion/include/plat/common.h +++ b/trunk/arch/arm/plat-orion/include/plat/common.h @@ -10,7 +10,6 @@ #ifndef __PLAT_COMMON_H #include -#include struct dsa_platform_data; struct mv_sata_platform_data; diff --git a/trunk/arch/arm/plat-samsung/adc.c b/trunk/arch/arm/plat-samsung/adc.c index 79690f2f6d3f..ca07cb1b155a 100644 --- a/trunk/arch/arm/plat-samsung/adc.c +++ b/trunk/arch/arm/plat-samsung/adc.c @@ -381,6 +381,11 @@ static int s3c_adc_probe(struct platform_device *pdev) } regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) { + dev_err(dev, "failed to find registers\n"); + return -ENXIO; + } + adc->regs = devm_ioremap_resource(dev, regs); if (IS_ERR(adc->regs)) return PTR_ERR(adc->regs); diff --git a/trunk/arch/arm/plat-samsung/devs.c b/trunk/arch/arm/plat-samsung/devs.c index 0f9c3f431a5f..30c2fe243f76 100644 --- a/trunk/arch/arm/plat-samsung/devs.c +++ b/trunk/arch/arm/plat-samsung/devs.c @@ -311,9 +311,9 @@ struct platform_device s5p_device_jpeg = { #ifdef CONFIG_S5P_DEV_FIMD0 static struct resource s5p_fimd0_resource[] = { [0] = DEFINE_RES_MEM(S5P_PA_FIMD0, SZ_32K), - [1] = DEFINE_RES_IRQ_NAMED(IRQ_FIMD0_VSYNC, "vsync"), - [2] = DEFINE_RES_IRQ_NAMED(IRQ_FIMD0_FIFO, "fifo"), - [3] = DEFINE_RES_IRQ_NAMED(IRQ_FIMD0_SYSTEM, "lcd_sys"), + [1] = DEFINE_RES_IRQ(IRQ_FIMD0_VSYNC), + [2] = DEFINE_RES_IRQ(IRQ_FIMD0_FIFO), + [3] = DEFINE_RES_IRQ(IRQ_FIMD0_SYSTEM), }; struct platform_device s5p_device_fimd0 = { diff --git a/trunk/arch/arm/plat-samsung/include/plat/uncompress.h b/trunk/arch/arm/plat-samsung/include/plat/uncompress.h index 02b66d723d1a..438b24846e7f 100644 --- a/trunk/arch/arm/plat-samsung/include/plat/uncompress.h +++ b/trunk/arch/arm/plat-samsung/include/plat/uncompress.h @@ -66,9 +66,6 @@ uart_rd(unsigned int reg) static void putc(int ch) { - if (!config_enabled(CONFIG_DEBUG_LL)) - return; - if (uart_rd(S3C2410_UFCON) & S3C2410_UFCON_FIFOMODE) { int level; @@ -121,12 +118,7 @@ static void arch_decomp_error(const char *x) #ifdef CONFIG_S3C_BOOT_UART_FORCE_FIFO static inline void arch_enable_uart_fifo(void) { - u32 fifocon; - - if 
(!config_enabled(CONFIG_DEBUG_LL)) - return; - - fifocon = uart_rd(S3C2410_UFCON); + u32 fifocon = uart_rd(S3C2410_UFCON); if (!(fifocon & S3C2410_UFCON_FIFOMODE)) { fifocon |= S3C2410_UFCON_RESETBOTH; diff --git a/trunk/arch/arm/plat-samsung/pm.c b/trunk/arch/arm/plat-samsung/pm.c index bd7124c87fea..53210ec4e8ec 100644 --- a/trunk/arch/arm/plat-samsung/pm.c +++ b/trunk/arch/arm/plat-samsung/pm.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include @@ -262,8 +261,7 @@ static int s3c_pm_enter(suspend_state_t state) * require a full power-cycle) */ - if (!of_have_populated_dt() && - !any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) && + if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) && !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) { printk(KERN_ERR "%s: No wake-up sources!\n", __func__); printk(KERN_ERR "%s: Aborting sleep\n", __func__); @@ -272,11 +270,8 @@ static int s3c_pm_enter(suspend_state_t state) /* save all necessary core registers not covered by the drivers */ - if (!of_have_populated_dt()) { - samsung_pm_save_gpios(); - samsung_pm_saved_gpios(); - } - + samsung_pm_save_gpios(); + samsung_pm_saved_gpios(); s3c_pm_save_uarts(); s3c_pm_save_core(); @@ -315,11 +310,8 @@ static int s3c_pm_enter(suspend_state_t state) s3c_pm_restore_core(); s3c_pm_restore_uarts(); - - if (!of_have_populated_dt()) { - samsung_pm_restore_gpios(); - s3c_pm_restored_gpios(); - } + samsung_pm_restore_gpios(); + s3c_pm_restored_gpios(); s3c_pm_debug_init(); diff --git a/trunk/arch/arm/vfp/entry.S b/trunk/arch/arm/vfp/entry.S index 46e17492fd1f..323ce1a62bbf 100644 --- a/trunk/arch/arm/vfp/entry.S +++ b/trunk/arch/arm/vfp/entry.S @@ -60,7 +60,7 @@ ENTRY(vfp_testing_entry) str r11, [r10, #TI_PREEMPT] #endif ldr r0, VFP_arch_address - str r0, [r0] @ set to non-zero value + str r5, [r0] @ known non-zero value mov pc, r9 @ we have handled the fault ENDPROC(vfp_testing_entry) diff --git a/trunk/arch/arm/xen/enlighten.c b/trunk/arch/arm/xen/enlighten.c index 13609e01f4b7..d30042e39974 100644 --- a/trunk/arch/arm/xen/enlighten.c +++ b/trunk/arch/arm/xen/enlighten.c @@ -152,12 +152,11 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, } EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range); -static void __init xen_percpu_init(void *unused) +static int __init xen_secondary_init(unsigned int cpu) { struct vcpu_register_vcpu_info info; struct vcpu_info *vcpup; int err; - int cpu = get_cpu(); pr_info("Xen: initializing cpu%d\n", cpu); vcpup = per_cpu_ptr(xen_vcpu_info, cpu); @@ -166,10 +165,14 @@ static void __init xen_percpu_init(void *unused) info.offset = offset_in_page(vcpup); err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info); - BUG_ON(err); - per_cpu(xen_vcpu, cpu) = vcpup; - - enable_percpu_irq(xen_events_irq, 0); + if (err) { + pr_debug("register_vcpu_info failed: err=%d\n", err); + } else { + /* This cpu is using the registered vcpu info, even if + later ones fail to. 
*/ + per_cpu(xen_vcpu, cpu) = vcpup; + } + return 0; } static void xen_restart(char str, const char *cmd) @@ -205,6 +208,7 @@ static int __init xen_guest_init(void) const char *version = NULL; const char *xen_prefix = "xen,xen-"; struct resource res; + int i; node = of_find_compatible_node(NULL, NULL, "xen,xen"); if (!node) { @@ -261,23 +265,19 @@ static int __init xen_guest_init(void) sizeof(struct vcpu_info)); if (xen_vcpu_info == NULL) return -ENOMEM; + for_each_online_cpu(i) + xen_secondary_init(i); gnttab_init(); if (!xen_initial_domain()) xenbus_probe(NULL); - return 0; -} -core_initcall(xen_guest_init); - -static int __init xen_pm_init(void) -{ pm_power_off = xen_power_off; arm_pm_restart = xen_restart; return 0; } -subsys_initcall(xen_pm_init); +core_initcall(xen_guest_init); static irqreturn_t xen_arm_callback(int irq, void *arg) { @@ -285,6 +285,11 @@ static irqreturn_t xen_arm_callback(int irq, void *arg) return IRQ_HANDLED; } +static __init void xen_percpu_enable_events(void *unused) +{ + enable_percpu_irq(xen_events_irq, 0); +} + static int __init xen_init_events(void) { if (!xen_domain() || xen_events_irq < 0) @@ -298,7 +303,7 @@ static int __init xen_init_events(void) return -EINVAL; } - on_each_cpu(xen_percpu_init, NULL, 0); + on_each_cpu(xen_percpu_enable_events, NULL, 0); return 0; } diff --git a/trunk/arch/arm64/Kconfig b/trunk/arch/arm64/Kconfig index 56b3f6d447ae..48347dcf0566 100644 --- a/trunk/arch/arm64/Kconfig +++ b/trunk/arch/arm64/Kconfig @@ -122,6 +122,8 @@ endmenu menu "Kernel Features" +source "kernel/time/Kconfig" + config ARM64_64K_PAGES bool "Enable 64KB pages support" help diff --git a/trunk/arch/arm64/include/asm/assembler.h b/trunk/arch/arm64/include/asm/assembler.h index 5aceb83b3f5c..c8eedc604984 100644 --- a/trunk/arch/arm64/include/asm/assembler.h +++ b/trunk/arch/arm64/include/asm/assembler.h @@ -82,7 +82,7 @@ .macro enable_dbg_if_not_stepping, tmp mrs \tmp, mdscr_el1 - tbnz \tmp, #0, 9990f + tbnz \tmp, #1, 9990f enable_dbg 9990: .endm diff --git a/trunk/arch/arm64/include/asm/pgtable.h b/trunk/arch/arm64/include/asm/pgtable.h index 3a768e96cf0e..e333a243bfcc 100644 --- a/trunk/arch/arm64/include/asm/pgtable.h +++ b/trunk/arch/arm64/include/asm/pgtable.h @@ -320,6 +320,13 @@ extern int kern_addr_valid(unsigned long addr); #include +/* + * remap a physical page `pfn' of size `size' with page protection `prot' + * into virtual address `from' + */ +#define io_remap_pfn_range(vma,from,pfn,size,prot) \ + remap_pfn_range(vma, from, pfn, size, prot) + #define pgtable_cache_init() do { } while (0) #endif /* !__ASSEMBLY__ */ diff --git a/trunk/arch/arm64/kernel/arm64ksyms.c b/trunk/arch/arm64/kernel/arm64ksyms.c index 41b4f626d554..7df1aad29b67 100644 --- a/trunk/arch/arm64/kernel/arm64ksyms.c +++ b/trunk/arch/arm64/kernel/arm64ksyms.c @@ -34,7 +34,6 @@ EXPORT_SYMBOL(__strnlen_user); EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(copy_page); -EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(__copy_from_user); EXPORT_SYMBOL(__copy_to_user); diff --git a/trunk/arch/arm64/kernel/debug-monitors.c b/trunk/arch/arm64/kernel/debug-monitors.c index f4726dc054b3..0c3ba9f51376 100644 --- a/trunk/arch/arm64/kernel/debug-monitors.c +++ b/trunk/arch/arm64/kernel/debug-monitors.c @@ -136,6 +136,8 @@ void disable_debug_monitors(enum debug_el el) */ static void clear_os_lock(void *unused) { + asm volatile("msr mdscr_el1, %0" : : "r" (0)); + isb(); asm volatile("msr oslar_el1, %0" : : "r" (0)); isb(); } diff --git a/trunk/arch/arm64/kernel/early_printk.c 
b/trunk/arch/arm64/kernel/early_printk.c index fbb6e1843659..ac974f48a7a2 100644 --- a/trunk/arch/arm64/kernel/early_printk.c +++ b/trunk/arch/arm64/kernel/early_printk.c @@ -95,7 +95,7 @@ static void early_write(struct console *con, const char *s, unsigned n) } } -static struct console early_console_dev = { +static struct console early_console = { .name = "earlycon", .write = early_write, .flags = CON_PRINTBUFFER | CON_BOOT, @@ -145,8 +145,7 @@ static int __init setup_early_printk(char *buf) early_base = early_io_map(paddr, EARLYCON_IOBASE); printch = match->printch; - early_console = &early_console_dev; - register_console(&early_console_dev); + register_console(&early_console); return 0; } diff --git a/trunk/arch/arm64/kernel/entry.S b/trunk/arch/arm64/kernel/entry.S index 1d1314280a03..c7e047049f2c 100644 --- a/trunk/arch/arm64/kernel/entry.S +++ b/trunk/arch/arm64/kernel/entry.S @@ -390,16 +390,6 @@ el0_sync_compat: b.eq el0_fpsimd_exc cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0 b.eq el0_undef - cmp x24, #ESR_EL1_EC_CP15_32 // CP15 MRC/MCR trap - b.eq el0_undef - cmp x24, #ESR_EL1_EC_CP15_64 // CP15 MRRC/MCRR trap - b.eq el0_undef - cmp x24, #ESR_EL1_EC_CP14_MR // CP14 MRC/MCR trap - b.eq el0_undef - cmp x24, #ESR_EL1_EC_CP14_LS // CP14 LDC/STC trap - b.eq el0_undef - cmp x24, #ESR_EL1_EC_CP14_64 // CP14 MRRC/MCRR trap - b.eq el0_undef cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0 b.ge el0_dbg b el0_inv diff --git a/trunk/arch/arm64/kernel/perf_event.c b/trunk/arch/arm64/kernel/perf_event.c index 9ba33c40cdf8..1e49e5eb81e9 100644 --- a/trunk/arch/arm64/kernel/perf_event.c +++ b/trunk/arch/arm64/kernel/perf_event.c @@ -1336,7 +1336,6 @@ void perf_callchain_user(struct perf_callchain_entry *entry, return; } - perf_callchain_store(entry, regs->pc); tail = (struct frame_tail __user *)regs->regs[29]; while (entry->nr < PERF_MAX_STACK_DEPTH && diff --git a/trunk/arch/arm64/kernel/setup.c b/trunk/arch/arm64/kernel/setup.c index add6ea616843..6a9a53292590 100644 --- a/trunk/arch/arm64/kernel/setup.c +++ b/trunk/arch/arm64/kernel/setup.c @@ -282,13 +282,12 @@ void __init setup_arch(char **cmdline_p) #endif } -static int __init arm64_device_init(void) +static int __init arm64_of_clk_init(void) { of_clk_init(NULL); - of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); return 0; } -arch_initcall(arm64_device_init); +arch_initcall(arm64_of_clk_init); static DEFINE_PER_CPU(struct cpu, cpu_data); @@ -306,6 +305,13 @@ static int __init topology_init(void) } subsys_initcall(topology_init); +static int __init arm64_device_probe(void) +{ + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); + return 0; +} +device_initcall(arm64_device_probe); + static const char *hwcap_str[] = { "fp", "asimd", diff --git a/trunk/arch/arm64/kernel/traps.c b/trunk/arch/arm64/kernel/traps.c index f30852d28590..61d7dd29f756 100644 --- a/trunk/arch/arm64/kernel/traps.c +++ b/trunk/arch/arm64/kernel/traps.c @@ -267,8 +267,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) return; #endif - if (show_unhandled_signals && unhandled_signal(current, SIGILL) && - printk_ratelimit()) { + if (show_unhandled_signals) { pr_info("%s[%d]: undefined instruction: pc=%p\n", current->comm, task_pid_nr(current), pc); dump_instr(KERN_INFO, regs); @@ -295,7 +294,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs) } #endif - if (show_unhandled_signals && printk_ratelimit()) { + if (show_unhandled_signals) { pr_info("%s[%d]: syscall %d\n", current->comm, 
task_pid_nr(current), (int)regs->syscallno); dump_instr("", regs); @@ -311,20 +310,14 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs) */ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) { - siginfo_t info; - void __user *pc = (void __user *)instruction_pointer(regs); console_verbose(); pr_crit("Bad mode in %s handler detected, code 0x%08x\n", handler[reason], esr); - __show_regs(regs); - - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_ILLOPC; - info.si_addr = pc; - arm64_notify_die("Oops - bad mode", regs, &info, 0); + die("Oops - bad mode", regs, 0); + local_irq_disable(); + panic("bad mode"); } void __pte_error(const char *file, int line, unsigned long val) diff --git a/trunk/arch/arm64/mm/cache.S b/trunk/arch/arm64/mm/cache.S index 48a386094fa3..abe69b80cf7f 100644 --- a/trunk/arch/arm64/mm/cache.S +++ b/trunk/arch/arm64/mm/cache.S @@ -52,7 +52,7 @@ loop1: add x2, x2, #4 // add 4 (line length offset) mov x4, #0x3ff and x4, x4, x1, lsr #3 // find maximum number on the way size - clz w5, w4 // find bit position of way size increment + clz x5, x4 // find bit position of way size increment mov x7, #0x7fff and x7, x7, x1, lsr #13 // extract max number of the index size loop2: diff --git a/trunk/arch/arm64/mm/fault.c b/trunk/arch/arm64/mm/fault.c index 1426468b77f3..98af6e760cce 100644 --- a/trunk/arch/arm64/mm/fault.c +++ b/trunk/arch/arm64/mm/fault.c @@ -113,8 +113,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr, { struct siginfo si; - if (show_unhandled_signals && unhandled_signal(tsk, sig) && - printk_ratelimit()) { + if (show_unhandled_signals) { pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n", tsk->comm, task_pid_nr(tsk), fault_name(esr), sig, addr, esr); diff --git a/trunk/arch/arm64/mm/proc.S b/trunk/arch/arm64/mm/proc.S index a82ae8868077..f1d8b9bbfdad 100644 --- a/trunk/arch/arm64/mm/proc.S +++ b/trunk/arch/arm64/mm/proc.S @@ -119,7 +119,8 @@ ENTRY(__cpu_setup) mov x0, #3 << 20 msr cpacr_el1, x0 // Enable FP/ASIMD - msr mdscr_el1, xzr // Reset mdscr_el1 + mov x0, #1 + msr oslar_el1, x0 // Set the debug OS lock tlbi vmalle1is // invalidate I + D TLBs /* * Memory region attributes for LPAE: diff --git a/trunk/arch/avr32/Kconfig b/trunk/arch/avr32/Kconfig index 549903cfc2cb..bdc35589277f 100644 --- a/trunk/arch/avr32/Kconfig +++ b/trunk/arch/avr32/Kconfig @@ -205,11 +205,6 @@ config ARCH_DISCONTIGMEM_ENABLE config ARCH_SPARSEMEM_ENABLE def_bool n -config NODES_SHIFT - int - default "2" - depends on NEED_MULTIPLE_NODES - source "mm/Kconfig" config OWNERSHIP_TRACE diff --git a/trunk/arch/avr32/include/asm/Kbuild b/trunk/arch/avr32/include/asm/Kbuild index d22af851f3f6..4dd4f78d3dcc 100644 --- a/trunk/arch/avr32/include/asm/Kbuild +++ b/trunk/arch/avr32/include/asm/Kbuild @@ -2,4 +2,3 @@ generic-y += clkdev.h generic-y += exec.h generic-y += trace_clock.h -generic-y += param.h diff --git a/trunk/arch/avr32/include/asm/numnodes.h b/trunk/arch/avr32/include/asm/numnodes.h new file mode 100644 index 000000000000..0b864d7ce330 --- /dev/null +++ b/trunk/arch/avr32/include/asm/numnodes.h @@ -0,0 +1,7 @@ +#ifndef __ASM_AVR32_NUMNODES_H +#define __ASM_AVR32_NUMNODES_H + +/* Max 4 nodes */ +#define NODES_SHIFT 2 + +#endif /* __ASM_AVR32_NUMNODES_H */ diff --git a/trunk/arch/avr32/include/asm/param.h b/trunk/arch/avr32/include/asm/param.h new file mode 100644 index 000000000000..009a167aea1f --- /dev/null +++ b/trunk/arch/avr32/include/asm/param.h @@ -0,0 +1,9 @@ +#ifndef __ASM_AVR32_PARAM_H +#define 
__ASM_AVR32_PARAM_H + +#include + +# define HZ CONFIG_HZ +# define USER_HZ 100 /* User interfaces are in "ticks" */ +# define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */ +#endif /* __ASM_AVR32_PARAM_H */ diff --git a/trunk/arch/avr32/include/asm/pgtable.h b/trunk/arch/avr32/include/asm/pgtable.h index 4beff97e2033..6fbfea61f7bb 100644 --- a/trunk/arch/avr32/include/asm/pgtable.h +++ b/trunk/arch/avr32/include/asm/pgtable.h @@ -362,6 +362,9 @@ typedef pte_t *pte_addr_t; #define kern_addr_valid(addr) (1) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + /* No page table caches to initialize (?) */ #define pgtable_cache_init() do { } while(0) diff --git a/trunk/arch/avr32/include/uapi/asm/Kbuild b/trunk/arch/avr32/include/uapi/asm/Kbuild index 3b85eaddf525..df53e7a46774 100644 --- a/trunk/arch/avr32/include/uapi/asm/Kbuild +++ b/trunk/arch/avr32/include/uapi/asm/Kbuild @@ -33,4 +33,3 @@ header-y += termbits.h header-y += termios.h header-y += types.h header-y += unistd.h -generic-y += param.h diff --git a/trunk/arch/avr32/include/uapi/asm/param.h b/trunk/arch/avr32/include/uapi/asm/param.h new file mode 100644 index 000000000000..d28aa5ee6d37 --- /dev/null +++ b/trunk/arch/avr32/include/uapi/asm/param.h @@ -0,0 +1,18 @@ +#ifndef _UAPI__ASM_AVR32_PARAM_H +#define _UAPI__ASM_AVR32_PARAM_H + + +#ifndef HZ +# define HZ 100 +#endif + +/* TODO: Should be configurable */ +#define EXEC_PAGESIZE 4096 + +#ifndef NOGROUP +# define NOGROUP (-1) +#endif + +#define MAXHOSTNAMELEN 64 + +#endif /* _UAPI__ASM_AVR32_PARAM_H */ diff --git a/trunk/arch/avr32/kernel/module.c b/trunk/arch/avr32/kernel/module.c index 2c9412908024..596f7305d93f 100644 --- a/trunk/arch/avr32/kernel/module.c +++ b/trunk/arch/avr32/kernel/module.c @@ -264,7 +264,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, break; case R_AVR32_GOT18SW: if ((relocation & 0xfffe0003) != 0 - && (relocation & 0xfffc0000) != 0xfffc0000) + && (relocation & 0xfffc0003) != 0xffff0000) return reloc_overflow(module, "R_AVR32_GOT18SW", relocation); relocation >>= 2; diff --git a/trunk/arch/blackfin/include/asm/pgtable.h b/trunk/arch/blackfin/include/asm/pgtable.h index 0b049019eba7..b8663921d3c1 100644 --- a/trunk/arch/blackfin/include/asm/pgtable.h +++ b/trunk/arch/blackfin/include/asm/pgtable.h @@ -88,6 +88,7 @@ extern char empty_zero_page[]; * No page table caches to initialise. */ #define pgtable_cache_init() do { } while (0) +#define io_remap_pfn_range remap_pfn_range /* * All 32bit addresses are effectively valid for vmalloc... 
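The avr32 asm/param.h and uapi/asm/param.h headers in the hunks above keep two tick rates apart: HZ follows CONFIG_HZ (the kernel's internal tick), while USER_HZ is pinned to 100 because interfaces such as times(2) report "ticks" at that fixed rate, and CLOCKS_PER_SEC is defined in terms of USER_HZ. A minimal standalone C sketch of the rescaling this implies is below; the constants and helper are simplified assumptions for illustration, not kernel code.

/*
 * Illustrative sketch only: why asm/param.h carries both HZ and USER_HZ.
 * The kernel may tick at CONFIG_HZ (e.g. 250 or 1000) while user-visible
 * interfaces are defined in fixed USER_HZ (100) ticks, so jiffies have to
 * be rescaled on the way out.  Values here are example assumptions.
 */
#include <stdio.h>

#define HZ      250   /* stand-in for CONFIG_HZ */
#define USER_HZ 100   /* user interfaces are in "ticks" */

static unsigned long jiffies_to_user_ticks(unsigned long j)
{
	/* Widen before multiplying to avoid overflow for large counts. */
	return (unsigned long)((unsigned long long)j * USER_HZ / HZ);
}

int main(void)
{
	unsigned long j = 2500;   /* 10 seconds of kernel ticks at HZ=250 */
	printf("%lu jiffies -> %lu user ticks\n", j, jiffies_to_user_ticks(j));
	return 0;
}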
diff --git a/trunk/arch/c6x/include/asm/pgtable.h b/trunk/arch/c6x/include/asm/pgtable.h index c0eed5b18860..38a4312eb2cb 100644 --- a/trunk/arch/c6x/include/asm/pgtable.h +++ b/trunk/arch/c6x/include/asm/pgtable.h @@ -71,6 +71,7 @@ extern unsigned long empty_zero_page; * No page table caches to initialise */ #define pgtable_cache_init() do { } while (0) +#define io_remap_pfn_range remap_pfn_range #include diff --git a/trunk/arch/cris/include/asm/pgtable.h b/trunk/arch/cris/include/asm/pgtable.h index 8b8c86793225..7df430138355 100644 --- a/trunk/arch/cris/include/asm/pgtable.h +++ b/trunk/arch/cris/include/asm/pgtable.h @@ -258,6 +258,9 @@ static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long addre #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */ diff --git a/trunk/arch/frv/include/asm/pgtable.h b/trunk/arch/frv/include/asm/pgtable.h index eb0110acd19b..6bc241e4b4f8 100644 --- a/trunk/arch/frv/include/asm/pgtable.h +++ b/trunk/arch/frv/include/asm/pgtable.h @@ -488,6 +488,9 @@ static inline int pte_file(pte_t pte) #define PageSkip(page) (0) #define kern_addr_valid(addr) (1) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_SET_WRPROTECT diff --git a/trunk/arch/h8300/include/asm/pgtable.h b/trunk/arch/h8300/include/asm/pgtable.h index 7ca20f894dd7..62ef17676b40 100644 --- a/trunk/arch/h8300/include/asm/pgtable.h +++ b/trunk/arch/h8300/include/asm/pgtable.h @@ -52,6 +52,9 @@ extern int is_in_rom(unsigned long); */ #define pgtable_cache_init() do { } while (0) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + /* * All 32bit addresses are effectively valid for vmalloc... * Sort of meaningless for non-VM targets. 
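Several pgtable.h hunks in this run (c6x, cris, frv, h8300 above, and more of the same below) add one identical definition: on architectures with no special I/O aliasing requirements, io_remap_pfn_range simply forwards to remap_pfn_range. A minimal standalone C sketch of that forwarding-macro pattern follows; the stub function and types stand in for the real kernel symbols purely for illustration.

/*
 * Illustrative sketch only: io_remap_pfn_range as a thin forwarding
 * macro.  remap_pfn_range_stub and struct vm_area are invented stand-ins
 * for the kernel's remap_pfn_range and vm_area_struct so the pattern can
 * be shown in isolation; the argument order mirrors the hunks above
 * (mapping handle, virtual address, pfn, size, protection).
 */
#include <stdio.h>

struct vm_area { unsigned long start, end; };

static int remap_pfn_range_stub(struct vm_area *vma, unsigned long vaddr,
				unsigned long pfn, unsigned long size,
				unsigned long prot)
{
	(void)vma;
	printf("map pfn %#lx at %#lx, %lu bytes, prot %#lx\n",
	       pfn, vaddr, size, prot);
	return 0;
}

/* Parameterized forwarding form, as added for cris/frv/h8300/m32r. */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range_stub(vma, vaddr, pfn, size, prot)

int main(void)
{
	struct vm_area vma = { 0x40000000, 0x40001000 };

	/* An I/O mapping request ends up in the ordinary remap path. */
	return io_remap_pfn_range(&vma, vma.start, 0x1234,
				  vma.end - vma.start, 0x3);
}

The bare-name variant used by blackfin and c6x (#define io_remap_pfn_range remap_pfn_range) behaves the same for direct calls; the parameterized form shown here differs only in that it cannot be taken as a function pointer.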
diff --git a/trunk/arch/hexagon/include/asm/pgtable.h b/trunk/arch/hexagon/include/asm/pgtable.h index d8bd54fa431e..20d55f69fe55 100644 --- a/trunk/arch/hexagon/include/asm/pgtable.h +++ b/trunk/arch/hexagon/include/asm/pgtable.h @@ -452,6 +452,10 @@ static inline int pte_exec(pte_t pte) #define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +/* Nothing special about IO remapping at this point */ +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + /* I think this is in case we have page table caches; needed by init/main.c */ #define pgtable_cache_init() do { } while (0) diff --git a/trunk/arch/ia64/include/asm/irqflags.h b/trunk/arch/ia64/include/asm/irqflags.h index cec6c06b52c0..1bf2cf2f4ab4 100644 --- a/trunk/arch/ia64/include/asm/irqflags.h +++ b/trunk/arch/ia64/include/asm/irqflags.h @@ -11,7 +11,6 @@ #define _ASM_IA64_IRQFLAGS_H #include -#include #ifdef CONFIG_IA64_DEBUG_IRQ extern unsigned long last_cli_ip; diff --git a/trunk/arch/ia64/include/asm/pgtable.h b/trunk/arch/ia64/include/asm/pgtable.h index 7935115398a6..815810cbbedc 100644 --- a/trunk/arch/ia64/include/asm/pgtable.h +++ b/trunk/arch/ia64/include/asm/pgtable.h @@ -493,6 +493,9 @@ extern void paging_init (void); #define pte_to_pgoff(pte) ((pte_val(pte) << 1) >> 3) #define pgoff_to_pte(off) ((pte_t) { ((off) << 2) | _PAGE_FILE }) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. diff --git a/trunk/arch/ia64/include/asm/tlb.h b/trunk/arch/ia64/include/asm/tlb.h index ef3a9de01954..c3ffe3e54edc 100644 --- a/trunk/arch/ia64/include/asm/tlb.h +++ b/trunk/arch/ia64/include/asm/tlb.h @@ -46,6 +46,12 @@ #include #include +#ifdef CONFIG_SMP +# define tlb_fast_mode(tlb) ((tlb)->nr == ~0U) +#else +# define tlb_fast_mode(tlb) (1) +#endif + /* * If we can't allocate a page to make a big batch of page pointers * to work on, then just handle a few from the on-stack structure. @@ -54,7 +60,7 @@ struct mmu_gather { struct mm_struct *mm; - unsigned int nr; + unsigned int nr; /* == ~0U => fast mode */ unsigned int max; unsigned char fullmm; /* non-zero means full mm flush */ unsigned char need_flush; /* really unmapped some PTEs? */ @@ -97,7 +103,6 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS]; static inline void ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end) { - unsigned long i; unsigned int nr; if (!tlb->need_flush) @@ -136,11 +141,13 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e /* lastly, release the freed pages */ nr = tlb->nr; - - tlb->nr = 0; - tlb->start_addr = ~0UL; - for (i = 0; i < nr; ++i) - free_page_and_swap_cache(tlb->pages[i]); + if (!tlb_fast_mode(tlb)) { + unsigned long i; + tlb->nr = 0; + tlb->start_addr = ~0UL; + for (i = 0; i < nr; ++i) + free_page_and_swap_cache(tlb->pages[i]); + } } static inline void __tlb_alloc_page(struct mmu_gather *tlb) @@ -160,7 +167,20 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_m tlb->mm = mm; tlb->max = ARRAY_SIZE(tlb->local); tlb->pages = tlb->local; - tlb->nr = 0; + /* + * Use fast mode if only 1 CPU is online. + * + * It would be tempting to turn on fast-mode for full_mm_flush as well. 
But this + * doesn't work because of speculative accesses and software prefetching: the page + * table of "mm" may (and usually is) the currently active page table and even + * though the kernel won't do any user-space accesses during the TLB shoot down, a + * compiler might use speculation or lfetch.fault on what happens to be a valid + * user-space address. This in turn could trigger a TLB miss fault (or a VHPT + * walk) and re-insert a TLB entry we just removed. Slow mode avoids such + * problems. (We could make fast-mode work by switching the current task to a + * different "mm" during the shootdown.) --davidm 08/02/2002 + */ + tlb->nr = (num_online_cpus() == 1) ? ~0U : 0; tlb->fullmm = full_mm_flush; tlb->start_addr = ~0UL; } @@ -194,6 +214,11 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) { tlb->need_flush = 1; + if (tlb_fast_mode(tlb)) { + free_page_and_swap_cache(page); + return 1; /* avoid calling tlb_flush_mmu */ + } + if (!tlb->nr && tlb->pages == tlb->local) __tlb_alloc_page(tlb); diff --git a/trunk/arch/m32r/include/asm/pgtable.h b/trunk/arch/m32r/include/asm/pgtable.h index 103ce6710f07..8a28cfea2729 100644 --- a/trunk/arch/m32r/include/asm/pgtable.h +++ b/trunk/arch/m32r/include/asm/pgtable.h @@ -347,6 +347,9 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep) /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ #define kern_addr_valid(addr) (1) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_SET_WRPROTECT diff --git a/trunk/arch/m68k/configs/amiga_defconfig b/trunk/arch/m68k/configs/amiga_defconfig index 19325e117eea..90d3109c82f4 100644 --- a/trunk/arch/m68k/configs/amiga_defconfig +++ b/trunk/arch/m68k/configs/amiga_defconfig @@ -1,78 +1,55 @@ +CONFIG_EXPERIMENTAL=y CONFIG_LOCALVERSION="-amiga" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_LOG_BUF_SHIFT=16 -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_PID_NS is not set -# CONFIG_NET_NS is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_SUN_PARTITION=y -# CONFIG_EFI_PARTITION is not set -CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m +CONFIG_AMIGA=y CONFIG_M68020=y CONFIG_M68030=y CONFIG_M68040=y CONFIG_M68060=y -CONFIG_AMIGA=y +CONFIG_BINFMT_AOUT=m +CONFIG_BINFMT_MISC=m CONFIG_ZORRO=y CONFIG_AMIGA_PCMCIA=y +CONFIG_HEARTBEAT=y +CONFIG_PROC_HARDWARE=y CONFIG_ZORRO_NAMES=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_BINFMT_AOUT=m -CONFIG_BINFMT_MISC=m CONFIG_NET=y CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m CONFIG_UNIX=y -CONFIG_UNIX_DIAG=m -CONFIG_XFRM_MIGRATE=y CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m 
CONFIG_INET_XFRM_MODE_TUNNEL=m CONFIG_INET_XFRM_MODE_BEET=m -# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m -CONFIG_INET_UDP_DIAG=m CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_GRE=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_ZONES=y -# CONFIG_NF_CONNTRACK_PROCFS is not set # CONFIG_NF_CT_PROTO_DCCP is not set CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_NF_CONNTRACK_AMANDA=m @@ -80,37 +57,25 @@ CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NETFILTER_XT_SET=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m CONFIG_NETFILTER_XT_MATCH_CLUSTER=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m @@ -121,8 +86,6 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m @@ -136,31 +99,22 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_TARGET_NETMAP=m CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -170,6 +124,7 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_QUEUE=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -178,30 +133,18 @@ CONFIG_IP6_NF_MATCH_OPTS=m 
CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m -CONFIG_NF_NAT_IPV6=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_RDS=m -CONFIG_RDS_TCP=m -CONFIG_L2TP=m CONFIG_ATALK=m -CONFIG_BATMAN_ADV=m -CONFIG_BATMAN_ADV_DAT=y -# CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set -# CONFIG_FW_LOADER_USER_HELPER is not set CONFIG_CONNECTOR=m CONFIG_PARPORT=m CONFIG_PARPORT_AMIGA=m @@ -211,13 +154,11 @@ CONFIG_AMIGA_FLOPPY=y CONFIG_AMIGA_Z2RAM=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_IDE=y -CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_GAYLE=y CONFIG_BLK_DEV_BUDDHA=y @@ -231,77 +172,57 @@ CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_SRP_TGT_ATTRS=y CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m CONFIG_A3000_SCSI=y CONFIG_A2091_SCSI=y CONFIG_GVP11_SCSI=y CONFIG_SCSI_A4000T=y CONFIG_SCSI_ZORRO7XX=y CONFIG_MD=y +CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID456=m CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_MIRROR=m -CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_UEVENT=y -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m CONFIG_NETDEVICES=y CONFIG_DUMMY=m +CONFIG_MACVLAN=m CONFIG_EQUALIZER=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_VXLAN=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_3COM is not set -CONFIG_A2065=y +CONFIG_NET_ETHERNET=y CONFIG_ARIADNE=y -# CONFIG_NET_CADENCE is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CIRRUS is not set -# CONFIG_NET_VENDOR_FUJITSU is not set -# CONFIG_NET_VENDOR_HP is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set +CONFIG_A2065=y CONFIG_HYDRA=y -CONFIG_APNE=y CONFIG_ZORRO8390=y -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_APNE=y +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOE=m CONFIG_SLIP=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y -# CONFIG_WLAN is not set -CONFIG_INPUT_EVDEV=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_INPUT_FF_MEMLESS=m CONFIG_KEYBOARD_AMIGA=y # CONFIG_KEYBOARD_ATKBD is not set # CONFIG_MOUSE_PS2 is not set @@ -312,14 +233,11 @@ CONFIG_INPUT_MISC=y 
CONFIG_INPUT_M68K_BEEP=m # CONFIG_SERIO is not set CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_PRINTER=m # CONFIG_HW_RANDOM is not set -CONFIG_NTP_PPS=y -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PPS_CLIENT_PARPORT=m -CONFIG_PTP_1588_CLOCK=m +CONFIG_GEN_RTC=m +CONFIG_GEN_RTC_X=y # CONFIG_HWMON is not set CONFIG_FB=y CONFIG_FB_CIRRUS=y @@ -334,64 +252,48 @@ CONFIG_SOUND=m CONFIG_DMASOUND_PAULA=m CONFIG_HID=m CONFIG_HIDRAW=y -CONFIG_UHID=m -# CONFIG_HID_GENERIC is not set # CONFIG_USB_SUPPORT is not set -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_MSM6242=m -CONFIG_RTC_DRV_RP5C01=m -# CONFIG_IOMMU_SUPPORT is not set -CONFIG_HEARTBEAT=y -CONFIG_PROC_HARDWARE=y CONFIG_AMIGA_BUILTIN_SERIAL=y CONFIG_SERIAL_CONSOLE=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set -CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m CONFIG_XFS_FS=m CONFIG_OCFS2_FS=m +# CONFIG_OCFS2_FS_STATS is not set # CONFIG_OCFS2_DEBUG_MASKLOG is not set -CONFIG_FANOTIFY=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_AUTOFS_FS=m CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m -CONFIG_CUSE=m CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_ZISOFS=y CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m +CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m -CONFIG_ECRYPT_FS=m -CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m CONFIG_CRAMFS=m CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_LZO=y -CONFIG_MINIX_FS=m -CONFIG_OMFS_FS=m +CONFIG_MINIX_FS=y CONFIG_HPFS_FS=m -CONFIG_QNX4FS_FS=m -CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y +CONFIG_NFS_V3=y CONFIG_NFS_V4=y -CONFIG_NFS_SWAP=y -CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_CIFS=m -# CONFIG_CIFS_DEBUG is not set +CONFIG_SMB_FS=m +CONFIG_SMB_NLS_DEFAULT=y CONFIG_CODA_FS=m CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_737=m @@ -430,23 +332,10 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_ENCRYPTED_KEYS=m -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_USER=m +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m @@ -456,16 +345,19 @@ CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -481,14 +373,6 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC16=m CONFIG_CRC_T10DIF=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_TEST=m diff --git 
a/trunk/arch/m68k/configs/apollo_defconfig b/trunk/arch/m68k/configs/apollo_defconfig index 14dc6ccda7f4..8f4f657fdbc6 100644 --- a/trunk/arch/m68k/configs/apollo_defconfig +++ b/trunk/arch/m68k/configs/apollo_defconfig @@ -1,76 +1,55 @@ +CONFIG_EXPERIMENTAL=y CONFIG_LOCALVERSION="-apollo" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_LOG_BUF_SHIFT=16 -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_PID_NS is not set -# CONFIG_NET_NS is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_SUN_PARTITION=y -# CONFIG_EFI_PARTITION is not set -CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m +CONFIG_APOLLO=y CONFIG_M68020=y CONFIG_M68030=y CONFIG_M68040=y CONFIG_M68060=y -CONFIG_APOLLO=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +CONFIG_HEARTBEAT=y +CONFIG_PROC_HARDWARE=y CONFIG_NET=y CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m CONFIG_UNIX=y -CONFIG_UNIX_DIAG=m -CONFIG_XFRM_MIGRATE=y CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m CONFIG_INET_XFRM_MODE_BEET=m -# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m -CONFIG_INET_UDP_DIAG=m CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_GRE=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_ZONES=y -# CONFIG_NF_CONNTRACK_PROCFS is not set # CONFIG_NF_CT_PROTO_DCCP is not set CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_NF_CONNTRACK_AMANDA=m @@ -78,37 +57,25 @@ CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NETFILTER_XT_SET=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m CONFIG_NETFILTER_XT_MATCH_CLUSTER=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m 
-CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m @@ -119,8 +86,6 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m @@ -134,31 +99,22 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_TARGET_NETMAP=m CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -168,6 +124,7 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_QUEUE=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -176,34 +133,21 @@ CONFIG_IP6_NF_MATCH_OPTS=m CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m -CONFIG_NF_NAT_IPV6=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_RDS=m -CONFIG_RDS_TCP=m -CONFIG_L2TP=m CONFIG_ATALK=m -CONFIG_BATMAN_ADV=m -CONFIG_BATMAN_ADV_DAT=y -# CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set -# CONFIG_FW_LOADER_USER_HELPER is not set CONFIG_CONNECTOR=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m @@ -218,74 +162,57 @@ CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_SRP_TGT_ATTRS=y CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m CONFIG_MD=y +CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID456=m CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_MIRROR=m -CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_UEVENT=y -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m CONFIG_NETDEVICES=y CONFIG_DUMMY=m +CONFIG_MACVLAN=m CONFIG_EQUALIZER=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m 
-CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_VXLAN=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_CADENCE is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_ETHERNET=y +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOE=m CONFIG_SLIP=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y -# CONFIG_WLAN is not set -CONFIG_INPUT_EVDEV=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_INPUT_FF_MEMLESS=m # CONFIG_KEYBOARD_ATKBD is not set -# CONFIG_MOUSE_PS2 is not set +CONFIG_MOUSE_PS2=m CONFIG_MOUSE_SERIAL=m CONFIG_SERIO=m +# CONFIG_SERIO_SERPORT is not set CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set -CONFIG_NTP_PPS=y -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PTP_1588_CLOCK=m +CONFIG_GEN_RTC=m +CONFIG_GEN_RTC_X=y # CONFIG_HWMON is not set CONFIG_FB=y CONFIG_FRAMEBUFFER_CONSOLE=y @@ -294,61 +221,47 @@ CONFIG_LOGO=y # CONFIG_LOGO_LINUX_CLUT224 is not set CONFIG_HID=m CONFIG_HIDRAW=y -CONFIG_UHID=m -# CONFIG_HID_GENERIC is not set # CONFIG_USB_SUPPORT is not set -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_GENERIC=m -# CONFIG_IOMMU_SUPPORT is not set -CONFIG_HEARTBEAT=y -CONFIG_PROC_HARDWARE=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set -CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m CONFIG_XFS_FS=m CONFIG_OCFS2_FS=m +# CONFIG_OCFS2_FS_STATS is not set # CONFIG_OCFS2_DEBUG_MASKLOG is not set -CONFIG_FANOTIFY=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_AUTOFS_FS=m CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m -CONFIG_CUSE=m CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_ZISOFS=y CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m +CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m -CONFIG_ECRYPT_FS=m -CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m CONFIG_CRAMFS=m CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_LZO=y -CONFIG_MINIX_FS=m -CONFIG_OMFS_FS=m +CONFIG_MINIX_FS=y CONFIG_HPFS_FS=m -CONFIG_QNX4FS_FS=m -CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y +CONFIG_NFS_V3=y CONFIG_NFS_V4=y -CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_CIFS=m -# CONFIG_CIFS_DEBUG is not set +CONFIG_SMB_FS=m +CONFIG_SMB_NLS_DEFAULT=y CONFIG_CODA_FS=m CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_737=m @@ -387,23 +300,10 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_ENCRYPTED_KEYS=m -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_USER=m +# 
CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m @@ -413,16 +313,19 @@ CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -438,14 +341,6 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC16=m CONFIG_CRC_T10DIF=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_TEST=m diff --git a/trunk/arch/m68k/configs/atari_defconfig b/trunk/arch/m68k/configs/atari_defconfig index 6d5370c914b2..4571d33903fe 100644 --- a/trunk/arch/m68k/configs/atari_defconfig +++ b/trunk/arch/m68k/configs/atari_defconfig @@ -1,75 +1,53 @@ +CONFIG_EXPERIMENTAL=y CONFIG_LOCALVERSION="-atari" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_LOG_BUF_SHIFT=16 -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_PID_NS is not set -# CONFIG_NET_NS is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_AMIGA_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_SUN_PARTITION=y -# CONFIG_EFI_PARTITION is not set -CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m +CONFIG_ATARI=y CONFIG_M68020=y CONFIG_M68030=y CONFIG_M68040=y CONFIG_M68060=y -CONFIG_ATARI=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +CONFIG_STRAM_PROC=y +CONFIG_HEARTBEAT=y +CONFIG_PROC_HARDWARE=y CONFIG_NET=y CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m CONFIG_UNIX=y -CONFIG_UNIX_DIAG=m -CONFIG_XFRM_MIGRATE=y CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m CONFIG_INET_XFRM_MODE_BEET=m -# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m -CONFIG_INET_UDP_DIAG=m CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_GRE=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_ZONES=y -# CONFIG_NF_CONNTRACK_PROCFS is not set # CONFIG_NF_CT_PROTO_DCCP is not set CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_NF_CONNTRACK_AMANDA=m @@ -77,37 +55,25 @@ CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m 
-CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NETFILTER_XT_SET=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m CONFIG_NETFILTER_XT_MATCH_CLUSTER=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m @@ -118,8 +84,6 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m @@ -133,31 +97,22 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_TARGET_NETMAP=m CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -167,6 +122,7 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_QUEUE=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -175,30 +131,18 @@ CONFIG_IP6_NF_MATCH_OPTS=m CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m -CONFIG_NF_NAT_IPV6=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_RDS=m -CONFIG_RDS_TCP=m -CONFIG_L2TP=m CONFIG_ATALK=m -CONFIG_BATMAN_ADV=m -CONFIG_BATMAN_ADV_DAT=y -# CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set -# 
CONFIG_FW_LOADER_USER_HELPER is not set CONFIG_CONNECTOR=m CONFIG_PARPORT=m CONFIG_PARPORT_ATARI=m @@ -206,13 +150,11 @@ CONFIG_PARPORT_1284=y CONFIG_ATARI_FLOPPY=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_IDE=y -CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_FALCON_IDE=y CONFIG_RAID_ATTRS=m @@ -225,81 +167,63 @@ CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_SRP_TGT_ATTRS=y CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m CONFIG_ATARI_SCSI=y CONFIG_MD=y +CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID456=m CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_MIRROR=m -CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_UEVENT=y -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m CONFIG_NETDEVICES=y CONFIG_DUMMY=m +CONFIG_MACVLAN=m CONFIG_EQUALIZER=m -CONFIG_MII=y -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_VXLAN=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m +CONFIG_NET_ETHERNET=y +CONFIG_MII=y CONFIG_ATARILANCE=y -# CONFIG_NET_CADENCE is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOE=m CONFIG_SLIP=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y -# CONFIG_WLAN is not set -CONFIG_INPUT_EVDEV=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_INPUT_FF_MEMLESS=m CONFIG_KEYBOARD_ATARI=y # CONFIG_KEYBOARD_ATKBD is not set -# CONFIG_MOUSE_PS2 is not set +CONFIG_MOUSE_PS2=m CONFIG_MOUSE_ATARI=m CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m -# CONFIG_SERIO is not set +# CONFIG_SERIO_SERPORT is not set CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_PRINTER=m # CONFIG_HW_RANDOM is not set -CONFIG_NTP_PPS=y -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PPS_CLIENT_PARPORT=m -CONFIG_PTP_1588_CLOCK=m +CONFIG_GEN_RTC=m +CONFIG_GEN_RTC_X=y # CONFIG_HWMON is not set CONFIG_FB=y CONFIG_FB_ATARI=y @@ -309,64 +233,47 @@ CONFIG_SOUND=m CONFIG_DMASOUND_ATARI=m CONFIG_HID=m CONFIG_HIDRAW=y -CONFIG_UHID=m -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_GENERIC=m -# CONFIG_IOMMU_SUPPORT is not set -CONFIG_HEARTBEAT=y -CONFIG_PROC_HARDWARE=y -CONFIG_NATFEAT=y -CONFIG_NFBLOCK=y -CONFIG_NFCON=y -CONFIG_NFETH=y +# CONFIG_USB_SUPPORT is not set CONFIG_ATARI_DSP56K=m CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set -CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m CONFIG_XFS_FS=m CONFIG_OCFS2_FS=m +# CONFIG_OCFS2_FS_STATS is not set # 
CONFIG_OCFS2_DEBUG_MASKLOG is not set -CONFIG_FANOTIFY=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_AUTOFS_FS=m CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m -CONFIG_CUSE=m CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_ZISOFS=y CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m +CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m -CONFIG_ECRYPT_FS=m -CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m CONFIG_CRAMFS=m CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_LZO=y -CONFIG_MINIX_FS=m -CONFIG_OMFS_FS=m +CONFIG_MINIX_FS=y CONFIG_HPFS_FS=m -CONFIG_QNX4FS_FS=m -CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y +CONFIG_NFS_V3=y CONFIG_NFS_V4=y -CONFIG_NFS_SWAP=y -CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_CIFS=m -# CONFIG_CIFS_DEBUG is not set +CONFIG_SMB_FS=m +CONFIG_SMB_NLS_DEFAULT=y CONFIG_CODA_FS=m CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_737=m @@ -405,23 +312,10 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_ENCRYPTED_KEYS=m -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_USER=m +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m @@ -431,16 +325,19 @@ CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -456,14 +353,6 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC16=y CONFIG_CRC_T10DIF=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_TEST=m diff --git a/trunk/arch/m68k/configs/bvme6000_defconfig b/trunk/arch/m68k/configs/bvme6000_defconfig index c015ddb6fd80..12f211733ba0 100644 --- a/trunk/arch/m68k/configs/bvme6000_defconfig +++ b/trunk/arch/m68k/configs/bvme6000_defconfig @@ -1,74 +1,53 @@ +CONFIG_EXPERIMENTAL=y CONFIG_LOCALVERSION="-bvme6000" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_LOG_BUF_SHIFT=16 -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_PID_NS is not set -# CONFIG_NET_NS is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_SUN_PARTITION=y -# CONFIG_EFI_PARTITION is not set -CONFIG_IOSCHED_DEADLINE=m 
-CONFIG_M68040=y -CONFIG_M68060=y CONFIG_VME=y CONFIG_BVME6000=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_M68040=y +CONFIG_M68060=y CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +CONFIG_PROC_HARDWARE=y CONFIG_NET=y CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m CONFIG_UNIX=y -CONFIG_UNIX_DIAG=m -CONFIG_XFRM_MIGRATE=y CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m CONFIG_INET_XFRM_MODE_BEET=m -# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m -CONFIG_INET_UDP_DIAG=m CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_GRE=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_ZONES=y -# CONFIG_NF_CONNTRACK_PROCFS is not set # CONFIG_NF_CT_PROTO_DCCP is not set CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_NF_CONNTRACK_AMANDA=m @@ -76,37 +55,25 @@ CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NETFILTER_XT_SET=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m CONFIG_NETFILTER_XT_MATCH_CLUSTER=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m @@ -117,8 +84,6 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m @@ -132,31 +97,22 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_ADDRTYPE=m 
CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_TARGET_NETMAP=m CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -166,6 +122,7 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_QUEUE=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -174,34 +131,21 @@ CONFIG_IP6_NF_MATCH_OPTS=m CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m -CONFIG_NF_NAT_IPV6=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_RDS=m -CONFIG_RDS_TCP=m -CONFIG_L2TP=m CONFIG_ATALK=m -CONFIG_BATMAN_ADV=m -CONFIG_BATMAN_ADV_DAT=y -# CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set -# CONFIG_FW_LOADER_USER_HELPER is not set CONFIG_CONNECTOR=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m @@ -216,131 +160,103 @@ CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_SRP_TGT_ATTRS=y CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m CONFIG_BVME6000_SCSI=y CONFIG_MD=y +CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID456=m CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_MIRROR=m -CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_UEVENT=y -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m CONFIG_NETDEVICES=y CONFIG_DUMMY=m +CONFIG_MACVLAN=m CONFIG_EQUALIZER=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_VXLAN=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_CADENCE is not set -# CONFIG_NET_VENDOR_BROADCOM is not set +CONFIG_NET_ETHERNET=y CONFIG_BVME6000_NET=y -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOE=m CONFIG_SLIP=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y -# CONFIG_WLAN is not set -CONFIG_INPUT_EVDEV=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y 
+CONFIG_INPUT_FF_MEMLESS=m # CONFIG_KEYBOARD_ATKBD is not set -# CONFIG_MOUSE_PS2 is not set -# CONFIG_SERIO is not set +CONFIG_MOUSE_PS2=m +CONFIG_MOUSE_SERIAL=m +CONFIG_SERIO=m +# CONFIG_SERIO_SERPORT is not set CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set -CONFIG_NTP_PPS=y -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PTP_1588_CLOCK=m +CONFIG_GEN_RTC=m +CONFIG_GEN_RTC_X=y # CONFIG_HWMON is not set CONFIG_HID=m CONFIG_HIDRAW=y -CONFIG_UHID=m -# CONFIG_HID_GENERIC is not set # CONFIG_USB_SUPPORT is not set -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_GENERIC=m -# CONFIG_IOMMU_SUPPORT is not set -CONFIG_PROC_HARDWARE=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set -CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m CONFIG_XFS_FS=m CONFIG_OCFS2_FS=m +# CONFIG_OCFS2_FS_STATS is not set # CONFIG_OCFS2_DEBUG_MASKLOG is not set -CONFIG_FANOTIFY=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_AUTOFS_FS=m CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m -CONFIG_CUSE=m CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_ZISOFS=y CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m +CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m -CONFIG_ECRYPT_FS=m -CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m CONFIG_CRAMFS=m CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_LZO=y -CONFIG_MINIX_FS=m -CONFIG_OMFS_FS=m +CONFIG_MINIX_FS=y CONFIG_HPFS_FS=m -CONFIG_QNX4FS_FS=m -CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y +CONFIG_NFS_V3=y CONFIG_NFS_V4=y -CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_CIFS=m -# CONFIG_CIFS_DEBUG is not set +CONFIG_SMB_FS=m +CONFIG_SMB_NLS_DEFAULT=y CONFIG_CODA_FS=m CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_737=m @@ -379,23 +295,10 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_ENCRYPTED_KEYS=m -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_USER=m +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m @@ -405,16 +308,19 @@ CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -430,14 +336,7 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC16=m CONFIG_CRC_T10DIF=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_TEST=m +CONFIG_CRC32=m diff --git a/trunk/arch/m68k/configs/hp300_defconfig b/trunk/arch/m68k/configs/hp300_defconfig 
index ec7382d8afff..215389a5407f 100644 --- a/trunk/arch/m68k/configs/hp300_defconfig +++ b/trunk/arch/m68k/configs/hp300_defconfig @@ -1,76 +1,54 @@ +CONFIG_EXPERIMENTAL=y CONFIG_LOCALVERSION="-hp300" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_LOG_BUF_SHIFT=16 -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_PID_NS is not set -# CONFIG_NET_NS is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_SUN_PARTITION=y -# CONFIG_EFI_PARTITION is not set -CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m +CONFIG_HP300=y CONFIG_M68020=y CONFIG_M68030=y CONFIG_M68040=y CONFIG_M68060=y -CONFIG_HP300=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +CONFIG_PROC_HARDWARE=y CONFIG_NET=y CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m CONFIG_UNIX=y -CONFIG_UNIX_DIAG=m -CONFIG_XFRM_MIGRATE=y CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m CONFIG_INET_XFRM_MODE_BEET=m -# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m -CONFIG_INET_UDP_DIAG=m CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_GRE=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_ZONES=y -# CONFIG_NF_CONNTRACK_PROCFS is not set # CONFIG_NF_CT_PROTO_DCCP is not set CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_NF_CONNTRACK_AMANDA=m @@ -78,37 +56,25 @@ CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NETFILTER_XT_SET=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m CONFIG_NETFILTER_XT_MATCH_CLUSTER=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m 
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m @@ -119,8 +85,6 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m @@ -134,31 +98,22 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_TARGET_NETMAP=m CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -168,6 +123,7 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_QUEUE=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -176,34 +132,21 @@ CONFIG_IP6_NF_MATCH_OPTS=m CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m -CONFIG_NF_NAT_IPV6=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_RDS=m -CONFIG_RDS_TCP=m -CONFIG_L2TP=m CONFIG_ATALK=m -CONFIG_BATMAN_ADV=m -CONFIG_BATMAN_ADV_DAT=y -# CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set -# CONFIG_FW_LOADER_USER_HELPER is not set CONFIG_CONNECTOR=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m @@ -218,77 +161,59 @@ CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_SRP_TGT_ATTRS=y CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m CONFIG_MD=y +CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID456=m CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_MIRROR=m -CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_UEVENT=y -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m CONFIG_NETDEVICES=y CONFIG_DUMMY=m +CONFIG_MACVLAN=m CONFIG_EQUALIZER=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_VXLAN=m -CONFIG_NETCONSOLE=m 
-CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m +CONFIG_NET_ETHERNET=y CONFIG_HPLANCE=y -# CONFIG_NET_CADENCE is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOE=m CONFIG_SLIP=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y -# CONFIG_WLAN is not set -CONFIG_INPUT_EVDEV=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_INPUT_FF_MEMLESS=m # CONFIG_KEYBOARD_ATKBD is not set -# CONFIG_MOUSE_PS2 is not set +CONFIG_MOUSE_PS2=m CONFIG_MOUSE_SERIAL=m CONFIG_INPUT_MISC=y CONFIG_HP_SDC_RTC=m -CONFIG_SERIO_SERPORT=m +# CONFIG_SERIO_SERPORT is not set CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set -CONFIG_NTP_PPS=y -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PTP_1588_CLOCK=m +CONFIG_GEN_RTC=m +CONFIG_GEN_RTC_X=y # CONFIG_HWMON is not set CONFIG_FB=y CONFIG_FRAMEBUFFER_CONSOLE=y @@ -297,60 +222,47 @@ CONFIG_LOGO=y # CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_HID=m CONFIG_HIDRAW=y -CONFIG_UHID=m -# CONFIG_HID_GENERIC is not set # CONFIG_USB_SUPPORT is not set -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_GENERIC=m -# CONFIG_IOMMU_SUPPORT is not set -CONFIG_PROC_HARDWARE=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set -CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m CONFIG_XFS_FS=m CONFIG_OCFS2_FS=m +# CONFIG_OCFS2_FS_STATS is not set # CONFIG_OCFS2_DEBUG_MASKLOG is not set -CONFIG_FANOTIFY=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_AUTOFS_FS=m CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m -CONFIG_CUSE=m CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_ZISOFS=y CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m +CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m -CONFIG_ECRYPT_FS=m -CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m CONFIG_CRAMFS=m CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_LZO=y -CONFIG_MINIX_FS=m -CONFIG_OMFS_FS=m +CONFIG_MINIX_FS=y CONFIG_HPFS_FS=m -CONFIG_QNX4FS_FS=m -CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y +CONFIG_NFS_V3=y CONFIG_NFS_V4=y -CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_CIFS=m -# CONFIG_CIFS_DEBUG is not set +CONFIG_SMB_FS=m +CONFIG_SMB_NLS_DEFAULT=y CONFIG_CODA_FS=m CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_737=m @@ -389,23 +301,10 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_ENCRYPTED_KEYS=m -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_USER=m +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y 
CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m @@ -415,16 +314,19 @@ CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -440,14 +342,6 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC16=m CONFIG_CRC_T10DIF=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_TEST=m diff --git a/trunk/arch/m68k/configs/mac_defconfig b/trunk/arch/m68k/configs/mac_defconfig index 7d46fbec7042..cb9dfb30b674 100644 --- a/trunk/arch/m68k/configs/mac_defconfig +++ b/trunk/arch/m68k/configs/mac_defconfig @@ -1,75 +1,49 @@ +CONFIG_EXPERIMENTAL=y CONFIG_LOCALVERSION="-mac" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_LOG_BUF_SHIFT=16 -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_PID_NS is not set -# CONFIG_NET_NS is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_SUN_PARTITION=y -# CONFIG_EFI_PARTITION is not set -CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m +CONFIG_MAC=y CONFIG_M68020=y CONFIG_M68030=y CONFIG_M68040=y -CONFIG_M68KFPU_EMU=y -CONFIG_MAC=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +CONFIG_PROC_HARDWARE=y CONFIG_NET=y CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m CONFIG_UNIX=y -CONFIG_UNIX_DIAG=m -CONFIG_XFRM_MIGRATE=y CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m CONFIG_INET_XFRM_MODE_BEET=m -# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m -CONFIG_INET_UDP_DIAG=m CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_GRE=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_ZONES=y -# CONFIG_NF_CONNTRACK_PROCFS is not set # CONFIG_NF_CT_PROTO_DCCP is not set CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_NF_CONNTRACK_AMANDA=m @@ -77,37 +51,25 @@ CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m 
-CONFIG_NETFILTER_XT_SET=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m CONFIG_NETFILTER_XT_MATCH_CLUSTER=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m @@ -118,8 +80,6 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m @@ -133,31 +93,22 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_TARGET_NETMAP=m CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -167,6 +118,7 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_QUEUE=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -175,45 +127,31 @@ CONFIG_IP6_NF_MATCH_OPTS=m CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m -CONFIG_NF_NAT_IPV6=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_RDS=m -CONFIG_RDS_TCP=m -CONFIG_L2TP=m CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y CONFIG_IPDDP_DECAP=y -CONFIG_BATMAN_ADV=m -CONFIG_BATMAN_ADV_DAT=y -# CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set -# CONFIG_FW_LOADER_USER_HELPER is not set CONFIG_CONNECTOR=m 
-CONFIG_BLK_DEV_SWIM=m +CONFIG_BLK_DEV_SWIM=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_IDE=y -CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_MAC_IDE=y CONFIG_RAID_ATTRS=m @@ -226,30 +164,29 @@ CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_SRP_TGT_ATTRS=y CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m CONFIG_MAC_SCSI=y CONFIG_SCSI_MAC_ESP=y CONFIG_MD=y +CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID456=m CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_MIRROR=m -CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_UEVENT=y -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m CONFIG_ADB=y CONFIG_ADB_MACII=y +CONFIG_ADB_MACIISI=y CONFIG_ADB_IOP=y CONFIG_ADB_PMU68K=y CONFIG_ADB_CUDA=y @@ -257,61 +194,46 @@ CONFIG_INPUT_ADBHID=y CONFIG_MAC_EMUMOUSEBTN=y CONFIG_NETDEVICES=y CONFIG_DUMMY=m +CONFIG_MACVLAN=m CONFIG_EQUALIZER=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_VXLAN=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -CONFIG_MACMACE=y -# CONFIG_NET_CADENCE is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -CONFIG_MAC89x0=y -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -CONFIG_MACSONIC=y +CONFIG_NET_ETHERNET=y CONFIG_MAC8390=y -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_MAC89x0=m +CONFIG_MACSONIC=m +CONFIG_MACMACE=y +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOE=m CONFIG_SLIP=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y -# CONFIG_WLAN is not set -CONFIG_INPUT_EVDEV=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_INPUT_FF_MEMLESS=m # CONFIG_KEYBOARD_ATKBD is not set -# CONFIG_MOUSE_PS2 is not set +CONFIG_MOUSE_PS2=m CONFIG_MOUSE_SERIAL=m CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m CONFIG_SERIO=m +# CONFIG_SERIO_SERPORT is not set CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_SERIAL_PMACZILOG=y CONFIG_SERIAL_PMACZILOG_TTYS=y CONFIG_SERIAL_PMACZILOG_CONSOLE=y # CONFIG_HW_RANDOM is not set -CONFIG_NTP_PPS=y -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PTP_1588_CLOCK=m +CONFIG_GEN_RTC=m +CONFIG_GEN_RTC_X=y # CONFIG_HWMON is not set CONFIG_FB=y CONFIG_FB_VALKYRIE=y @@ -320,60 +242,46 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_HID=m CONFIG_HIDRAW=y -CONFIG_UHID=m -# CONFIG_HID_GENERIC is not set # CONFIG_USB_SUPPORT is not set -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_GENERIC=m -# CONFIG_IOMMU_SUPPORT is not set -CONFIG_PROC_HARDWARE=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not 
set -CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m CONFIG_XFS_FS=m CONFIG_OCFS2_FS=m +# CONFIG_OCFS2_FS_STATS is not set # CONFIG_OCFS2_DEBUG_MASKLOG is not set -CONFIG_FANOTIFY=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_AUTOFS_FS=m CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m -CONFIG_CUSE=m CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_ZISOFS=y CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m +CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m -CONFIG_ECRYPT_FS=m -CONFIG_ECRYPT_FS_MESSAGING=y -CONFIG_HFS_FS=m -CONFIG_HFSPLUS_FS=m +CONFIG_HFS_FS=y +CONFIG_HFSPLUS_FS=y CONFIG_CRAMFS=m CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_LZO=y -CONFIG_MINIX_FS=m -CONFIG_OMFS_FS=m +CONFIG_MINIX_FS=y CONFIG_HPFS_FS=m -CONFIG_QNX4FS_FS=m -CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m -CONFIG_NFS_FS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V3=y CONFIG_NFS_V4=y -CONFIG_NFS_SWAP=y -CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_CIFS=m -# CONFIG_CIFS_DEBUG is not set +CONFIG_SMB_FS=m +CONFIG_SMB_NLS_DEFAULT=y CONFIG_CODA_FS=m CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_737=m @@ -412,23 +320,10 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_ENCRYPTED_KEYS=m -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_USER=m +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m @@ -438,16 +333,19 @@ CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -463,14 +361,6 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC16=m CONFIG_CRC_T10DIF=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_TEST=m diff --git a/trunk/arch/m68k/configs/multi_defconfig b/trunk/arch/m68k/configs/multi_defconfig index 0f795d8e65fa..8d5def4a31e0 100644 --- a/trunk/arch/m68k/configs/multi_defconfig +++ b/trunk/arch/m68k/configs/multi_defconfig @@ -1,29 +1,15 @@ +CONFIG_EXPERIMENTAL=y CONFIG_LOCALVERSION="-multi" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_LOG_BUF_SHIFT=16 -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_PID_NS is not set -# CONFIG_NET_NS is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y 
-CONFIG_UNIXWARE_DISKLABEL=y -# CONFIG_EFI_PARTITION is not set -CONFIG_IOSCHED_DEADLINE=m -CONFIG_M68020=y -CONFIG_M68040=y -CONFIG_M68060=y -CONFIG_M68KFPU_EMU=y CONFIG_AMIGA=y CONFIG_ATARI=y CONFIG_MAC=y @@ -35,50 +21,48 @@ CONFIG_BVME6000=y CONFIG_HP300=y CONFIG_SUN3X=y CONFIG_Q40=y +CONFIG_M68020=y +CONFIG_M68040=y +CONFIG_M68060=y +CONFIG_BINFMT_AOUT=m +CONFIG_BINFMT_MISC=m CONFIG_ZORRO=y CONFIG_AMIGA_PCMCIA=y +CONFIG_STRAM_PROC=y +CONFIG_HEARTBEAT=y +CONFIG_PROC_HARDWARE=y CONFIG_ZORRO_NAMES=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_BINFMT_AOUT=m -CONFIG_BINFMT_MISC=m CONFIG_NET=y CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m CONFIG_UNIX=y -CONFIG_UNIX_DIAG=m -CONFIG_XFRM_MIGRATE=y CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m CONFIG_INET_XFRM_MODE_BEET=m -# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m -CONFIG_INET_UDP_DIAG=m CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_GRE=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_ZONES=y -# CONFIG_NF_CONNTRACK_PROCFS is not set # CONFIG_NF_CT_PROTO_DCCP is not set CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_NF_CONNTRACK_AMANDA=m @@ -86,37 +70,25 @@ CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NETFILTER_XT_SET=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m CONFIG_NETFILTER_XT_MATCH_CLUSTER=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m @@ -127,8 +99,6 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m @@ -142,31 +112,22 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_BITMAP_IP=m 
-CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_TARGET_NETMAP=m CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -176,6 +137,7 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_QUEUE=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -184,34 +146,22 @@ CONFIG_IP6_NF_MATCH_OPTS=m CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m -CONFIG_NF_NAT_IPV6=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_RDS=m -CONFIG_RDS_TCP=m -CONFIG_L2TP=m CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y CONFIG_IPDDP_DECAP=y -CONFIG_BATMAN_ADV=m -CONFIG_BATMAN_ADV_DAT=y -# CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set -# CONFIG_FW_LOADER_USER_HELPER is not set CONFIG_CONNECTOR=m CONFIG_PARPORT=m CONFIG_PARPORT_AMIGA=m @@ -220,17 +170,15 @@ CONFIG_PARPORT_ATARI=m CONFIG_PARPORT_1284=y CONFIG_AMIGA_FLOPPY=y CONFIG_ATARI_FLOPPY=y -CONFIG_BLK_DEV_SWIM=m +CONFIG_BLK_DEV_SWIM=y CONFIG_AMIGA_Z2RAM=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_IDE=y -CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_GAYLE=y CONFIG_BLK_DEV_BUDDHA=y @@ -247,9 +195,11 @@ CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_SRP_TGT_ATTRS=y CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m CONFIG_A3000_SCSI=y CONFIG_A2091_SCSI=y CONFIG_GVP11_SCSI=y @@ -263,24 +213,21 @@ CONFIG_MVME16x_SCSI=y CONFIG_BVME6000_SCSI=y CONFIG_SUN3X_ESP=y CONFIG_MD=y +CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID456=m CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_MIRROR=m -CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_UEVENT=y -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m CONFIG_ADB=y CONFIG_ADB_MACII=y +CONFIG_ADB_MACIISI=y CONFIG_ADB_IOP=y CONFIG_ADB_PMU68K=y CONFIG_ADB_CUDA=y @@ -288,64 +235,49 @@ CONFIG_INPUT_ADBHID=y CONFIG_MAC_EMUMOUSEBTN=y CONFIG_NETDEVICES=y CONFIG_DUMMY=m +CONFIG_MACVLAN=m CONFIG_EQUALIZER=m -CONFIG_MII=y 
-CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_VXLAN=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_3COM is not set -CONFIG_A2065=y +CONFIG_NET_ETHERNET=y +CONFIG_MII=y CONFIG_ARIADNE=y -CONFIG_ATARILANCE=y -CONFIG_HPLANCE=y -CONFIG_MVME147_NET=y -CONFIG_SUN3LANCE=y -CONFIG_MACMACE=y -# CONFIG_NET_CADENCE is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -CONFIG_MAC89x0=y -# CONFIG_NET_VENDOR_FUJITSU is not set -# CONFIG_NET_VENDOR_HP is not set -CONFIG_BVME6000_NET=y -CONFIG_MVME16x_NET=y -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -CONFIG_MACSONIC=y +CONFIG_A2065=y CONFIG_HYDRA=y +CONFIG_ZORRO8390=y +CONFIG_APNE=y CONFIG_MAC8390=y +CONFIG_MAC89x0=y +CONFIG_MACSONIC=y +CONFIG_MACMACE=y +CONFIG_MVME147_NET=y +CONFIG_MVME16x_NET=y +CONFIG_BVME6000_NET=y +CONFIG_ATARILANCE=y +CONFIG_SUN3LANCE=y +CONFIG_HPLANCE=y CONFIG_NE2000=m -CONFIG_APNE=y -CONFIG_ZORRO8390=y -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOE=m CONFIG_SLIP=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y -# CONFIG_WLAN is not set -CONFIG_INPUT_EVDEV=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_INPUT_FF_MEMLESS=m CONFIG_KEYBOARD_AMIGA=y CONFIG_KEYBOARD_ATARI=y # CONFIG_KEYBOARD_ATKBD is not set CONFIG_KEYBOARD_SUNKBD=y -# CONFIG_MOUSE_PS2 is not set +CONFIG_MOUSE_PS2=m CONFIG_MOUSE_SERIAL=m CONFIG_MOUSE_AMIGA=m CONFIG_MOUSE_ATARI=m @@ -353,20 +285,18 @@ CONFIG_INPUT_JOYSTICK=y CONFIG_JOYSTICK_AMIGA=m CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m -CONFIG_HP_SDC_RTC=m +CONFIG_HP_SDC_RTC=y +# CONFIG_SERIO_SERPORT is not set CONFIG_SERIO_Q40KBD=y CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_SERIAL_PMACZILOG=y CONFIG_SERIAL_PMACZILOG_TTYS=y CONFIG_SERIAL_PMACZILOG_CONSOLE=y CONFIG_PRINTER=m # CONFIG_HW_RANDOM is not set -CONFIG_NTP_PPS=y -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PPS_CLIENT_PARPORT=m -CONFIG_PTP_1588_CLOCK=m +CONFIG_GEN_RTC=y +CONFIG_GEN_RTC_X=y # CONFIG_HWMON is not set CONFIG_FB=y CONFIG_FB_CIRRUS=y @@ -386,20 +316,7 @@ CONFIG_DMASOUND_PAULA=m CONFIG_DMASOUND_Q40=m CONFIG_HID=m CONFIG_HIDRAW=y -CONFIG_UHID=m -# CONFIG_HID_GENERIC is not set # CONFIG_USB_SUPPORT is not set -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_MSM6242=m -CONFIG_RTC_DRV_RP5C01=m -CONFIG_RTC_DRV_GENERIC=m -# CONFIG_IOMMU_SUPPORT is not set -CONFIG_HEARTBEAT=y -CONFIG_PROC_HARDWARE=y -CONFIG_NATFEAT=y -CONFIG_NFBLOCK=y -CONFIG_NFCON=y -CONFIG_NFETH=y CONFIG_ATARI_DSP56K=m CONFIG_AMIGA_BUILTIN_SERIAL=y CONFIG_SERIAL_CONSOLE=y @@ -407,49 +324,42 @@ CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set -CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m CONFIG_XFS_FS=m CONFIG_OCFS2_FS=m +# CONFIG_OCFS2_FS_STATS is not set # CONFIG_OCFS2_DEBUG_MASKLOG is not set -CONFIG_FANOTIFY=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_AUTOFS_FS=m CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m -CONFIG_CUSE=m 
CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_ZISOFS=y CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m +CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m -CONFIG_ECRYPT_FS=m -CONFIG_ECRYPT_FS_MESSAGING=y -CONFIG_HFS_FS=m -CONFIG_HFSPLUS_FS=m +CONFIG_HFS_FS=y +CONFIG_HFSPLUS_FS=y CONFIG_CRAMFS=m CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_LZO=y -CONFIG_MINIX_FS=m -CONFIG_OMFS_FS=m +CONFIG_MINIX_FS=y CONFIG_HPFS_FS=m -CONFIG_QNX4FS_FS=m -CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y +CONFIG_NFS_V3=y CONFIG_NFS_V4=y -CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_CIFS=m -# CONFIG_CIFS_DEBUG is not set +CONFIG_SMB_FS=m +CONFIG_SMB_NLS_DEFAULT=y CONFIG_CODA_FS=m CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_737=m @@ -488,23 +398,10 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_ENCRYPTED_KEYS=m -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_USER=m +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m @@ -514,16 +411,19 @@ CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -539,14 +439,6 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC16=y CONFIG_CRC_T10DIF=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_TEST=m diff --git a/trunk/arch/m68k/configs/mvme147_defconfig b/trunk/arch/m68k/configs/mvme147_defconfig index 5586c6529fce..e2af46f530c1 100644 --- a/trunk/arch/m68k/configs/mvme147_defconfig +++ b/trunk/arch/m68k/configs/mvme147_defconfig @@ -1,73 +1,52 @@ +CONFIG_EXPERIMENTAL=y CONFIG_LOCALVERSION="-mvme147" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_LOG_BUF_SHIFT=16 -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_PID_NS is not set -# CONFIG_NET_NS is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_SUN_PARTITION=y -# CONFIG_EFI_PARTITION is not set -CONFIG_IOSCHED_DEADLINE=m -CONFIG_M68030=y CONFIG_VME=y CONFIG_MVME147=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_M68030=y 
CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +CONFIG_PROC_HARDWARE=y CONFIG_NET=y CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m CONFIG_UNIX=y -CONFIG_UNIX_DIAG=m -CONFIG_XFRM_MIGRATE=y CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m CONFIG_INET_XFRM_MODE_BEET=m -# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m -CONFIG_INET_UDP_DIAG=m CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_GRE=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_ZONES=y -# CONFIG_NF_CONNTRACK_PROCFS is not set # CONFIG_NF_CT_PROTO_DCCP is not set CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_NF_CONNTRACK_AMANDA=m @@ -75,37 +54,25 @@ CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NETFILTER_XT_SET=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m CONFIG_NETFILTER_XT_MATCH_CLUSTER=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m @@ -116,8 +83,6 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m @@ -131,31 +96,22 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m 
-CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_TARGET_NETMAP=m CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -165,6 +121,7 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_QUEUE=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -173,34 +130,21 @@ CONFIG_IP6_NF_MATCH_OPTS=m CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m -CONFIG_NF_NAT_IPV6=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_RDS=m -CONFIG_RDS_TCP=m -CONFIG_L2TP=m CONFIG_ATALK=m -CONFIG_BATMAN_ADV=m -CONFIG_BATMAN_ADV_DAT=y -# CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set -# CONFIG_FW_LOADER_USER_HELPER is not set CONFIG_CONNECTOR=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m @@ -215,132 +159,103 @@ CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_SRP_TGT_ATTRS=y CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m CONFIG_MVME147_SCSI=y CONFIG_MD=y +CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID456=m CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_MIRROR=m -CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_UEVENT=y -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m CONFIG_NETDEVICES=y CONFIG_DUMMY=m +CONFIG_MACVLAN=m CONFIG_EQUALIZER=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_VXLAN=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m +CONFIG_NET_ETHERNET=y CONFIG_MVME147_NET=y -# CONFIG_NET_CADENCE is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOE=m CONFIG_SLIP=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y -# CONFIG_WLAN is not set -CONFIG_INPUT_EVDEV=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_INPUT_FF_MEMLESS=m # CONFIG_KEYBOARD_ATKBD is not set -# CONFIG_MOUSE_PS2 is not set -# CONFIG_SERIO is not set +CONFIG_MOUSE_PS2=m +CONFIG_MOUSE_SERIAL=m +CONFIG_SERIO=m +# 
CONFIG_SERIO_SERPORT is not set CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set -CONFIG_NTP_PPS=y -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PTP_1588_CLOCK=m +CONFIG_GEN_RTC=m +CONFIG_GEN_RTC_X=y # CONFIG_HWMON is not set CONFIG_HID=m CONFIG_HIDRAW=y -CONFIG_UHID=m -# CONFIG_HID_GENERIC is not set # CONFIG_USB_SUPPORT is not set -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_GENERIC=m -# CONFIG_IOMMU_SUPPORT is not set -CONFIG_PROC_HARDWARE=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set -CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m CONFIG_XFS_FS=m CONFIG_OCFS2_FS=m +# CONFIG_OCFS2_FS_STATS is not set # CONFIG_OCFS2_DEBUG_MASKLOG is not set -CONFIG_FANOTIFY=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_AUTOFS_FS=m CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m -CONFIG_CUSE=m CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_ZISOFS=y CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m +CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m -CONFIG_ECRYPT_FS=m -CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m CONFIG_CRAMFS=m CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_LZO=y -CONFIG_MINIX_FS=m -CONFIG_OMFS_FS=m +CONFIG_MINIX_FS=y CONFIG_HPFS_FS=m -CONFIG_QNX4FS_FS=m -CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y +CONFIG_NFS_V3=y CONFIG_NFS_V4=y -CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_CIFS=m -# CONFIG_CIFS_DEBUG is not set +CONFIG_SMB_FS=m +CONFIG_SMB_NLS_DEFAULT=y CONFIG_CODA_FS=m CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_737=m @@ -379,23 +294,10 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_ENCRYPTED_KEYS=m -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_USER=m +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m @@ -405,16 +307,19 @@ CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -430,14 +335,6 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC16=m CONFIG_CRC_T10DIF=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_TEST=m diff --git a/trunk/arch/m68k/configs/mvme16x_defconfig b/trunk/arch/m68k/configs/mvme16x_defconfig index e5e8262bbacd..7c9402b2097f 100644 --- a/trunk/arch/m68k/configs/mvme16x_defconfig +++ b/trunk/arch/m68k/configs/mvme16x_defconfig @@ -1,74 +1,53 @@ +CONFIG_EXPERIMENTAL=y 
CONFIG_LOCALVERSION="-mvme16x" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_LOG_BUF_SHIFT=16 -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_PID_NS is not set -# CONFIG_NET_NS is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_SUN_PARTITION=y -# CONFIG_EFI_PARTITION is not set -CONFIG_IOSCHED_DEADLINE=m -CONFIG_M68040=y -CONFIG_M68060=y CONFIG_VME=y CONFIG_MVME16x=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_M68040=y +CONFIG_M68060=y CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +CONFIG_PROC_HARDWARE=y CONFIG_NET=y CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m CONFIG_UNIX=y -CONFIG_UNIX_DIAG=m -CONFIG_XFRM_MIGRATE=y CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m CONFIG_INET_XFRM_MODE_BEET=m -# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m -CONFIG_INET_UDP_DIAG=m CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_GRE=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_ZONES=y -# CONFIG_NF_CONNTRACK_PROCFS is not set # CONFIG_NF_CT_PROTO_DCCP is not set CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_NF_CONNTRACK_AMANDA=m @@ -76,37 +55,25 @@ CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NETFILTER_XT_SET=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m CONFIG_NETFILTER_XT_MATCH_CLUSTER=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m @@ -117,8 +84,6 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m 
-CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m @@ -132,31 +97,22 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_TARGET_NETMAP=m CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -166,6 +122,7 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_QUEUE=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -174,34 +131,21 @@ CONFIG_IP6_NF_MATCH_OPTS=m CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m -CONFIG_NF_NAT_IPV6=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_RDS=m -CONFIG_RDS_TCP=m -CONFIG_L2TP=m CONFIG_ATALK=m -CONFIG_BATMAN_ADV=m -CONFIG_BATMAN_ADV_DAT=y -# CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set -# CONFIG_FW_LOADER_USER_HELPER is not set CONFIG_CONNECTOR=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m @@ -216,131 +160,103 @@ CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_SRP_TGT_ATTRS=y CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m CONFIG_MVME16x_SCSI=y CONFIG_MD=y +CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID456=m CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_MIRROR=m -CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_UEVENT=y -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m CONFIG_NETDEVICES=y CONFIG_DUMMY=m +CONFIG_MACVLAN=m CONFIG_EQUALIZER=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_VXLAN=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_CADENCE is not set -# CONFIG_NET_VENDOR_BROADCOM is not set +CONFIG_NET_ETHERNET=y CONFIG_MVME16x_NET=y -# CONFIG_NET_VENDOR_MARVELL is not set -# 
CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOE=m CONFIG_SLIP=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y -# CONFIG_WLAN is not set -CONFIG_INPUT_EVDEV=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_INPUT_FF_MEMLESS=m # CONFIG_KEYBOARD_ATKBD is not set -# CONFIG_MOUSE_PS2 is not set -# CONFIG_SERIO is not set +CONFIG_MOUSE_PS2=m +CONFIG_MOUSE_SERIAL=m +CONFIG_SERIO=m +# CONFIG_SERIO_SERPORT is not set CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set -CONFIG_NTP_PPS=y -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PTP_1588_CLOCK=m +CONFIG_GEN_RTC=m +CONFIG_GEN_RTC_X=y # CONFIG_HWMON is not set CONFIG_HID=m CONFIG_HIDRAW=y -CONFIG_UHID=m -# CONFIG_HID_GENERIC is not set # CONFIG_USB_SUPPORT is not set -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_GENERIC=m -# CONFIG_IOMMU_SUPPORT is not set -CONFIG_PROC_HARDWARE=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set -CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m CONFIG_XFS_FS=m CONFIG_OCFS2_FS=m +# CONFIG_OCFS2_FS_STATS is not set # CONFIG_OCFS2_DEBUG_MASKLOG is not set -CONFIG_FANOTIFY=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_AUTOFS_FS=m CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m -CONFIG_CUSE=m CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_ZISOFS=y CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m +CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m -CONFIG_ECRYPT_FS=m -CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m CONFIG_CRAMFS=m CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_LZO=y -CONFIG_MINIX_FS=m -CONFIG_OMFS_FS=m +CONFIG_MINIX_FS=y CONFIG_HPFS_FS=m -CONFIG_QNX4FS_FS=m -CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y +CONFIG_NFS_V3=y CONFIG_NFS_V4=y -CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_CIFS=m -# CONFIG_CIFS_DEBUG is not set +CONFIG_SMB_FS=m +CONFIG_SMB_NLS_DEFAULT=y CONFIG_CODA_FS=m CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_737=m @@ -379,23 +295,10 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_ENCRYPTED_KEYS=m -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_USER=m +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m @@ -405,16 +308,19 @@ CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m 
+CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -430,14 +336,6 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC16=m CONFIG_CRC_T10DIF=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_TEST=m diff --git a/trunk/arch/m68k/configs/q40_defconfig b/trunk/arch/m68k/configs/q40_defconfig index 8982370e8b42..19d23db690a4 100644 --- a/trunk/arch/m68k/configs/q40_defconfig +++ b/trunk/arch/m68k/configs/q40_defconfig @@ -1,74 +1,49 @@ +CONFIG_EXPERIMENTAL=y CONFIG_LOCALVERSION="-q40" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_LOG_BUF_SHIFT=16 -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_PID_NS is not set -# CONFIG_NET_NS is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_SUN_PARTITION=y -# CONFIG_EFI_PARTITION is not set -CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m +CONFIG_Q40=y CONFIG_M68040=y CONFIG_M68060=y -CONFIG_Q40=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +CONFIG_HEARTBEAT=y +CONFIG_PROC_HARDWARE=y CONFIG_NET=y CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m CONFIG_UNIX=y -CONFIG_UNIX_DIAG=m -CONFIG_XFRM_MIGRATE=y CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m CONFIG_INET_XFRM_MODE_BEET=m -# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m -CONFIG_INET_UDP_DIAG=m CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_GRE=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_ZONES=y -# CONFIG_NF_CONNTRACK_PROCFS is not set # CONFIG_NF_CT_PROTO_DCCP is not set CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_NF_CONNTRACK_AMANDA=m @@ -76,37 +51,25 @@ CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NETFILTER_XT_SET=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m 
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m CONFIG_NETFILTER_XT_MATCH_CLUSTER=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m @@ -117,8 +80,6 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m @@ -132,31 +93,22 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_TARGET_NETMAP=m CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -166,6 +118,7 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_QUEUE=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -174,40 +127,26 @@ CONFIG_IP6_NF_MATCH_OPTS=m CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m -CONFIG_NF_NAT_IPV6=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_RDS=m -CONFIG_RDS_TCP=m -CONFIG_L2TP=m CONFIG_ATALK=m -CONFIG_BATMAN_ADV=m -CONFIG_BATMAN_ADV_DAT=y -# CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set -# CONFIG_FW_LOADER_USER_HELPER is not set CONFIG_CONNECTOR=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_IDE=y -CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_Q40IDE=y CONFIG_RAID_ATTRS=m @@ -220,82 +159,61 @@ CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +# 
CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_SRP_TGT_ATTRS=y CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m CONFIG_MD=y +CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID456=m CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_MIRROR=m -CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_UEVENT=y -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m CONFIG_NETDEVICES=y CONFIG_DUMMY=m +CONFIG_MACVLAN=m CONFIG_EQUALIZER=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_VXLAN=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_AMD is not set -# CONFIG_NET_CADENCE is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CIRRUS is not set -# CONFIG_NET_VENDOR_FUJITSU is not set -# CONFIG_NET_VENDOR_HP is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set +CONFIG_NET_ETHERNET=y CONFIG_NE2000=m -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOE=m CONFIG_SLIP=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y -# CONFIG_WLAN is not set -CONFIG_INPUT_EVDEV=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_INPUT_FF_MEMLESS=m # CONFIG_KEYBOARD_ATKBD is not set -# CONFIG_MOUSE_PS2 is not set +CONFIG_MOUSE_PS2=m CONFIG_MOUSE_SERIAL=m CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m -CONFIG_SERIO_Q40KBD=y +CONFIG_SERIO=m +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_Q40KBD=m CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set -CONFIG_NTP_PPS=y -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PTP_1588_CLOCK=m +CONFIG_GEN_RTC=m +CONFIG_GEN_RTC_X=y # CONFIG_HWMON is not set CONFIG_FB=y CONFIG_FRAMEBUFFER_CONSOLE=y @@ -304,61 +222,46 @@ CONFIG_SOUND=m CONFIG_DMASOUND_Q40=m CONFIG_HID=m CONFIG_HIDRAW=y -CONFIG_UHID=m -# CONFIG_HID_GENERIC is not set # CONFIG_USB_SUPPORT is not set -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_GENERIC=m -# CONFIG_IOMMU_SUPPORT is not set -CONFIG_HEARTBEAT=y -CONFIG_PROC_HARDWARE=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set -CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m CONFIG_XFS_FS=m CONFIG_OCFS2_FS=m +# CONFIG_OCFS2_FS_STATS is not set # CONFIG_OCFS2_DEBUG_MASKLOG is not set -CONFIG_FANOTIFY=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_AUTOFS_FS=m CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m -CONFIG_CUSE=m CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_ZISOFS=y CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m +CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m -CONFIG_ECRYPT_FS=m -CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m CONFIG_CRAMFS=m CONFIG_SQUASHFS=m 
-CONFIG_SQUASHFS_LZO=y -CONFIG_MINIX_FS=m -CONFIG_OMFS_FS=m +CONFIG_MINIX_FS=y CONFIG_HPFS_FS=m -CONFIG_QNX4FS_FS=m -CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y +CONFIG_NFS_V3=y CONFIG_NFS_V4=y -CONFIG_NFS_SWAP=y -CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_CIFS=m -# CONFIG_CIFS_DEBUG is not set +CONFIG_SMB_FS=m +CONFIG_SMB_NLS_DEFAULT=y CONFIG_CODA_FS=m CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_737=m @@ -397,23 +300,10 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_ENCRYPTED_KEYS=m -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_USER=m +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m @@ -423,16 +313,19 @@ CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -448,14 +341,6 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC16=m CONFIG_CRC_T10DIF=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_TEST=m diff --git a/trunk/arch/m68k/configs/sun3_defconfig b/trunk/arch/m68k/configs/sun3_defconfig index 54674d61e001..ca6c0b4cab77 100644 --- a/trunk/arch/m68k/configs/sun3_defconfig +++ b/trunk/arch/m68k/configs/sun3_defconfig @@ -1,71 +1,50 @@ +CONFIG_EXPERIMENTAL=y CONFIG_LOCALVERSION="-sun3" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_LOG_BUF_SHIFT=16 -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_PID_NS is not set -# CONFIG_NET_NS is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -# CONFIG_EFI_PARTITION is not set -CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_SUN3=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +CONFIG_PROC_HARDWARE=y CONFIG_NET=y CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m CONFIG_UNIX=y -CONFIG_UNIX_DIAG=m -CONFIG_XFRM_MIGRATE=y CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_SYN_COOKIES=y 
-CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m CONFIG_INET_XFRM_MODE_BEET=m -# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m -CONFIG_INET_UDP_DIAG=m CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_GRE=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_ZONES=y -# CONFIG_NF_CONNTRACK_PROCFS is not set # CONFIG_NF_CT_PROTO_DCCP is not set CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_NF_CONNTRACK_AMANDA=m @@ -73,37 +52,25 @@ CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NETFILTER_XT_SET=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m CONFIG_NETFILTER_XT_MATCH_CLUSTER=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m @@ -114,8 +81,6 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m @@ -129,31 +94,22 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_TARGET_NETMAP=m CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -163,6 +119,7 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_QUEUE=m 
CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -171,34 +128,21 @@ CONFIG_IP6_NF_MATCH_OPTS=m CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m -CONFIG_NF_NAT_IPV6=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_RDS=m -CONFIG_RDS_TCP=m -CONFIG_L2TP=m CONFIG_ATALK=m -CONFIG_BATMAN_ADV=m -CONFIG_BATMAN_ADV_DAT=y -# CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set -# CONFIG_FW_LOADER_USER_HELPER is not set CONFIG_CONNECTOR=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m @@ -213,136 +157,107 @@ CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_SRP_TGT_ATTRS=y CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m CONFIG_SUN3_SCSI=y CONFIG_MD=y +CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID456=m CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_MIRROR=m -CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_UEVENT=y -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m CONFIG_NETDEVICES=y CONFIG_DUMMY=m +CONFIG_MACVLAN=m CONFIG_EQUALIZER=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_VXLAN=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m +CONFIG_NET_ETHERNET=y CONFIG_SUN3LANCE=y -# CONFIG_NET_CADENCE is not set CONFIG_SUN3_82586=y -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOE=m CONFIG_SLIP=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y -# CONFIG_WLAN is not set -CONFIG_INPUT_EVDEV=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_INPUT_FF_MEMLESS=m # CONFIG_KEYBOARD_ATKBD is not set CONFIG_KEYBOARD_SUNKBD=y -# CONFIG_MOUSE_PS2 is not set +CONFIG_MOUSE_PS2=m CONFIG_MOUSE_SERIAL=m +# CONFIG_SERIO_SERPORT is not set CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set -CONFIG_NTP_PPS=y -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PTP_1588_CLOCK=m +CONFIG_GEN_RTC=m +CONFIG_GEN_RTC_X=y # CONFIG_HWMON is not set CONFIG_FB=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_HID=m CONFIG_HIDRAW=y -CONFIG_UHID=m -# CONFIG_HID_GENERIC is not set # CONFIG_USB_SUPPORT is not 
set -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_GENERIC=m -# CONFIG_IOMMU_SUPPORT is not set -CONFIG_PROC_HARDWARE=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set -CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m CONFIG_XFS_FS=m CONFIG_OCFS2_FS=m +# CONFIG_OCFS2_FS_STATS is not set # CONFIG_OCFS2_DEBUG_MASKLOG is not set -CONFIG_FANOTIFY=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_AUTOFS_FS=m CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m -CONFIG_CUSE=m CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_ZISOFS=y CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m +CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m -CONFIG_ECRYPT_FS=m -CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m CONFIG_CRAMFS=m CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_LZO=y -CONFIG_MINIX_FS=m -CONFIG_OMFS_FS=m +CONFIG_MINIX_FS=y CONFIG_HPFS_FS=m -CONFIG_QNX4FS_FS=m -CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y +CONFIG_NFS_V3=y CONFIG_NFS_V4=y -CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_CIFS=m -# CONFIG_CIFS_DEBUG is not set +CONFIG_SMB_FS=m +CONFIG_SMB_NLS_DEFAULT=y CONFIG_CODA_FS=m CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_737=m @@ -381,23 +296,10 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_ENCRYPTED_KEYS=m -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_USER=m +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m @@ -407,16 +309,19 @@ CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -432,14 +337,6 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC16=m CONFIG_CRC_T10DIF=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_TEST=m diff --git a/trunk/arch/m68k/configs/sun3x_defconfig b/trunk/arch/m68k/configs/sun3x_defconfig index 832d9539f441..c80941c7759e 100644 --- a/trunk/arch/m68k/configs/sun3x_defconfig +++ b/trunk/arch/m68k/configs/sun3x_defconfig @@ -1,71 +1,50 @@ +CONFIG_EXPERIMENTAL=y CONFIG_LOCALVERSION="-sun3x" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_LOG_BUF_SHIFT=16 -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_PID_NS is not set -# CONFIG_NET_NS is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SLAB=y CONFIG_MODULES=y 
CONFIG_MODULE_UNLOAD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -# CONFIG_EFI_PARTITION is not set -CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_SUN3X=y -# CONFIG_COMPACTION is not set -CONFIG_CLEANCACHE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m +CONFIG_PROC_HARDWARE=y CONFIG_NET=y CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m CONFIG_UNIX=y -CONFIG_UNIX_DIAG=m -CONFIG_XFRM_MIGRATE=y CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m CONFIG_INET_XFRM_MODE_BEET=m -# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m -CONFIG_INET_UDP_DIAG=m CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_GRE=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_ZONES=y -# CONFIG_NF_CONNTRACK_PROCFS is not set # CONFIG_NF_CT_PROTO_DCCP is not set CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_NF_CONNTRACK_AMANDA=m @@ -73,37 +52,25 @@ CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NETFILTER_XT_SET=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m CONFIG_NETFILTER_XT_MATCH_CLUSTER=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m @@ -114,8 +81,6 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m @@ -129,31 +94,22 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m 
-CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_TARGET_NETMAP=m CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -163,6 +119,7 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_QUEUE=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -171,34 +128,21 @@ CONFIG_IP6_NF_MATCH_OPTS=m CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m -CONFIG_NF_NAT_IPV6=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_RDS=m -CONFIG_RDS_TCP=m -CONFIG_L2TP=m CONFIG_ATALK=m -CONFIG_BATMAN_ADV=m -CONFIG_BATMAN_ADV_DAT=y -# CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set -# CONFIG_FW_LOADER_USER_HELPER is not set CONFIG_CONNECTOR=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m @@ -213,136 +157,106 @@ CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_SRP_TGT_ATTRS=y CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m CONFIG_SUN3X_ESP=y CONFIG_MD=y +CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID456=m CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_MIRROR=m -CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_UEVENT=y -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m CONFIG_NETDEVICES=y CONFIG_DUMMY=m +CONFIG_MACVLAN=m CONFIG_EQUALIZER=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_VXLAN=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m +CONFIG_NET_ETHERNET=y CONFIG_SUN3LANCE=y -# CONFIG_NET_CADENCE is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m 
+CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOE=m CONFIG_SLIP=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y -# CONFIG_WLAN is not set -CONFIG_INPUT_EVDEV=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_INPUT_FF_MEMLESS=m # CONFIG_KEYBOARD_ATKBD is not set CONFIG_KEYBOARD_SUNKBD=y -# CONFIG_MOUSE_PS2 is not set +CONFIG_MOUSE_PS2=m CONFIG_MOUSE_SERIAL=m +# CONFIG_SERIO_SERPORT is not set CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set -CONFIG_NTP_PPS=y -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PTP_1588_CLOCK=m +CONFIG_GEN_RTC=m +CONFIG_GEN_RTC_X=y # CONFIG_HWMON is not set CONFIG_FB=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_HID=m CONFIG_HIDRAW=y -CONFIG_UHID=m -# CONFIG_HID_GENERIC is not set # CONFIG_USB_SUPPORT is not set -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_GENERIC=m -# CONFIG_IOMMU_SUPPORT is not set -CONFIG_PROC_HARDWARE=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set -CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m CONFIG_XFS_FS=m CONFIG_OCFS2_FS=m +# CONFIG_OCFS2_FS_STATS is not set # CONFIG_OCFS2_DEBUG_MASKLOG is not set -CONFIG_FANOTIFY=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_AUTOFS_FS=m CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m -CONFIG_CUSE=m CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_ZISOFS=y CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m +CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m -CONFIG_ECRYPT_FS=m -CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m CONFIG_CRAMFS=m CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_LZO=y -CONFIG_MINIX_FS=m -CONFIG_OMFS_FS=m +CONFIG_MINIX_FS=y CONFIG_HPFS_FS=m -CONFIG_QNX4FS_FS=m -CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y +CONFIG_NFS_V3=y CONFIG_NFS_V4=y -CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y -CONFIG_CIFS=m -# CONFIG_CIFS_DEBUG is not set +CONFIG_SMB_FS=m +CONFIG_SMB_NLS_DEFAULT=y CONFIG_CODA_FS=m CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_737=m @@ -381,23 +295,10 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y -CONFIG_ASYNC_RAID6_TEST=m -CONFIG_ENCRYPTED_KEYS=m -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_USER=m +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m @@ -407,16 +308,19 @@ CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -432,14 +336,6 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m # CONFIG_CRYPTO_HW is not set 
+CONFIG_CRC16=m CONFIG_CRC_T10DIF=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_TEST=m diff --git a/trunk/arch/m68k/include/asm/Kbuild b/trunk/arch/m68k/include/asm/Kbuild index 09d77a862da3..c7933e41f10d 100644 --- a/trunk/arch/m68k/include/asm/Kbuild +++ b/trunk/arch/m68k/include/asm/Kbuild @@ -6,6 +6,7 @@ generic-y += device.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h +generic-y += futex.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ipcbuf.h diff --git a/trunk/arch/m68k/include/asm/futex.h b/trunk/arch/m68k/include/asm/futex.h deleted file mode 100644 index bc868af10c96..000000000000 --- a/trunk/arch/m68k/include/asm/futex.h +++ /dev/null @@ -1,94 +0,0 @@ -#ifndef _ASM_M68K_FUTEX_H -#define _ASM_M68K_FUTEX_H - -#ifdef __KERNEL__ -#if !defined(CONFIG_MMU) -#include -#else /* CONFIG_MMU */ - -#include -#include -#include - -static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) -{ - u32 val; - - if (unlikely(get_user(val, uaddr) != 0)) - return -EFAULT; - - if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) - return -EFAULT; - - *uval = val; - - return 0; -} - -static inline int -futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) -{ - int op = (encoded_op >> 28) & 7; - int cmp = (encoded_op >> 24) & 15; - int oparg = (encoded_op << 8) >> 20; - int cmparg = (encoded_op << 20) >> 20; - int oldval, ret; - u32 tmp; - - if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) - oparg = 1 << oparg; - - pagefault_disable(); /* implies preempt_disable() */ - - ret = -EFAULT; - if (unlikely(get_user(oldval, uaddr) != 0)) - goto out_pagefault_enable; - - ret = 0; - tmp = oldval; - - switch (op) { - case FUTEX_OP_SET: - tmp = oparg; - break; - case FUTEX_OP_ADD: - tmp += oparg; - break; - case FUTEX_OP_OR: - tmp |= oparg; - break; - case FUTEX_OP_ANDN: - tmp &= ~oparg; - break; - case FUTEX_OP_XOR: - tmp ^= oparg; - break; - default: - ret = -ENOSYS; - } - - if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0)) - ret = -EFAULT; - -out_pagefault_enable: - pagefault_enable(); /* subsumes preempt_enable() */ - - if (ret == 0) { - switch (cmp) { - case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; - case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; - case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; - case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; - case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; - case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; - default: ret = -ENOSYS; - } - } - return ret; -} - -#endif /* CONFIG_MMU */ -#endif /* __KERNEL__ */ -#endif /* _ASM_M68K_FUTEX_H */ diff --git a/trunk/arch/m68k/include/asm/gpio.h b/trunk/arch/m68k/include/asm/gpio.h index 2f6eec1e34b4..8cc83431805b 100644 --- a/trunk/arch/m68k/include/asm/gpio.h +++ b/trunk/arch/m68k/include/asm/gpio.h @@ -86,7 +86,6 @@ static inline int gpio_cansleep(unsigned gpio) return gpio < MCFGPIO_PIN_MAX ? 
0 : __gpio_cansleep(gpio); } -#ifndef CONFIG_GPIOLIB static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) { int err; @@ -106,5 +105,5 @@ static inline int gpio_request_one(unsigned gpio, unsigned long flags, const cha return err; } -#endif /* !CONFIG_GPIOLIB */ + #endif diff --git a/trunk/arch/m68k/include/asm/pgtable_mm.h b/trunk/arch/m68k/include/asm/pgtable_mm.h index 9f5abbda1ea7..dc35e0e106e4 100644 --- a/trunk/arch/m68k/include/asm/pgtable_mm.h +++ b/trunk/arch/m68k/include/asm/pgtable_mm.h @@ -135,6 +135,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, #define kern_addr_valid(addr) (1) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + /* MMU-specific headers */ #ifdef CONFIG_SUN3 diff --git a/trunk/arch/m68k/include/asm/pgtable_no.h b/trunk/arch/m68k/include/asm/pgtable_no.h index c527fc2ecf82..037028f4ab70 100644 --- a/trunk/arch/m68k/include/asm/pgtable_no.h +++ b/trunk/arch/m68k/include/asm/pgtable_no.h @@ -55,6 +55,9 @@ extern unsigned int kobjsize(const void *objp); */ #define pgtable_cache_init() do { } while (0) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + /* * All 32bit addresses are effectively valid for vmalloc... * Sort of meaningless for non-VM targets. diff --git a/trunk/arch/m68k/kernel/head.S b/trunk/arch/m68k/kernel/head.S index ac85f16534af..d197e7ff62c5 100644 --- a/trunk/arch/m68k/kernel/head.S +++ b/trunk/arch/m68k/kernel/head.S @@ -2752,9 +2752,11 @@ func_return get_new_page #ifdef CONFIG_MAC L(scc_initable_mac): + .byte 9,12 /* Reset */ .byte 4,0x44 /* x16, 1 stopbit, no parity */ .byte 3,0xc0 /* receiver: 8 bpc */ .byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */ + .byte 9,0 /* no interrupts */ .byte 10,0 /* NRZ */ .byte 11,0x50 /* use baud rate generator */ .byte 12,1,13,0 /* 38400 baud */ @@ -2897,7 +2899,6 @@ func_start serial_init,%d0/%d1/%a0/%a1 is_not_mac(L(serial_init_not_mac)) #ifdef SERIAL_DEBUG - /* You may define either or both of these. */ #define MAC_USE_SCC_A /* Modem port */ #define MAC_USE_SCC_B /* Printer port */ @@ -2907,21 +2908,9 @@ func_start serial_init,%d0/%d1/%a0/%a1 #define mac_scc_cha_b_data_offset 0x4 #define mac_scc_cha_a_data_offset 0x6 -#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) - movel %pc@(L(mac_sccbase)),%a0 - /* Reset SCC device */ - moveb #9,%a0@(mac_scc_cha_a_ctrl_offset) - moveb #0xc0,%a0@(mac_scc_cha_a_ctrl_offset) - /* Wait for 5 PCLK cycles, which is about 68 CPU cycles */ - /* 5 / 3.6864 MHz = approx. 
1.36 us = 68 / 50 MHz */ - movel #35,%d0 -5: - subq #1,%d0 - jne 5b -#endif - #ifdef MAC_USE_SCC_A /* Initialize channel A */ + movel %pc@(L(mac_sccbase)),%a0 lea %pc@(L(scc_initable_mac)),%a1 5: moveb %a1@+,%d0 jmi 6f @@ -2933,6 +2922,9 @@ func_start serial_init,%d0/%d1/%a0/%a1 #ifdef MAC_USE_SCC_B /* Initialize channel B */ +#ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */ + movel %pc@(L(mac_sccbase)),%a0 +#endif /* MAC_USE_SCC_A */ lea %pc@(L(scc_initable_mac)),%a1 7: moveb %a1@+,%d0 jmi 8f @@ -2941,7 +2933,6 @@ func_start serial_init,%d0/%d1/%a0/%a1 jra 7b 8: #endif /* MAC_USE_SCC_B */ - #endif /* SERIAL_DEBUG */ jra L(serial_init_done) @@ -3015,17 +3006,17 @@ func_start serial_putc,%d0/%d1/%a0/%a1 #ifdef SERIAL_DEBUG -#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) - movel %pc@(L(mac_sccbase)),%a1 -#endif - #ifdef MAC_USE_SCC_A + movel %pc@(L(mac_sccbase)),%a1 3: btst #2,%a1@(mac_scc_cha_a_ctrl_offset) jeq 3b moveb %d0,%a1@(mac_scc_cha_a_data_offset) #endif /* MAC_USE_SCC_A */ #ifdef MAC_USE_SCC_B +#ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */ + movel %pc@(L(mac_sccbase)),%a1 +#endif /* MAC_USE_SCC_A */ 4: btst #2,%a1@(mac_scc_cha_b_ctrl_offset) jeq 4b moveb %d0,%a1@(mac_scc_cha_b_data_offset) diff --git a/trunk/arch/metag/include/asm/hugetlb.h b/trunk/arch/metag/include/asm/hugetlb.h index 471f481e67f3..f545477e61f3 100644 --- a/trunk/arch/metag/include/asm/hugetlb.h +++ b/trunk/arch/metag/include/asm/hugetlb.h @@ -2,7 +2,6 @@ #define _ASM_METAG_HUGETLB_H #include -#include static inline int is_hugepage_only_range(struct mm_struct *mm, diff --git a/trunk/arch/metag/include/asm/pgtable.h b/trunk/arch/metag/include/asm/pgtable.h index 0d9dc5487296..1cd13d595198 100644 --- a/trunk/arch/metag/include/asm/pgtable.h +++ b/trunk/arch/metag/include/asm/pgtable.h @@ -333,6 +333,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, #define kern_addr_valid(addr) (1) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + /* * No page table caches to initialise */ diff --git a/trunk/arch/microblaze/include/asm/cacheflush.h b/trunk/arch/microblaze/include/asm/cacheflush.h index ffea82a16d2c..0f553bc009a0 100644 --- a/trunk/arch/microblaze/include/asm/cacheflush.h +++ b/trunk/arch/microblaze/include/asm/cacheflush.h @@ -102,23 +102,21 @@ do { \ #define flush_cache_range(vma, start, len) do { } while (0) -static inline void copy_to_user_page(struct vm_area_struct *vma, - struct page *page, unsigned long vaddr, - void *dst, void *src, int len) -{ - u32 addr = virt_to_phys(dst); - memcpy(dst, src, len); - if (vma->vm_flags & VM_EXEC) { - invalidate_icache_range(addr, addr + PAGE_SIZE); - flush_dcache_range(addr, addr + PAGE_SIZE); - } -} - -static inline void copy_from_user_page(struct vm_area_struct *vma, - struct page *page, unsigned long vaddr, - void *dst, void *src, int len) -{ - memcpy(dst, src, len); -} +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ +do { \ + u32 addr = virt_to_phys(dst); \ + memcpy((dst), (src), (len)); \ + if (vma->vm_flags & VM_EXEC) { \ + invalidate_icache_range((unsigned) (addr), \ + (unsigned) (addr) + PAGE_SIZE); \ + flush_dcache_range((unsigned) (addr), \ + (unsigned) (addr) + PAGE_SIZE); \ + } \ +} while (0) + +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ +do { \ + memcpy((dst), (src), (len)); \ +} while (0) #endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */ diff --git a/trunk/arch/microblaze/include/asm/futex.h 
b/trunk/arch/microblaze/include/asm/futex.h index 01848f056f43..ff8cde159d9a 100644 --- a/trunk/arch/microblaze/include/asm/futex.h +++ b/trunk/arch/microblaze/include/asm/futex.h @@ -105,7 +105,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, __asm__ __volatile__ ("1: lwx %1, %3, r0; \ cmp %2, %1, %4; \ - bnei %2, 3f; \ + beqi %2, 3f; \ 2: swx %5, %3, r0; \ addic %2, r0, 0; \ bnei %2, 1b; \ diff --git a/trunk/arch/microblaze/include/asm/io.h b/trunk/arch/microblaze/include/asm/io.h index 2565cb94f32f..8cb8a8566ede 100644 --- a/trunk/arch/microblaze/include/asm/io.h +++ b/trunk/arch/microblaze/include/asm/io.h @@ -123,11 +123,11 @@ static inline void writel(unsigned int v, volatile void __iomem *addr) * inb_p/inw_p/... * The macros don't do byte-swapping. */ -#define inb(port) readb((u8 *)((unsigned long)(port))) +#define inb(port) readb((u8 *)((port))) #define outb(val, port) writeb((val), (u8 *)((unsigned long)(port))) -#define inw(port) readw((u16 *)((unsigned long)(port))) +#define inw(port) readw((u16 *)((port))) #define outw(val, port) writew((val), (u16 *)((unsigned long)(port))) -#define inl(port) readl((u32 *)((unsigned long)(port))) +#define inl(port) readl((u32 *)((port))) #define outl(val, port) writel((val), (u32 *)((unsigned long)(port))) #define inb_p(port) inb((port)) diff --git a/trunk/arch/microblaze/include/asm/pgtable.h b/trunk/arch/microblaze/include/asm/pgtable.h index 95cef0b5f836..a7311cd9dee0 100644 --- a/trunk/arch/microblaze/include/asm/pgtable.h +++ b/trunk/arch/microblaze/include/asm/pgtable.h @@ -13,6 +13,9 @@ #include +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + #ifndef __ASSEMBLY__ extern int mem_init_done; #endif diff --git a/trunk/arch/microblaze/include/asm/uaccess.h b/trunk/arch/microblaze/include/asm/uaccess.h index 04e49553bdf9..efe59d881789 100644 --- a/trunk/arch/microblaze/include/asm/uaccess.h +++ b/trunk/arch/microblaze/include/asm/uaccess.h @@ -99,13 +99,13 @@ static inline int access_ok(int type, const void __user *addr, if ((get_fs().seg < ((unsigned long)addr)) || (get_fs().seg < ((unsigned long)addr + size - 1))) { pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n", - type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, + type ? "WRITE" : "READ ", (u32)addr, (u32)size, (u32)get_fs().seg); return 0; } ok: pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n", - type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, + type ? "WRITE" : "READ ", (u32)addr, (u32)size, (u32)get_fs().seg); return 1; } diff --git a/trunk/arch/microblaze/kernel/cpu/cache.c b/trunk/arch/microblaze/kernel/cpu/cache.c index a6e44410672d..4254514b4c8c 100644 --- a/trunk/arch/microblaze/kernel/cpu/cache.c +++ b/trunk/arch/microblaze/kernel/cpu/cache.c @@ -140,7 +140,7 @@ do { \ /* It is used only first parameter for OP - for wic, wdc */ #define CACHE_RANGE_LOOP_1(start, end, line_length, op) \ do { \ - int volatile temp = 0; \ + int volatile temp; \ int align = ~(line_length - 1); \ end = ((end & align) == end) ? 
end - line_length : end & align; \ WARN_ON(end - start < 0); \ diff --git a/trunk/arch/mips/alchemy/board-gpr.c b/trunk/arch/mips/alchemy/board-gpr.c index 9edc35ff8cf1..cb0f6afb7389 100644 --- a/trunk/arch/mips/alchemy/board-gpr.c +++ b/trunk/arch/mips/alchemy/board-gpr.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/arch/mips/alchemy/common/time.c b/trunk/arch/mips/alchemy/common/time.c index 93fa586d52e2..38afb11ba2c4 100644 --- a/trunk/arch/mips/alchemy/common/time.c +++ b/trunk/arch/mips/alchemy/common/time.c @@ -36,7 +36,6 @@ #include #include -#include #include #include #include diff --git a/trunk/arch/mips/ath79/setup.c b/trunk/arch/mips/ath79/setup.c index 8be4e856b8b8..a0233a2c1988 100644 --- a/trunk/arch/mips/ath79/setup.c +++ b/trunk/arch/mips/ath79/setup.c @@ -19,7 +19,6 @@ #include #include -#include #include /* for mips_hpt_frequency */ #include /* for _machine_{restart,halt} */ #include diff --git a/trunk/arch/mips/cavium-octeon/setup.c b/trunk/arch/mips/cavium-octeon/setup.c index 01b1b3f94feb..b0baa299f899 100644 --- a/trunk/arch/mips/cavium-octeon/setup.c +++ b/trunk/arch/mips/cavium-octeon/setup.c @@ -428,16 +428,13 @@ static void octeon_restart(char *command) */ static void octeon_kill_core(void *arg) { - if (octeon_is_simulation()) + mb(); + if (octeon_is_simulation()) { + /* The simulator needs the watchdog to stop for dead cores */ + cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0); /* A break instruction causes the simulator stop a core */ - asm volatile ("break" ::: "memory"); - - local_irq_disable(); - /* Disable watchdog on this core. */ - cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0); - /* Spin in a low power mode. */ - while (true) - asm volatile ("wait" ::: "memory"); + asm volatile ("sync\nbreak"); + } } diff --git a/trunk/arch/mips/cobalt/reset.c b/trunk/arch/mips/cobalt/reset.c index 4eedd481dd00..516b4428df4e 100644 --- a/trunk/arch/mips/cobalt/reset.c +++ b/trunk/arch/mips/cobalt/reset.c @@ -12,7 +12,6 @@ #include #include -#include #include #include diff --git a/trunk/arch/mips/configs/db1000_defconfig b/trunk/arch/mips/configs/db1000_defconfig index bac26b971c5e..face9d26e6d5 100644 --- a/trunk/arch/mips/configs/db1000_defconfig +++ b/trunk/arch/mips/configs/db1000_defconfig @@ -228,6 +228,7 @@ CONFIG_HIDRAW=y CONFIG_USB_HID=y CONFIG_USB_SUPPORT=y CONFIG_USB=y +CONFIG_USB_SUSPEND=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_ROOT_HUB_TT=y CONFIG_USB_EHCI_TT_NEWSCHED=y diff --git a/trunk/arch/mips/configs/db1235_defconfig b/trunk/arch/mips/configs/db1235_defconfig index e2b4ad55462f..14752dde7540 100644 --- a/trunk/arch/mips/configs/db1235_defconfig +++ b/trunk/arch/mips/configs/db1235_defconfig @@ -344,6 +344,7 @@ CONFIG_UHID=y CONFIG_USB_HIDDEV=y CONFIG_USB=y CONFIG_USB_DYNAMIC_MINORS=y +CONFIG_USB_SUSPEND=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_HCD_PLATFORM=y CONFIG_USB_EHCI_ROOT_HUB_TT=y diff --git a/trunk/arch/mips/configs/lemote2f_defconfig b/trunk/arch/mips/configs/lemote2f_defconfig index 343bebc4b63b..b6acd2f256b6 100644 --- a/trunk/arch/mips/configs/lemote2f_defconfig +++ b/trunk/arch/mips/configs/lemote2f_defconfig @@ -300,6 +300,7 @@ CONFIG_USB=y CONFIG_USB_DEVICEFS=y # CONFIG_USB_DEVICE_CLASS is not set CONFIG_USB_DYNAMIC_MINORS=y +CONFIG_USB_SUSPEND=y CONFIG_USB_OTG_WHITELIST=y CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y diff --git a/trunk/arch/mips/include/asm/clock.h b/trunk/arch/mips/include/asm/clock.h index 778e32d817bc..c9456e7a7283 100644 --- 
a/trunk/arch/mips/include/asm/clock.h +++ b/trunk/arch/mips/include/asm/clock.h @@ -6,6 +6,8 @@ #include #include +extern void (*cpu_wait) (void); + struct clk; struct clk_ops { diff --git a/trunk/arch/mips/include/asm/idle.h b/trunk/arch/mips/include/asm/idle.h deleted file mode 100644 index d192158886b1..000000000000 --- a/trunk/arch/mips/include/asm/idle.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef __ASM_IDLE_H -#define __ASM_IDLE_H - -#include - -extern void (*cpu_wait)(void); -extern void r4k_wait(void); -extern asmlinkage void __r4k_wait(void); -extern void r4k_wait_irqoff(void); -extern void __pastwait(void); - -static inline int using_rollback_handler(void) -{ - return cpu_wait == r4k_wait; -} - -static inline int address_is_in_r4k_wait_irqoff(unsigned long addr) -{ - return addr >= (unsigned long)r4k_wait_irqoff && - addr < (unsigned long)__pastwait; -} - -#endif /* __ASM_IDLE_H */ diff --git a/trunk/arch/mips/include/asm/io.h b/trunk/arch/mips/include/asm/io.h index b7e59853fd33..1be13727323f 100644 --- a/trunk/arch/mips/include/asm/io.h +++ b/trunk/arch/mips/include/asm/io.h @@ -118,7 +118,7 @@ static inline void set_io_port_base(unsigned long base) */ static inline unsigned long virt_to_phys(volatile const void *address) { - return __pa(address); + return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET; } /* diff --git a/trunk/arch/mips/include/asm/kvm.h b/trunk/arch/mips/include/asm/kvm.h new file mode 100644 index 000000000000..85789eacbf18 --- /dev/null +++ b/trunk/arch/mips/include/asm/kvm.h @@ -0,0 +1,55 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal +*/ + +#ifndef __LINUX_KVM_MIPS_H +#define __LINUX_KVM_MIPS_H + +#include + +#define __KVM_MIPS + +#define N_MIPS_COPROC_REGS 32 +#define N_MIPS_COPROC_SEL 8 + +/* for KVM_GET_REGS and KVM_SET_REGS */ +struct kvm_regs { + __u32 gprs[32]; + __u32 hi; + __u32 lo; + __u32 pc; + + __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL]; +}; + +/* for KVM_GET_SREGS and KVM_SET_SREGS */ +struct kvm_sregs { +}; + +/* for KVM_GET_FPU and KVM_SET_FPU */ +struct kvm_fpu { +}; + +struct kvm_debug_exit_arch { +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +struct kvm_mips_interrupt { + /* in */ + __u32 cpu; + __u32 irq; +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +#endif /* __LINUX_KVM_MIPS_H */ diff --git a/trunk/arch/mips/include/asm/kvm_host.h b/trunk/arch/mips/include/asm/kvm_host.h index 4d6fa0bf1305..e68781e18387 100644 --- a/trunk/arch/mips/include/asm/kvm_host.h +++ b/trunk/arch/mips/include/asm/kvm_host.h @@ -336,7 +336,7 @@ enum emulation_result { #define VPN2_MASK 0xffffe000 #define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G)) #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) -#define TLB_ASID(x) ((x).tlb_hi & ASID_MASK) +#define TLB_ASID(x) (ASID_MASK((x).tlb_hi)) #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? 
((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V)) struct kvm_mips_tlb { @@ -496,6 +496,10 @@ struct kvm_mips_callbacks { uint32_t cause); int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority, uint32_t cause); + int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu, + struct kvm_regs *regs); + int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu, + struct kvm_regs *regs); }; extern struct kvm_mips_callbacks *kvm_mips_callbacks; int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); diff --git a/trunk/arch/mips/include/asm/mmu_context.h b/trunk/arch/mips/include/asm/mmu_context.h index 516e6e9a5594..1554721e4808 100644 --- a/trunk/arch/mips/include/asm/mmu_context.h +++ b/trunk/arch/mips/include/asm/mmu_context.h @@ -67,45 +67,68 @@ extern unsigned long pgd_current[]; TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) #endif #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/ -#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) -#define ASID_INC 0x40 -#define ASID_MASK 0xfc0 - -#elif defined(CONFIG_CPU_R8000) - -#define ASID_INC 0x10 -#define ASID_MASK 0xff0 - -#elif defined(CONFIG_MIPS_MT_SMTC) - -#define ASID_INC 0x1 -extern unsigned long smtc_asid_mask; -#define ASID_MASK (smtc_asid_mask) -#define HW_ASID_MASK 0xff -/* End SMTC/34K debug hack */ -#else /* FIXME: not correct for R6000 */ - -#define ASID_INC 0x1 -#define ASID_MASK 0xff +#define ASID_INC(asid) \ +({ \ + unsigned long __asid = asid; \ + __asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t" \ + ".section\t__asid_inc,\"a\"\n\t" \ + ".word\t1b\n\t" \ + ".previous" \ + :"=r" (__asid) \ + :"0" (__asid)); \ + __asid; \ +}) +#define ASID_MASK(asid) \ +({ \ + unsigned long __asid = asid; \ + __asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t" \ + ".section\t__asid_mask,\"a\"\n\t" \ + ".word\t1b\n\t" \ + ".previous" \ + :"=r" (__asid) \ + :"r" (__asid)); \ + __asid; \ +}) +#define ASID_VERSION_MASK \ +({ \ + unsigned long __asid; \ + __asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t" \ + ".section\t__asid_version_mask,\"a\"\n\t" \ + ".word\t1b\n\t" \ + ".previous" \ + :"=r" (__asid)); \ + __asid; \ +}) +#define ASID_FIRST_VERSION \ +({ \ + unsigned long __asid = asid; \ + __asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t" \ + ".section\t__asid_first_version,\"a\"\n\t" \ + ".word\t1b\n\t" \ + ".previous" \ + :"=r" (__asid)); \ + __asid; \ +}) + +#define ASID_FIRST_VERSION_R3000 0x1000 +#define ASID_FIRST_VERSION_R4000 0x100 +#define ASID_FIRST_VERSION_R8000 0x1000 +#define ASID_FIRST_VERSION_RM9000 0x1000 +#ifdef CONFIG_MIPS_MT_SMTC +#define SMTC_HW_ASID_MASK 0xff +extern unsigned int smtc_asid_mask; #endif #define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) -#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) +#define cpu_asid(cpu, mm) ASID_MASK(cpu_context((cpu), (mm))) #define asid_cache(cpu) (cpu_data[cpu].asid_cache) static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } -/* - * All unused by hardware upper bits will be considered - * as a software asid extension. - */ -#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) -#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) - #ifndef CONFIG_MIPS_MT_SMTC /* Normal, classic MIPS get_new_mmu_context */ static inline void @@ -114,10 +137,10 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) extern void kvm_local_flush_tlb_all(void); unsigned long asid = asid_cache(cpu); - if (! 
((asid += ASID_INC) & ASID_MASK) ) { + if (!ASID_MASK((asid = ASID_INC(asid)))) { if (cpu_has_vtag_icache) flush_icache_all(); -#ifdef CONFIG_KVM +#ifdef CONFIG_VIRTUALIZATION kvm_local_flush_tlb_all(); /* start new asid cycle */ #else local_flush_tlb_all(); /* start new asid cycle */ @@ -177,7 +200,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, * free up the ASID value for use and flush any old * instances of it from the TLB. */ - oldasid = (read_c0_entryhi() & ASID_MASK); + oldasid = ASID_MASK(read_c0_entryhi()); if(smtc_live_asid[mytlb][oldasid]) { smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); if(smtc_live_asid[mytlb][oldasid] == 0) @@ -188,7 +211,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, * having ASID_MASK smaller than the hardware maximum, * make sure no "soft" bits become "hard"... */ - write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | + write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) | cpu_asid(cpu, next)); ehb(); /* Make sure it propagates to TCStatus */ evpe(mtflags); @@ -241,15 +264,15 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) #ifdef CONFIG_MIPS_MT_SMTC /* See comments for similar code above */ mtflags = dvpe(); - oldasid = read_c0_entryhi() & ASID_MASK; + oldasid = ASID_MASK(read_c0_entryhi()); if(smtc_live_asid[mytlb][oldasid]) { smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); if(smtc_live_asid[mytlb][oldasid] == 0) smtc_flush_tlb_asid(oldasid); } /* See comments for similar code above */ - write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | - cpu_asid(cpu, next)); + write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) | + cpu_asid(cpu, next)); ehb(); /* Make sure it propagates to TCStatus */ evpe(mtflags); #else @@ -286,14 +309,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu) #ifdef CONFIG_MIPS_MT_SMTC /* See comments for similar code above */ prevvpe = dvpe(); - oldasid = (read_c0_entryhi() & ASID_MASK); + oldasid = ASID_MASK(read_c0_entryhi()); if (smtc_live_asid[mytlb][oldasid]) { smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); if(smtc_live_asid[mytlb][oldasid] == 0) smtc_flush_tlb_asid(oldasid); } /* See comments for similar code above */ - write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) + write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) | cpu_asid(cpu, mm)); ehb(); /* Make sure it propagates to TCStatus */ evpe(prevvpe); diff --git a/trunk/arch/mips/include/asm/page.h b/trunk/arch/mips/include/asm/page.h index f59552fae917..eab99e536b5c 100644 --- a/trunk/arch/mips/include/asm/page.h +++ b/trunk/arch/mips/include/asm/page.h @@ -46,6 +46,7 @@ #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ #include +#include extern void build_clear_page(void); extern void build_copy_page(void); @@ -150,7 +151,6 @@ typedef struct { unsigned long pgprot; } pgprot_t; ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) #endif #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) -#include /* * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad @@ -171,13 +171,14 @@ typedef struct { unsigned long pgprot; } pgprot_t; #ifdef CONFIG_FLATMEM -static inline int pfn_valid(unsigned long pfn) -{ - /* avoid include hell */ - extern unsigned long max_mapnr; - - return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr; -} +#define pfn_valid(pfn) \ +({ \ + unsigned long __pfn = (pfn); \ + /* avoid include hell */ \ + extern unsigned long min_low_pfn; \ + \ + __pfn >= min_low_pfn && __pfn < max_mapnr; \ +}) #elif 
defined(CONFIG_SPARSEMEM) diff --git a/trunk/arch/mips/include/asm/pgtable.h b/trunk/arch/mips/include/asm/pgtable.h index 008324d1c261..8b8f6b393363 100644 --- a/trunk/arch/mips/include/asm/pgtable.h +++ b/trunk/arch/mips/include/asm/pgtable.h @@ -394,7 +394,9 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma, phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size); return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot); } -#define io_remap_pfn_range io_remap_pfn_range +#else +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE diff --git a/trunk/arch/mips/include/asm/processor.h b/trunk/arch/mips/include/asm/processor.h index 1470b7b68b0e..71686c897dea 100644 --- a/trunk/arch/mips/include/asm/processor.h +++ b/trunk/arch/mips/include/asm/processor.h @@ -28,6 +28,7 @@ /* * System setup and hardware flags.. */ +extern void (*cpu_wait)(void); extern unsigned int vced_count, vcei_count; diff --git a/trunk/arch/mips/include/asm/ptrace.h b/trunk/arch/mips/include/asm/ptrace.h index 5e6cd0947393..a3186f2bb8a0 100644 --- a/trunk/arch/mips/include/asm/ptrace.h +++ b/trunk/arch/mips/include/asm/ptrace.h @@ -16,38 +16,6 @@ #include #include -/* - * This struct defines the way the registers are stored on the stack during a - * system call/exception. As usual the registers k0/k1 aren't being saved. - */ -struct pt_regs { -#ifdef CONFIG_32BIT - /* Pad bytes for argument save space on the stack. */ - unsigned long pad0[6]; -#endif - - /* Saved main processor registers. */ - unsigned long regs[32]; - - /* Saved special registers. */ - unsigned long cp0_status; - unsigned long hi; - unsigned long lo; -#ifdef CONFIG_CPU_HAS_SMARTMIPS - unsigned long acx; -#endif - unsigned long cp0_badvaddr; - unsigned long cp0_cause; - unsigned long cp0_epc; -#ifdef CONFIG_MIPS_MT_SMTC - unsigned long cp0_tcstatus; -#endif /* CONFIG_MIPS_MT_SMTC */ -#ifdef CONFIG_CPU_CAVIUM_OCTEON - unsigned long long mpl[3]; /* MTM{0,1,2} */ - unsigned long long mtp[3]; /* MTP{0,1,2} */ -#endif -} __aligned(8); - struct task_struct; extern int ptrace_getregs(struct task_struct *child, __s64 __user *data); diff --git a/trunk/arch/mips/include/uapi/asm/kvm.h b/trunk/arch/mips/include/uapi/asm/kvm.h deleted file mode 100644 index f09ff5ae2059..000000000000 --- a/trunk/arch/mips/include/uapi/asm/kvm.h +++ /dev/null @@ -1,135 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. - * Copyright (C) 2013 Cavium, Inc. - * Authors: Sanjay Lal - */ - -#ifndef __LINUX_KVM_MIPS_H -#define __LINUX_KVM_MIPS_H - -#include - -/* - * KVM MIPS specific structures and definitions. - * - * Some parts derived from the x86 version of this file. - */ - -/* - * for KVM_GET_REGS and KVM_SET_REGS - * - * If Config[AT] is zero (32-bit CPU), the register contents are - * stored in the lower 32-bits of the struct kvm_regs fields and sign - * extended to 64-bits. - */ -struct kvm_regs { - /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ - __u64 gpr[32]; - __u64 hi; - __u64 lo; - __u64 pc; -}; - -/* - * for KVM_GET_FPU and KVM_SET_FPU - * - * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs - * are zero filled. 
- */ -struct kvm_fpu { - __u64 fpr[32]; - __u32 fir; - __u32 fccr; - __u32 fexr; - __u32 fenr; - __u32 fcsr; - __u32 pad; -}; - - -/* - * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0 - * registers. The id field is broken down as follows: - * - * bits[2..0] - Register 'sel' index. - * bits[7..3] - Register 'rd' index. - * bits[15..8] - Must be zero. - * bits[31..16] - 1 -> CP0 registers. - * bits[51..32] - Must be zero. - * bits[63..52] - As per linux/kvm.h - * - * Other sets registers may be added in the future. Each set would - * have its own identifier in bits[31..16]. - * - * The registers defined in struct kvm_regs are also accessible, the - * id values for these are below. - */ - -#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0) -#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1) -#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2) -#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3) -#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4) -#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5) -#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6) -#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7) -#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8) -#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9) -#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10) -#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11) -#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12) -#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13) -#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14) -#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15) -#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16) -#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17) -#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18) -#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19) -#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20) -#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21) -#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22) -#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23) -#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24) -#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25) -#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26) -#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27) -#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28) -#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29) -#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30) -#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31) - -#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32) -#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33) -#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34) - -/* - * KVM MIPS specific structures and definitions - * - */ -struct kvm_debug_exit_arch { - __u64 epc; -}; - -/* for KVM_SET_GUEST_DEBUG */ -struct kvm_guest_debug_arch { -}; - -/* definition of registers in kvm_run */ -struct kvm_sync_regs { -}; - -/* dummy definition */ -struct kvm_sregs { -}; - -struct kvm_mips_interrupt { - /* in */ - __u32 cpu; - __u32 irq; -}; - -#endif /* __LINUX_KVM_MIPS_H */ diff --git a/trunk/arch/mips/include/uapi/asm/ptrace.h b/trunk/arch/mips/include/uapi/asm/ptrace.h index b26f7e317279..4d58d8468705 100644 --- 
a/trunk/arch/mips/include/uapi/asm/ptrace.h +++ b/trunk/arch/mips/include/uapi/asm/ptrace.h @@ -22,12 +22,16 @@ #define DSP_CONTROL 77 #define ACX 78 -#ifndef __KERNEL__ /* * This struct defines the way the registers are stored on the stack during a * system call/exception. As usual the registers k0/k1 aren't being saved. */ struct pt_regs { +#ifdef CONFIG_32BIT + /* Pad bytes for argument save space on the stack. */ + unsigned long pad0[6]; +#endif + /* Saved main processor registers. */ unsigned long regs[32]; @@ -35,11 +39,20 @@ struct pt_regs { unsigned long cp0_status; unsigned long hi; unsigned long lo; +#ifdef CONFIG_CPU_HAS_SMARTMIPS + unsigned long acx; +#endif unsigned long cp0_badvaddr; unsigned long cp0_cause; unsigned long cp0_epc; +#ifdef CONFIG_MIPS_MT_SMTC + unsigned long cp0_tcstatus; +#endif /* CONFIG_MIPS_MT_SMTC */ +#ifdef CONFIG_CPU_CAVIUM_OCTEON + unsigned long long mpl[3]; /* MTM{0,1,2} */ + unsigned long long mtp[3]; /* MTP{0,1,2} */ +#endif } __attribute__ ((aligned (8))); -#endif /* __KERNEL__ */ /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ #define PTRACE_GETREGS 12 diff --git a/trunk/arch/mips/include/uapi/asm/unistd.h b/trunk/arch/mips/include/uapi/asm/unistd.h index 1dee279f9665..16338b84fa79 100644 --- a/trunk/arch/mips/include/uapi/asm/unistd.h +++ b/trunk/arch/mips/include/uapi/asm/unistd.h @@ -694,17 +694,16 @@ #define __NR_process_vm_writev (__NR_Linux + 305) #define __NR_kcmp (__NR_Linux + 306) #define __NR_finit_module (__NR_Linux + 307) -#define __NR_getdents64 (__NR_Linux + 308) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 308 +#define __NR_Linux_syscalls 307 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 308 +#define __NR_64_Linux_syscalls 307 #if _MIPS_SIM == _MIPS_SIM_NABI32 diff --git a/trunk/arch/mips/kernel/Makefile b/trunk/arch/mips/kernel/Makefile index 423d871a946b..6ad9e04bdf62 100644 --- a/trunk/arch/mips/kernel/Makefile +++ b/trunk/arch/mips/kernel/Makefile @@ -4,7 +4,7 @@ extra-y := head.o vmlinux.lds -obj-y += cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \ +obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ prom.o ptrace.o reset.o setup.o signal.o syscall.o \ time.o topology.o traps.o unaligned.o watch.o vdso.o diff --git a/trunk/arch/mips/kernel/binfmt_elfn32.c b/trunk/arch/mips/kernel/binfmt_elfn32.c index 1188e00bb120..e06f777e9c49 100644 --- a/trunk/arch/mips/kernel/binfmt_elfn32.c +++ b/trunk/arch/mips/kernel/binfmt_elfn32.c @@ -119,15 +119,4 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)"); #undef TASK_SIZE #define TASK_SIZE TASK_SIZE32 -#undef cputime_to_timeval -#define cputime_to_timeval cputime_to_compat_timeval -static __inline__ void -cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) -{ - unsigned long jiffies = cputime_to_jiffies(cputime); - - value->tv_usec = (jiffies % HZ) * (1000000L / HZ); - value->tv_sec = jiffies / HZ; -} - #include "../../../fs/binfmt_elf.c" diff --git a/trunk/arch/mips/kernel/binfmt_elfo32.c b/trunk/arch/mips/kernel/binfmt_elfo32.c index 202e581e6096..97c5a1668e53 100644 --- a/trunk/arch/mips/kernel/binfmt_elfo32.c +++ b/trunk/arch/mips/kernel/binfmt_elfo32.c @@ -162,15 +162,4 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)"); #undef TASK_SIZE #define TASK_SIZE TASK_SIZE32 -#undef cputime_to_timeval -#define cputime_to_timeval cputime_to_compat_timeval -static __inline__ void -cputime_to_compat_timeval(const 
cputime_t cputime, struct compat_timeval *value) -{ - unsigned long jiffies = cputime_to_jiffies(cputime); - - value->tv_usec = (jiffies % HZ) * (1000000L / HZ); - value->tv_sec = jiffies / HZ; -} - #include "../../../fs/binfmt_elf.c" diff --git a/trunk/arch/mips/kernel/cpu-probe.c b/trunk/arch/mips/kernel/cpu-probe.c index c6568bf4b1b0..4bbffdb9024f 100644 --- a/trunk/arch/mips/kernel/cpu-probe.c +++ b/trunk/arch/mips/kernel/cpu-probe.c @@ -27,6 +27,105 @@ #include #include +/* + * Not all of the MIPS CPUs have the "wait" instruction available. Moreover, + * the implementation of the "wait" feature differs between CPU families. This + * points to the function that implements CPU specific wait. + * The wait instruction stops the pipeline and reduces the power consumption of + * the CPU very much. + */ +void (*cpu_wait)(void); +EXPORT_SYMBOL(cpu_wait); + +static void r3081_wait(void) +{ + unsigned long cfg = read_c0_conf(); + write_c0_conf(cfg | R30XX_CONF_HALT); +} + +static void r39xx_wait(void) +{ + local_irq_disable(); + if (!need_resched()) + write_c0_conf(read_c0_conf() | TX39_CONF_HALT); + local_irq_enable(); +} + +extern void r4k_wait(void); + +/* + * This variant is preferable as it allows testing need_resched and going to + * sleep depending on the outcome atomically. Unfortunately the "It is + * implementation-dependent whether the pipeline restarts when a non-enabled + * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes + * using this version a gamble. + */ +void r4k_wait_irqoff(void) +{ + local_irq_disable(); + if (!need_resched()) + __asm__(" .set push \n" + " .set mips3 \n" + " wait \n" + " .set pop \n"); + local_irq_enable(); + __asm__(" .globl __pastwait \n" + "__pastwait: \n"); +} + +/* + * The RM7000 variant has to handle erratum 38. The workaround is to not + * have any pending stores when the WAIT instruction is executed. + */ +static void rm7k_wait_irqoff(void) +{ + local_irq_disable(); + if (!need_resched()) + __asm__( + " .set push \n" + " .set mips3 \n" + " .set noat \n" + " mfc0 $1, $12 \n" + " sync \n" + " mtc0 $1, $12 # stalls until W stage \n" + " wait \n" + " mtc0 $1, $12 # stalls until W stage \n" + " .set pop \n"); + local_irq_enable(); +} + +/* + * The Au1xxx wait is available only if using 32khz counter or + * external timer source, but specifically not CP0 Counter. + * alchemy/common/time.c may override cpu_wait! 
+ */ +static void au1k_wait(void) +{ + __asm__(" .set mips3 \n" + " cache 0x14, 0(%0) \n" + " cache 0x14, 32(%0) \n" + " sync \n" + " nop \n" + " wait \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + " .set mips0 \n" + : : "r" (au1k_wait)); +} + +static int __initdata nowait; + +static int __init wait_disable(char *s) +{ + nowait = 1; + + return 1; +} + +__setup("nowait", wait_disable); + static int __cpuinitdata mips_fpu_disabled; static int __init fpu_disable(char *s) @@ -51,6 +150,105 @@ static int __init dsp_disable(char *s) __setup("nodsp", dsp_disable); +void __init check_wait(void) +{ + struct cpuinfo_mips *c = ¤t_cpu_data; + + if (nowait) { + printk("Wait instruction disabled.\n"); + return; + } + + switch (c->cputype) { + case CPU_R3081: + case CPU_R3081E: + cpu_wait = r3081_wait; + break; + case CPU_TX3927: + cpu_wait = r39xx_wait; + break; + case CPU_R4200: +/* case CPU_R4300: */ + case CPU_R4600: + case CPU_R4640: + case CPU_R4650: + case CPU_R4700: + case CPU_R5000: + case CPU_R5500: + case CPU_NEVADA: + case CPU_4KC: + case CPU_4KEC: + case CPU_4KSC: + case CPU_5KC: + case CPU_25KF: + case CPU_PR4450: + case CPU_BMIPS3300: + case CPU_BMIPS4350: + case CPU_BMIPS4380: + case CPU_BMIPS5000: + case CPU_CAVIUM_OCTEON: + case CPU_CAVIUM_OCTEON_PLUS: + case CPU_CAVIUM_OCTEON2: + case CPU_JZRISC: + case CPU_LOONGSON1: + case CPU_XLR: + case CPU_XLP: + cpu_wait = r4k_wait; + break; + + case CPU_RM7000: + cpu_wait = rm7k_wait_irqoff; + break; + + case CPU_M14KC: + case CPU_M14KEC: + case CPU_24K: + case CPU_34K: + case CPU_1004K: + cpu_wait = r4k_wait; + if (read_c0_config7() & MIPS_CONF7_WII) + cpu_wait = r4k_wait_irqoff; + break; + + case CPU_74K: + cpu_wait = r4k_wait; + if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0)) + cpu_wait = r4k_wait_irqoff; + break; + + case CPU_TX49XX: + cpu_wait = r4k_wait_irqoff; + break; + case CPU_ALCHEMY: + cpu_wait = au1k_wait; + break; + case CPU_20KC: + /* + * WAIT on Rev1.0 has E1, E2, E3 and E16. + * WAIT on Rev2.0 and Rev3.0 has E16. + * Rev3.1 WAIT is nop, why bother + */ + if ((c->processor_id & 0xff) <= 0x64) + break; + + /* + * Another rev is incremeting c0_count at a reduced clock + * rate while in WAIT mode. So we basically have the choice + * between using the cp0 timer as clocksource or avoiding + * the WAIT instruction. Until more details are known, + * disable the use of WAIT for 20Kc entirely. 
+ cpu_wait = r4k_wait; + */ + break; + case CPU_RM9000: + if ((c->processor_id & 0x00ff) >= 0x40) + cpu_wait = r4k_wait; + break; + default: + break; + } +} + static inline void check_errata(void) { struct cpuinfo_mips *c = ¤t_cpu_data; diff --git a/trunk/arch/mips/kernel/crash_dump.c b/trunk/arch/mips/kernel/crash_dump.c index 3be9e7bb30ff..35bed0d2342c 100644 --- a/trunk/arch/mips/kernel/crash_dump.c +++ b/trunk/arch/mips/kernel/crash_dump.c @@ -2,7 +2,6 @@ #include #include #include -#include static int __init parse_savemaxmem(char *p) { diff --git a/trunk/arch/mips/kernel/ftrace.c b/trunk/arch/mips/kernel/ftrace.c index dba90ec0dc38..cf5509f13dd5 100644 --- a/trunk/arch/mips/kernel/ftrace.c +++ b/trunk/arch/mips/kernel/ftrace.c @@ -25,16 +25,12 @@ #define MCOUNT_OFFSET_INSNS 4 #endif -#ifdef CONFIG_DYNAMIC_FTRACE - /* Arch override because MIPS doesn't need to run this from stop_machine() */ void arch_ftrace_update_code(int command) { ftrace_modify_all_code(command); } -#endif - /* * Check if the address is in kernel space * diff --git a/trunk/arch/mips/kernel/genex.S b/trunk/arch/mips/kernel/genex.S index 31fa856829cb..5c2ba9f08a80 100644 --- a/trunk/arch/mips/kernel/genex.S +++ b/trunk/arch/mips/kernel/genex.S @@ -122,7 +122,7 @@ handle_vcei: __FINIT .align 5 /* 32 byte rollback region */ -LEAF(__r4k_wait) +LEAF(r4k_wait) .set push .set noreorder /* start of rollback region */ @@ -146,14 +146,14 @@ LEAF(__r4k_wait) jr ra nop .set pop - END(__r4k_wait) + END(r4k_wait) .macro BUILD_ROLLBACK_PROLOGUE handler FEXPORT(rollback_\handler) .set push .set noat MFC0 k0, CP0_EPC - PTR_LA k1, __r4k_wait + PTR_LA k1, r4k_wait ori k0, 0x1f /* 32 byte rollback region */ xori k0, 0x1f bne k0, k1, 9f @@ -493,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp) .set noreorder /* check if TLB contains a entry for EPC */ MFC0 k1, CP0_ENTRYHI - andi k1, 0xff /* ASID_MASK */ + andi k1, 0xff /* ASID_MASK patched at run-time!! */ MFC0 k0, CP0_EPC PTR_SRL k0, _PAGE_SHIFT + 1 PTR_SLL k0, _PAGE_SHIFT + 1 diff --git a/trunk/arch/mips/kernel/idle.c b/trunk/arch/mips/kernel/idle.c deleted file mode 100644 index 0c655deeea4a..000000000000 --- a/trunk/arch/mips/kernel/idle.c +++ /dev/null @@ -1,245 +0,0 @@ -/* - * MIPS idle loop and WAIT instruction support. - * - * Copyright (C) xxxx the Anonymous - * Copyright (C) 1994 - 2006 Ralf Baechle - * Copyright (C) 2003, 2004 Maciej W. Rozycki - * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Not all of the MIPS CPUs have the "wait" instruction available. Moreover, - * the implementation of the "wait" feature differs between CPU families. This - * points to the function that implements CPU specific wait. - * The wait instruction stops the pipeline and reduces the power consumption of - * the CPU very much. 
- */ -void (*cpu_wait)(void); -EXPORT_SYMBOL(cpu_wait); - -static void r3081_wait(void) -{ - unsigned long cfg = read_c0_conf(); - write_c0_conf(cfg | R30XX_CONF_HALT); - local_irq_enable(); -} - -static void r39xx_wait(void) -{ - if (!need_resched()) - write_c0_conf(read_c0_conf() | TX39_CONF_HALT); - local_irq_enable(); -} - -void r4k_wait(void) -{ - local_irq_enable(); - __r4k_wait(); -} - -/* - * This variant is preferable as it allows testing need_resched and going to - * sleep depending on the outcome atomically. Unfortunately the "It is - * implementation-dependent whether the pipeline restarts when a non-enabled - * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes - * using this version a gamble. - */ -void r4k_wait_irqoff(void) -{ - if (!need_resched()) - __asm__( - " .set push \n" - " .set mips3 \n" - " wait \n" - " .set pop \n"); - local_irq_enable(); - __asm__( - " .globl __pastwait \n" - "__pastwait: \n"); -} - -/* - * The RM7000 variant has to handle erratum 38. The workaround is to not - * have any pending stores when the WAIT instruction is executed. - */ -static void rm7k_wait_irqoff(void) -{ - if (!need_resched()) - __asm__( - " .set push \n" - " .set mips3 \n" - " .set noat \n" - " mfc0 $1, $12 \n" - " sync \n" - " mtc0 $1, $12 # stalls until W stage \n" - " wait \n" - " mtc0 $1, $12 # stalls until W stage \n" - " .set pop \n"); - local_irq_enable(); -} - -/* - * Au1 'wait' is only useful when the 32kHz counter is used as timer, - * since coreclock (and the cp0 counter) stops upon executing it. Only an - * interrupt can wake it, so they must be enabled before entering idle modes. - */ -static void au1k_wait(void) -{ - unsigned long c0status = read_c0_status() | 1; /* irqs on */ - - __asm__( - " .set mips3 \n" - " cache 0x14, 0(%0) \n" - " cache 0x14, 32(%0) \n" - " sync \n" - " mtc0 %1, $12 \n" /* wr c0status */ - " wait \n" - " nop \n" - " nop \n" - " nop \n" - " nop \n" - " .set mips0 \n" - : : "r" (au1k_wait), "r" (c0status)); -} - -static int __initdata nowait; - -static int __init wait_disable(char *s) -{ - nowait = 1; - - return 1; -} - -__setup("nowait", wait_disable); - -void __init check_wait(void) -{ - struct cpuinfo_mips *c = ¤t_cpu_data; - - if (nowait) { - printk("Wait instruction disabled.\n"); - return; - } - - switch (c->cputype) { - case CPU_R3081: - case CPU_R3081E: - cpu_wait = r3081_wait; - break; - case CPU_TX3927: - cpu_wait = r39xx_wait; - break; - case CPU_R4200: -/* case CPU_R4300: */ - case CPU_R4600: - case CPU_R4640: - case CPU_R4650: - case CPU_R4700: - case CPU_R5000: - case CPU_R5500: - case CPU_NEVADA: - case CPU_4KC: - case CPU_4KEC: - case CPU_4KSC: - case CPU_5KC: - case CPU_25KF: - case CPU_PR4450: - case CPU_BMIPS3300: - case CPU_BMIPS4350: - case CPU_BMIPS4380: - case CPU_BMIPS5000: - case CPU_CAVIUM_OCTEON: - case CPU_CAVIUM_OCTEON_PLUS: - case CPU_CAVIUM_OCTEON2: - case CPU_JZRISC: - case CPU_LOONGSON1: - case CPU_XLR: - case CPU_XLP: - cpu_wait = r4k_wait; - break; - - case CPU_RM7000: - cpu_wait = rm7k_wait_irqoff; - break; - - case CPU_M14KC: - case CPU_M14KEC: - case CPU_24K: - case CPU_34K: - case CPU_1004K: - cpu_wait = r4k_wait; - if (read_c0_config7() & MIPS_CONF7_WII) - cpu_wait = r4k_wait_irqoff; - break; - - case CPU_74K: - cpu_wait = r4k_wait; - if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0)) - cpu_wait = r4k_wait_irqoff; - break; - - case CPU_TX49XX: - cpu_wait = r4k_wait_irqoff; - break; - case CPU_ALCHEMY: - cpu_wait = au1k_wait; - break; - case CPU_20KC: - /* - * WAIT on 
Rev1.0 has E1, E2, E3 and E16. - * WAIT on Rev2.0 and Rev3.0 has E16. - * Rev3.1 WAIT is nop, why bother - */ - if ((c->processor_id & 0xff) <= 0x64) - break; - - /* - * Another rev is incremeting c0_count at a reduced clock - * rate while in WAIT mode. So we basically have the choice - * between using the cp0 timer as clocksource or avoiding - * the WAIT instruction. Until more details are known, - * disable the use of WAIT for 20Kc entirely. - cpu_wait = r4k_wait; - */ - break; - case CPU_RM9000: - if ((c->processor_id & 0x00ff) >= 0x40) - cpu_wait = r4k_wait; - break; - default: - break; - } -} - -static void smtc_idle_hook(void) -{ -#ifdef CONFIG_MIPS_MT_SMTC - void smtc_idle_loop_hook(void); - - smtc_idle_loop_hook(); -#endif -} - -void arch_cpu_idle(void) -{ - smtc_idle_hook(); - if (cpu_wait) - cpu_wait(); - else - local_irq_enable(); -} diff --git a/trunk/arch/mips/kernel/kprobes.c b/trunk/arch/mips/kernel/kprobes.c index 1f8187ab0997..12bc4ebdf55b 100644 --- a/trunk/arch/mips/kernel/kprobes.c +++ b/trunk/arch/mips/kernel/kprobes.c @@ -207,10 +207,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - if (p->ainsn.insn) { - free_insn_slot(p->ainsn.insn, 0); - p->ainsn.insn = NULL; - } + free_insn_slot(p->ainsn.insn, 0); } static void save_previous_kprobe(struct kprobe_ctlblk *kcb) diff --git a/trunk/arch/mips/kernel/proc.c b/trunk/arch/mips/kernel/proc.c index acb34373679e..a3e461408b7e 100644 --- a/trunk/arch/mips/kernel/proc.c +++ b/trunk/arch/mips/kernel/proc.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/arch/mips/kernel/process.c b/trunk/arch/mips/kernel/process.c index c6a041d9d05d..eb902c1f0cad 100644 --- a/trunk/arch/mips/kernel/process.c +++ b/trunk/arch/mips/kernel/process.c @@ -51,6 +51,19 @@ void arch_cpu_idle_dead(void) } #endif +void arch_cpu_idle(void) +{ +#ifdef CONFIG_MIPS_MT_SMTC + extern void smtc_idle_loop_hook(void); + + smtc_idle_loop_hook(); +#endif + if (cpu_wait) + (*cpu_wait)(); + else + local_irq_enable(); +} + asmlinkage void ret_from_fork(void); asmlinkage void ret_from_kernel_thread(void); @@ -211,9 +224,6 @@ struct mips_frame_info { int pc_offset; }; -#define J_TARGET(pc,target) \ - (((unsigned long)(pc) & 0xf0000000) | ((target) << 2)) - static inline int is_ra_save_ins(union mips_instruction *ip) { #ifdef CONFIG_CPU_MICROMIPS @@ -254,7 +264,7 @@ static inline int is_ra_save_ins(union mips_instruction *ip) #endif } -static inline int is_jump_ins(union mips_instruction *ip) +static inline int is_jal_jalr_jr_ins(union mips_instruction *ip) { #ifdef CONFIG_CPU_MICROMIPS /* @@ -278,8 +288,6 @@ static inline int is_jump_ins(union mips_instruction *ip) return 0; return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op); #else - if (ip->j_format.opcode == j_op) - return 1; if (ip->j_format.opcode == jal_op) return 1; if (ip->r_format.opcode != spec_op) @@ -342,7 +350,7 @@ static int get_frame_info(struct mips_frame_info *info) for (i = 0; i < max_insns; i++, ip++) { - if (is_jump_ins(ip)) + if (is_jal_jalr_jr_ins(ip)) break; if (!info->frame_size) { if (is_sp_move_ins(ip)) @@ -385,42 +393,15 @@ static int get_frame_info(struct mips_frame_info *info) static struct mips_frame_info schedule_mfi __read_mostly; -#ifdef CONFIG_KALLSYMS -static unsigned long get___schedule_addr(void) -{ - return kallsyms_lookup_name("__schedule"); -} -#else -static unsigned long get___schedule_addr(void) -{ - union mips_instruction *ip = (void *)schedule; - 
int max_insns = 8; - int i; - - for (i = 0; i < max_insns; i++, ip++) { - if (ip->j_format.opcode == j_op) - return J_TARGET(ip, ip->j_format.target); - } - return 0; -} -#endif - static int __init frame_info_init(void) { unsigned long size = 0; #ifdef CONFIG_KALLSYMS unsigned long ofs; -#endif - unsigned long addr; - - addr = get___schedule_addr(); - if (!addr) - addr = (unsigned long)schedule; -#ifdef CONFIG_KALLSYMS - kallsyms_lookup_size_offset(addr, &size, &ofs); + kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs); #endif - schedule_mfi.func = (void *)addr; + schedule_mfi.func = schedule; schedule_mfi.func_size = size; get_frame_info(&schedule_mfi); diff --git a/trunk/arch/mips/kernel/rtlx.c b/trunk/arch/mips/kernel/rtlx.c index 6fa198db8999..93c070b41b0d 100644 --- a/trunk/arch/mips/kernel/rtlx.c +++ b/trunk/arch/mips/kernel/rtlx.c @@ -40,7 +40,6 @@ #include #include #include -#include static struct rtlx_info *rtlx; static int major; diff --git a/trunk/arch/mips/kernel/scall64-64.S b/trunk/arch/mips/kernel/scall64-64.S index 97a5909a61cf..36cfd4060e1f 100644 --- a/trunk/arch/mips/kernel/scall64-64.S +++ b/trunk/arch/mips/kernel/scall64-64.S @@ -423,5 +423,4 @@ sys_call_table: PTR sys_process_vm_writev /* 5305 */ PTR sys_kcmp PTR sys_finit_module - PTR sys_getdents64 .size sys_call_table,.-sys_call_table diff --git a/trunk/arch/mips/kernel/smp.c b/trunk/arch/mips/kernel/smp.c index 6e7862ab46cc..c17619fe18e3 100644 --- a/trunk/arch/mips/kernel/smp.c +++ b/trunk/arch/mips/kernel/smp.c @@ -37,7 +37,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/arch/mips/kernel/smtc.c b/trunk/arch/mips/kernel/smtc.c index 75a4fd709841..31d22f3121c9 100644 --- a/trunk/arch/mips/kernel/smtc.c +++ b/trunk/arch/mips/kernel/smtc.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include @@ -112,7 +111,7 @@ static int vpe0limit; static int ipibuffers; static int nostlb; static int asidmask; -unsigned long smtc_asid_mask = 0xff; +unsigned int smtc_asid_mask = 0xff; static int __init vpe0tcs(char *str) { @@ -859,6 +858,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) unsigned long flags; int mtflags; unsigned long tcrestart; + extern void r4k_wait_irqoff(void), __pastwait(void); int set_resched_flag = (type == LINUX_SMP_IPI && action == SMP_RESCHEDULE_YOURSELF); @@ -914,7 +914,8 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) */ if (cpu_wait == r4k_wait_irqoff) { tcrestart = read_tc_c0_tcrestart(); - if (address_is_in_r4k_wait_irqoff(tcrestart)) { + if (tcrestart >= (unsigned long)r4k_wait_irqoff + && tcrestart < (unsigned long)__pastwait) { write_tc_c0_tcrestart(__pastwait); tcstatus &= ~TCSTATUS_IXMT; write_tc_c0_tcstatus(tcstatus); @@ -1394,7 +1395,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) asid = asid_cache(cpu); do { - if (!((asid += ASID_INC) & ASID_MASK) ) { + if (!ASID_MASK(ASID_INC(asid))) { if (cpu_has_vtag_icache) flush_icache_all(); /* Traverse all online CPUs (hack requires contiguous range) */ @@ -1413,7 +1414,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) mips_ihb(); } tcstat = read_tc_c0_tcstatus(); - smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); + smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i); if (!prevhalt) write_tc_c0_tchalt(0); } @@ -1422,7 +1423,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) asid = ASID_FIRST_VERSION; local_flush_tlb_all(); /* start new 
asid cycle */ } - } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); + } while (smtc_live_asid[tlb][ASID_MASK(asid)]); /* * SMTC shares the TLB within VPEs and possibly across all VPEs. @@ -1460,7 +1461,7 @@ void smtc_flush_tlb_asid(unsigned long asid) tlb_read(); ehb(); ehi = read_c0_entryhi(); - if ((ehi & ASID_MASK) == asid) { + if (ASID_MASK(ehi) == asid) { /* * Invalidate only entries with specified ASID, * makiing sure all entries differ. diff --git a/trunk/arch/mips/kernel/traps.c b/trunk/arch/mips/kernel/traps.c index a75ae40184aa..77cff1f6d050 100644 --- a/trunk/arch/mips/kernel/traps.c +++ b/trunk/arch/mips/kernel/traps.c @@ -41,7 +41,6 @@ #include #include #include -#include #include #include #include @@ -58,6 +57,7 @@ #include extern void check_wait(void); +extern asmlinkage void r4k_wait(void); extern asmlinkage void rollback_handle_int(void); extern asmlinkage void handle_int(void); extern u32 handle_tlbl[]; @@ -897,24 +897,22 @@ asmlinkage void do_bp(struct pt_regs *regs) asmlinkage void do_tr(struct pt_regs *regs) { - u32 opcode, tcode = 0; + unsigned int opcode, tcode = 0; u16 instr[2]; - unsigned long epc = msk_isa16_mode(exception_epc(regs)); + unsigned long epc = exception_epc(regs); - if (get_isa16_mode(regs->cp0_epc)) { - if (__get_user(instr[0], (u16 __user *)(epc + 0)) || - __get_user(instr[1], (u16 __user *)(epc + 2))) - goto out_sigsegv; - opcode = (instr[0] << 16) | instr[1]; - /* Immediate versions don't provide a code. */ - if (!(opcode & OPCODE)) - tcode = (opcode >> 12) & ((1 << 4) - 1); - } else { - if (__get_user(opcode, (u32 __user *)epc)) + if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) || + (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))) goto out_sigsegv; - /* Immediate versions don't provide a code. */ - if (!(opcode & OPCODE)) - tcode = (opcode >> 6) & ((1 << 10) - 1); + opcode = (instr[0] << 16) | instr[1]; + + /* Immediate versions don't provide a code. */ + if (!(opcode & OPCODE)) { + if (get_isa16_mode(regs->cp0_epc)) + /* microMIPS */ + tcode = (opcode >> 12) & 0x1f; + else + tcode = ((opcode >> 6) & ((1 << 10) - 1)); } do_trap_or_bp(regs, tcode, "Trap"); @@ -1544,7 +1542,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) extern char except_vec_vi, except_vec_vi_lui; extern char except_vec_vi_ori, except_vec_vi_end; extern char rollback_except_vec_vi; - char *vec_start = using_rollback_handler() ? + char *vec_start = (cpu_wait == r4k_wait) ? 
&rollback_except_vec_vi : &except_vec_vi; #ifdef CONFIG_MIPS_MT_SMTC /* @@ -1658,6 +1656,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) unsigned int cpu = smp_processor_id(); unsigned int status_set = ST0_CU0; unsigned int hwrena = cpu_hwrena_impl_bits; + unsigned long asid = 0; #ifdef CONFIG_MIPS_MT_SMTC int secondaryTC = 0; int bootTC = (cpu == 0); @@ -1741,8 +1740,9 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) } #endif /* CONFIG_MIPS_MT_SMTC */ - if (!cpu_data[cpu].asid_cache) - cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; + asid = ASID_FIRST_VERSION; + cpu_data[cpu].asid_cache = asid; + TLBMISS_HANDLER_SETUP(); atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; @@ -1814,8 +1814,10 @@ void __init trap_init(void) extern char except_vec4; extern char except_vec3_r4000; unsigned long i; + int rollback; check_wait(); + rollback = (cpu_wait == r4k_wait); #if defined(CONFIG_KGDB) if (kgdb_early_setup) @@ -1892,8 +1894,7 @@ void __init trap_init(void) if (board_be_init) board_be_init(); - set_except_vector(0, using_rollback_handler() ? rollback_handle_int - : handle_int); + set_except_vector(0, rollback ? rollback_handle_int : handle_int); set_except_vector(1, handle_tlbm); set_except_vector(2, handle_tlbl); set_except_vector(3, handle_tlbs); diff --git a/trunk/arch/mips/kvm/kvm_mips.c b/trunk/arch/mips/kvm/kvm_mips.c index dd203e59e6fd..e0dad0289797 100644 --- a/trunk/arch/mips/kvm/kvm_mips.c +++ b/trunk/arch/mips/kvm/kvm_mips.c @@ -195,7 +195,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { - return -ENOIOCTLCMD; + return -EINVAL; } void kvm_arch_free_memslot(struct kvm_memory_slot *free, @@ -401,7 +401,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { - return -ENOIOCTLCMD; + return -EINVAL; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) @@ -475,248 +475,14 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { - return -ENOIOCTLCMD; + return -EINVAL; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { - return -ENOIOCTLCMD; -} - -#define MIPS_CP0_32(_R, _S) \ - (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S))) - -#define MIPS_CP0_64(_R, _S) \ - (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S))) - -#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0) -#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0) -#define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0) -#define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0) -#define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2) -#define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0) -#define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1) -#define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0) -#define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0) -#define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0) -#define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0) -#define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0) -#define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0) -#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0) -#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0) -#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1) -#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0) -#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1) -#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2) -#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3) -#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7) -#define 
KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0) -#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0) - -static u64 kvm_mips_get_one_regs[] = { - KVM_REG_MIPS_R0, - KVM_REG_MIPS_R1, - KVM_REG_MIPS_R2, - KVM_REG_MIPS_R3, - KVM_REG_MIPS_R4, - KVM_REG_MIPS_R5, - KVM_REG_MIPS_R6, - KVM_REG_MIPS_R7, - KVM_REG_MIPS_R8, - KVM_REG_MIPS_R9, - KVM_REG_MIPS_R10, - KVM_REG_MIPS_R11, - KVM_REG_MIPS_R12, - KVM_REG_MIPS_R13, - KVM_REG_MIPS_R14, - KVM_REG_MIPS_R15, - KVM_REG_MIPS_R16, - KVM_REG_MIPS_R17, - KVM_REG_MIPS_R18, - KVM_REG_MIPS_R19, - KVM_REG_MIPS_R20, - KVM_REG_MIPS_R21, - KVM_REG_MIPS_R22, - KVM_REG_MIPS_R23, - KVM_REG_MIPS_R24, - KVM_REG_MIPS_R25, - KVM_REG_MIPS_R26, - KVM_REG_MIPS_R27, - KVM_REG_MIPS_R28, - KVM_REG_MIPS_R29, - KVM_REG_MIPS_R30, - KVM_REG_MIPS_R31, - - KVM_REG_MIPS_HI, - KVM_REG_MIPS_LO, - KVM_REG_MIPS_PC, - - KVM_REG_MIPS_CP0_INDEX, - KVM_REG_MIPS_CP0_CONTEXT, - KVM_REG_MIPS_CP0_PAGEMASK, - KVM_REG_MIPS_CP0_WIRED, - KVM_REG_MIPS_CP0_BADVADDR, - KVM_REG_MIPS_CP0_ENTRYHI, - KVM_REG_MIPS_CP0_STATUS, - KVM_REG_MIPS_CP0_CAUSE, - /* EPC set via kvm_regs, et al. */ - KVM_REG_MIPS_CP0_CONFIG, - KVM_REG_MIPS_CP0_CONFIG1, - KVM_REG_MIPS_CP0_CONFIG2, - KVM_REG_MIPS_CP0_CONFIG3, - KVM_REG_MIPS_CP0_CONFIG7, - KVM_REG_MIPS_CP0_ERROREPC -}; - -static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - s64 v; - - switch (reg->id) { - case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: - v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; - break; - case KVM_REG_MIPS_HI: - v = (long)vcpu->arch.hi; - break; - case KVM_REG_MIPS_LO: - v = (long)vcpu->arch.lo; - break; - case KVM_REG_MIPS_PC: - v = (long)vcpu->arch.pc; - break; - - case KVM_REG_MIPS_CP0_INDEX: - v = (long)kvm_read_c0_guest_index(cop0); - break; - case KVM_REG_MIPS_CP0_CONTEXT: - v = (long)kvm_read_c0_guest_context(cop0); - break; - case KVM_REG_MIPS_CP0_PAGEMASK: - v = (long)kvm_read_c0_guest_pagemask(cop0); - break; - case KVM_REG_MIPS_CP0_WIRED: - v = (long)kvm_read_c0_guest_wired(cop0); - break; - case KVM_REG_MIPS_CP0_BADVADDR: - v = (long)kvm_read_c0_guest_badvaddr(cop0); - break; - case KVM_REG_MIPS_CP0_ENTRYHI: - v = (long)kvm_read_c0_guest_entryhi(cop0); - break; - case KVM_REG_MIPS_CP0_STATUS: - v = (long)kvm_read_c0_guest_status(cop0); - break; - case KVM_REG_MIPS_CP0_CAUSE: - v = (long)kvm_read_c0_guest_cause(cop0); - break; - case KVM_REG_MIPS_CP0_ERROREPC: - v = (long)kvm_read_c0_guest_errorepc(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG: - v = (long)kvm_read_c0_guest_config(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG1: - v = (long)kvm_read_c0_guest_config1(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG2: - v = (long)kvm_read_c0_guest_config2(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG3: - v = (long)kvm_read_c0_guest_config3(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG7: - v = (long)kvm_read_c0_guest_config7(cop0); - break; - default: - return -EINVAL; - } - if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { - u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; - return put_user(v, uaddr64); - } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { - u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; - u32 v32 = (u32)v; - return put_user(v32, uaddr32); - } else { - return -EINVAL; - } -} - -static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - u64 v; - - if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { - u64 __user *uaddr64 = (u64 
__user *)(long)reg->addr; - - if (get_user(v, uaddr64) != 0) - return -EFAULT; - } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { - u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; - s32 v32; - - if (get_user(v32, uaddr32) != 0) - return -EFAULT; - v = (s64)v32; - } else { - return -EINVAL; - } - - switch (reg->id) { - case KVM_REG_MIPS_R0: - /* Silently ignore requests to set $0 */ - break; - case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31: - vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; - break; - case KVM_REG_MIPS_HI: - vcpu->arch.hi = v; - break; - case KVM_REG_MIPS_LO: - vcpu->arch.lo = v; - break; - case KVM_REG_MIPS_PC: - vcpu->arch.pc = v; - break; - - case KVM_REG_MIPS_CP0_INDEX: - kvm_write_c0_guest_index(cop0, v); - break; - case KVM_REG_MIPS_CP0_CONTEXT: - kvm_write_c0_guest_context(cop0, v); - break; - case KVM_REG_MIPS_CP0_PAGEMASK: - kvm_write_c0_guest_pagemask(cop0, v); - break; - case KVM_REG_MIPS_CP0_WIRED: - kvm_write_c0_guest_wired(cop0, v); - break; - case KVM_REG_MIPS_CP0_BADVADDR: - kvm_write_c0_guest_badvaddr(cop0, v); - break; - case KVM_REG_MIPS_CP0_ENTRYHI: - kvm_write_c0_guest_entryhi(cop0, v); - break; - case KVM_REG_MIPS_CP0_STATUS: - kvm_write_c0_guest_status(cop0, v); - break; - case KVM_REG_MIPS_CP0_CAUSE: - kvm_write_c0_guest_cause(cop0, v); - break; - case KVM_REG_MIPS_CP0_ERROREPC: - kvm_write_c0_guest_errorepc(cop0, v); - break; - default: - return -EINVAL; - } - return 0; + return -EINVAL; } long @@ -725,38 +491,9 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; long r; + int intr; switch (ioctl) { - case KVM_SET_ONE_REG: - case KVM_GET_ONE_REG: { - struct kvm_one_reg reg; - if (copy_from_user(®, argp, sizeof(reg))) - return -EFAULT; - if (ioctl == KVM_SET_ONE_REG) - return kvm_mips_set_reg(vcpu, ®); - else - return kvm_mips_get_reg(vcpu, ®); - } - case KVM_GET_REG_LIST: { - struct kvm_reg_list __user *user_list = argp; - u64 __user *reg_dest; - struct kvm_reg_list reg_list; - unsigned n; - - if (copy_from_user(®_list, user_list, sizeof(reg_list))) - return -EFAULT; - n = reg_list.n; - reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs); - if (copy_to_user(user_list, ®_list, sizeof(reg_list))) - return -EFAULT; - if (n < reg_list.n) - return -E2BIG; - reg_dest = user_list->reg; - if (copy_to_user(reg_dest, kvm_mips_get_one_regs, - sizeof(kvm_mips_get_one_regs))) - return -EFAULT; - return 0; - } case KVM_NMI: /* Treat the NMI as a CPU reset */ r = kvm_mips_reset_vcpu(vcpu); @@ -768,6 +505,8 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) if (copy_from_user(&irq, argp, sizeof(irq))) goto out; + intr = (int)irq.irq; + kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq); @@ -775,7 +514,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) break; } default: - r = -ENOIOCTLCMD; + r = -EINVAL; } out: @@ -826,7 +565,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) switch (ioctl) { default: - r = -ENOIOCTLCMD; + r = -EINVAL; } return r; @@ -854,13 +593,13 @@ void kvm_arch_exit(void) int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { - return -ENOIOCTLCMD; + return -ENOTSUPP; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { - return -ENOIOCTLCMD; + return -ENOTSUPP; } int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) @@ -870,12 +609,12 @@ int 
kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { - return -ENOIOCTLCMD; + return -ENOTSUPP; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { - return -ENOIOCTLCMD; + return -ENOTSUPP; } int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) @@ -888,9 +627,6 @@ int kvm_dev_ioctl_check_extension(long ext) int r; switch (ext) { - case KVM_CAP_ONE_REG: - r = 1; - break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; @@ -899,6 +635,7 @@ int kvm_dev_ioctl_check_extension(long ext) break; } return r; + } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) @@ -940,28 +677,28 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; - for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) - vcpu->arch.gprs[i] = regs->gpr[i]; - vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ + for (i = 0; i < 32; i++) + vcpu->arch.gprs[i] = regs->gprs[i]; + vcpu->arch.hi = regs->hi; vcpu->arch.lo = regs->lo; vcpu->arch.pc = regs->pc; - return 0; + return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs); } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; - for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) - regs->gpr[i] = vcpu->arch.gprs[i]; + for (i = 0; i < 32; i++) + regs->gprs[i] = vcpu->arch.gprs[i]; regs->hi = vcpu->arch.hi; regs->lo = vcpu->arch.lo; regs->pc = vcpu->arch.pc; - return 0; + return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs); } void kvm_mips_comparecount_func(unsigned long data) diff --git a/trunk/arch/mips/kvm/kvm_mips_emul.c b/trunk/arch/mips/kvm/kvm_mips_emul.c index 4b6274b47f33..2b2bac9a40aa 100644 --- a/trunk/arch/mips/kvm/kvm_mips_emul.c +++ b/trunk/arch/mips/kvm/kvm_mips_emul.c @@ -525,18 +525,16 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, printk("MTCz, cop0->reg[EBASE]: %#lx\n", kvm_read_c0_guest_ebase(cop0)); } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { - uint32_t nasid = - vcpu->arch.gprs[rt] & ASID_MASK; + uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]); if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) && - ((kvm_read_c0_guest_entryhi(cop0) & - ASID_MASK) != nasid)) { + (ASID_MASK(kvm_read_c0_guest_entryhi(cop0)) + != nasid)) { kvm_debug ("MTCz, change ASID from %#lx to %#lx\n", - kvm_read_c0_guest_entryhi(cop0) & - ASID_MASK, - vcpu->arch.gprs[rt] & ASID_MASK); + ASID_MASK(kvm_read_c0_guest_entryhi(cop0)), + ASID_MASK(vcpu->arch.gprs[rt])); /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); @@ -988,8 +986,7 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, * resulting handler will do the right thing */ index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | - (kvm_read_c0_guest_entryhi - (cop0) & ASID_MASK)); + ASID_MASK(kvm_read_c0_guest_entryhi(cop0))); if (index < 0) { vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); @@ -1154,7 +1151,7 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc, struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; unsigned long entryhi = (vcpu->arch. 
host_cp0_badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); + ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ @@ -1201,7 +1198,7 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc, enum emulation_result er = EMULATE_DONE; unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); + ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ @@ -1246,7 +1243,7 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc, struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); + ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ @@ -1290,7 +1287,7 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc, struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); + ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ @@ -1359,7 +1356,7 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc, { struct mips_coproc *cop0 = vcpu->arch.cop0; unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); + ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; @@ -1786,8 +1783,8 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, */ index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | - (kvm_read_c0_guest_entryhi - (vcpu->arch.cop0) & ASID_MASK)); + ASID_MASK(kvm_read_c0_guest_entryhi + (vcpu->arch.cop0))); if (index < 0) { if (exccode == T_TLB_LD_MISS) { er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); diff --git a/trunk/arch/mips/kvm/kvm_tlb.c b/trunk/arch/mips/kvm/kvm_tlb.c index c777dd36d4a8..89511a9258d3 100644 --- a/trunk/arch/mips/kvm/kvm_tlb.c +++ b/trunk/arch/mips/kvm/kvm_tlb.c @@ -17,8 +17,6 @@ #include #include #include -#include - #include #include @@ -53,13 +51,13 @@ EXPORT_SYMBOL(kvm_mips_is_error_pfn); uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) { - return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; + return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]); } uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) { - return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; + return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]); } inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu) @@ -86,7 +84,7 @@ void kvm_mips_dump_host_tlbs(void) old_pagemask = read_c0_pagemask(); printk("HOST TLBs:\n"); - printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK); + printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi())); for (i = 0; i < current_cpu_data.tlbsize; i++) { write_c0_index(i); @@ -171,27 +169,21 @@ void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu) } } -static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) +static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) { - int srcu_idx, err = 0; pfn_t pfn; if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) - return 0; + return; - srcu_idx = srcu_read_lock(&kvm->srcu); 
pfn = kvm_mips_gfn_to_pfn(kvm, gfn); if (kvm_mips_is_error_pfn(pfn)) { - kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn); - err = -EFAULT; - goto out; + panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn); } kvm->arch.guest_pmap[gfn] = pfn; -out: - srcu_read_unlock(&kvm->srcu, srcu_idx); - return err; + return; } /* Translate guest KSEG0 addresses to Host PA */ @@ -215,10 +207,7 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, gva); return KVM_INVALID_PAGE; } - - if (kvm_mips_map_page(vcpu->kvm, gfn) < 0) - return KVM_INVALID_ADDR; - + kvm_mips_map_page(vcpu->kvm, gfn); return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; } @@ -321,11 +310,8 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, even = !(gfn & 0x1); vaddr = badvaddr & (PAGE_MASK << 1); - if (kvm_mips_map_page(vcpu->kvm, gfn) < 0) - return -1; - - if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0) - return -1; + kvm_mips_map_page(vcpu->kvm, gfn); + kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1); if (even) { pfn0 = kvm->arch.guest_pmap[gfn]; @@ -403,11 +389,8 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, pfn0 = 0; pfn1 = 0; } else { - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0) - return -1; - - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0) - return -1; + kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT); + kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT); pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT]; pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT]; @@ -445,7 +428,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) && - (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) { + (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) { index = i; break; } @@ -643,7 +626,7 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, { unsigned long asid = asid_cache(cpu); - if (!((asid += ASID_INC) & ASID_MASK)) { + if (!(ASID_MASK(ASID_INC(asid)))) { if (cpu_has_vtag_icache) { flush_icache_all(); } @@ -821,8 +804,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (!newasid) { /* If we preempted while the guest was executing, then reload the pre-empted ASID */ if (current->flags & PF_VCPU) { - write_c0_entryhi(vcpu->arch. - preempt_entryhi & ASID_MASK); + write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi)); ehb(); } } else { @@ -834,13 +816,11 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) */ if (current->flags & PF_VCPU) { if (KVM_GUEST_KERNEL_MODE(vcpu)) - write_c0_entryhi(vcpu->arch. - guest_kernel_asid[cpu] & - ASID_MASK); + write_c0_entryhi(ASID_MASK(vcpu->arch. + guest_kernel_asid[cpu])); else - write_c0_entryhi(vcpu->arch. - guest_user_asid[cpu] & - ASID_MASK); + write_c0_entryhi(ASID_MASK(vcpu->arch. 
+ guest_user_asid[cpu])); ehb(); } } @@ -899,8 +879,7 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) kvm_mips_guest_tlb_lookup(vcpu, ((unsigned long) opc & VPN2_MASK) | - (kvm_read_c0_guest_entryhi - (cop0) & ASID_MASK)); + ASID_MASK(kvm_read_c0_guest_entryhi(cop0))); if (index < 0) { kvm_err ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", diff --git a/trunk/arch/mips/kvm/kvm_trap_emul.c b/trunk/arch/mips/kvm/kvm_trap_emul.c index 30d725321db1..466aeef044bd 100644 --- a/trunk/arch/mips/kvm/kvm_trap_emul.c +++ b/trunk/arch/mips/kvm/kvm_trap_emul.c @@ -345,6 +345,54 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) return ret; } +static int +kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + + kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]); + kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]); + kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]); + kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]); + kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]); + + kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]); + kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]); + kvm_write_c0_guest_pagemask(cop0, + regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]); + kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]); + kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]); + + return 0; +} + +static int +kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + + regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0); + regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0); + regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0); + regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0); + regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0); + + regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0); + regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0); + regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] = + kvm_read_c0_guest_pagemask(cop0); + regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0); + regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0); + + regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0); + regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0); + regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0); + regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0); + regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0); + + return 0; +} + static int kvm_trap_emul_vm_init(struct kvm *kvm) { return 0; @@ -423,6 +471,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { .dequeue_io_int = kvm_mips_dequeue_io_int_cb, .irq_deliver = kvm_mips_irq_deliver_cb, .irq_clear = kvm_mips_irq_clear_cb, + .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs, + .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs, }; int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) diff --git a/trunk/arch/mips/lantiq/xway/gptu.c b/trunk/arch/mips/lantiq/xway/gptu.c index 850821df924c..9861c8669fab 100644 --- a/trunk/arch/mips/lantiq/xway/gptu.c +++ b/trunk/arch/mips/lantiq/xway/gptu.c @@ -144,6 +144,10 @@ static int gptu_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, 
IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "Failed to get resource\n"); + return -ENOMEM; + } /* remap gptu register range */ gptu_membase = devm_ioremap_resource(&pdev->dev, res); @@ -165,8 +169,6 @@ static int gptu_probe(struct platform_device *pdev) if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) { dev_err(&pdev->dev, "Failed to find magic\n"); gptu_hwexit(); - clk_disable(clk); - clk_put(clk); return -ENAVAIL; } diff --git a/trunk/arch/mips/lib/dump_tlb.c b/trunk/arch/mips/lib/dump_tlb.c index 32b9f21bfd85..8a12d00908e0 100644 --- a/trunk/arch/mips/lib/dump_tlb.c +++ b/trunk/arch/mips/lib/dump_tlb.c @@ -11,6 +11,7 @@ #include #include #include +#include static inline const char *msk2str(unsigned int mask) { @@ -55,7 +56,7 @@ static void dump_tlb(int first, int last) s_pagemask = read_c0_pagemask(); s_entryhi = read_c0_entryhi(); s_index = read_c0_index(); - asid = s_entryhi & 0xff; + asid = ASID_MASK(s_entryhi); for (i = first; i <= last; i++) { write_c0_index(i); @@ -85,7 +86,7 @@ static void dump_tlb(int first, int last) printk("va=%0*lx asid=%02lx\n", width, (entryhi & ~0x1fffUL), - entryhi & 0xff); + ASID_MASK(entryhi)); printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ", width, (entrylo0 << 6) & PAGE_MASK, c0, diff --git a/trunk/arch/mips/lib/r3k_dump_tlb.c b/trunk/arch/mips/lib/r3k_dump_tlb.c index 91615c2ef0cf..8327698b9937 100644 --- a/trunk/arch/mips/lib/r3k_dump_tlb.c +++ b/trunk/arch/mips/lib/r3k_dump_tlb.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -21,7 +22,7 @@ static void dump_tlb(int first, int last) unsigned int asid; unsigned long entryhi, entrylo0; - asid = read_c0_entryhi() & 0xfc0; + asid = ASID_MASK(read_c0_entryhi()); for (i = first; i <= last; i++) { write_c0_index(i<<8); @@ -35,7 +36,7 @@ static void dump_tlb(int first, int last) /* Unused entries have a virtual address of KSEG0. */ if ((entryhi & 0xffffe000) != 0x80000000 - && (entryhi & 0xfc0) == asid) { + && (ASID_MASK(entryhi) == asid)) { /* * Only print entries in use */ @@ -44,7 +45,7 @@ static void dump_tlb(int first, int last) printk("va=%08lx asid=%08lx" " [pa=%06lx n=%d d=%d v=%d g=%d]", (entryhi & 0xffffe000), - entryhi & 0xfc0, + ASID_MASK(entryhi), entrylo0 & PAGE_MASK, (entrylo0 & (1 << 11)) ? 1 : 0, (entrylo0 & (1 << 10)) ? 1 : 0, diff --git a/trunk/arch/mips/loongson/common/reset.c b/trunk/arch/mips/loongson/common/reset.c index 65bfbb5d06f4..35c8c6468494 100644 --- a/trunk/arch/mips/loongson/common/reset.c +++ b/trunk/arch/mips/loongson/common/reset.c @@ -12,7 +12,6 @@ #include #include -#include #include #include diff --git a/trunk/arch/mips/loongson1/common/reset.c b/trunk/arch/mips/loongson1/common/reset.c index 547f34b69e4c..d4f610f9604a 100644 --- a/trunk/arch/mips/loongson1/common/reset.c +++ b/trunk/arch/mips/loongson1/common/reset.c @@ -9,7 +9,6 @@ #include #include -#include #include #include diff --git a/trunk/arch/mips/mm/tlb-r3k.c b/trunk/arch/mips/mm/tlb-r3k.c index a63d1ed0827f..4a13c150f31b 100644 --- a/trunk/arch/mips/mm/tlb-r3k.c +++ b/trunk/arch/mips/mm/tlb-r3k.c @@ -51,7 +51,7 @@ void local_flush_tlb_all(void) #endif local_irq_save(flags); - old_ctx = read_c0_entryhi() & ASID_MASK; + old_ctx = ASID_MASK(read_c0_entryhi()); write_c0_entrylo0(0); entry = r3k_have_wired_reg ? 
read_c0_wired() : 8; for (; entry < current_cpu_data.tlbsize; entry++) { @@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, #ifdef DEBUG_TLB printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", - cpu_context(cpu, mm) & ASID_MASK, start, end); + ASID_MASK(cpu_context(cpu, mm)), start, end); #endif local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size <= current_cpu_data.tlbsize) { - int oldpid = read_c0_entryhi() & ASID_MASK; - int newpid = cpu_context(cpu, mm) & ASID_MASK; + int oldpid = ASID_MASK(read_c0_entryhi()); + int newpid = ASID_MASK(cpu_context(cpu, mm)); start &= PAGE_MASK; end += PAGE_SIZE - 1; @@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) #ifdef DEBUG_TLB printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); #endif - newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; + newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm)); page &= PAGE_MASK; local_irq_save(flags); - oldpid = read_c0_entryhi() & ASID_MASK; + oldpid = ASID_MASK(read_c0_entryhi()); write_c0_entryhi(page | newpid); BARRIER; tlb_probe(); @@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) if (current->active_mm != vma->vm_mm) return; - pid = read_c0_entryhi() & ASID_MASK; + pid = ASID_MASK(read_c0_entryhi()); #ifdef DEBUG_TLB - if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { + if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) { printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n", (cpu_context(cpu, vma->vm_mm)), pid); } @@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, local_irq_save(flags); /* Save old context and create impossible VPN2 value */ - old_ctx = read_c0_entryhi() & ASID_MASK; + old_ctx = ASID_MASK(read_c0_entryhi()); old_pagemask = read_c0_pagemask(); w = read_c0_wired(); write_c0_wired(w + 1); @@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, #endif local_irq_save(flags); - old_ctx = read_c0_entryhi() & ASID_MASK; + old_ctx = ASID_MASK(read_c0_entryhi()); write_c0_entrylo0(entrylo0); write_c0_entryhi(entryhi); write_c0_index(wired); diff --git a/trunk/arch/mips/mm/tlb-r4k.c b/trunk/arch/mips/mm/tlb-r4k.c index c643de4c473a..09653b290d53 100644 --- a/trunk/arch/mips/mm/tlb-r4k.c +++ b/trunk/arch/mips/mm/tlb-r4k.c @@ -287,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) ENTER_CRITICAL(flags); - pid = read_c0_entryhi() & ASID_MASK; + pid = ASID_MASK(read_c0_entryhi()); address &= (PAGE_MASK << 1); write_c0_entryhi(address | pid); pgdp = pgd_offset(vma->vm_mm, address); diff --git a/trunk/arch/mips/mm/tlb-r8k.c b/trunk/arch/mips/mm/tlb-r8k.c index 91c2499f806a..122f9207f49e 100644 --- a/trunk/arch/mips/mm/tlb-r8k.c +++ b/trunk/arch/mips/mm/tlb-r8k.c @@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) if (current->active_mm != vma->vm_mm) return; - pid = read_c0_entryhi() & ASID_MASK; + pid = ASID_MASK(read_c0_entryhi()); local_irq_save(flags); address &= PAGE_MASK; diff --git a/trunk/arch/mips/mm/tlbex.c b/trunk/arch/mips/mm/tlbex.c index afeef93f81a7..4d46d3787576 100644 --- a/trunk/arch/mips/mm/tlbex.c +++ b/trunk/arch/mips/mm/tlbex.c @@ -29,6 +29,7 @@ #include #include +#include #include #include #include @@ -301,6 +302,82 @@ static u32 tlb_handler[128] 
__cpuinitdata; static struct uasm_label labels[128] __cpuinitdata; static struct uasm_reloc relocs[128] __cpuinitdata; +#ifdef CONFIG_64BIT +static int check_for_high_segbits __cpuinitdata; +#endif + +static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop, + unsigned int i_const) +{ + unsigned int **p; + + for (p = start; p < stop; p++) { +#ifndef CONFIG_CPU_MICROMIPS + unsigned int *ip; + + ip = *p; + *ip = (*ip & 0xffff0000) | i_const; +#else + unsigned short *ip; + + ip = ((unsigned short *)((unsigned int)*p - 1)); + if ((*ip & 0xf000) == 0x4000) { + *ip &= 0xfff1; + *ip |= (i_const << 1); + } else if ((*ip & 0xf000) == 0x6000) { + *ip &= 0xfff1; + *ip |= ((i_const >> 2) << 1); + } else { + ip++; + *ip = i_const; + } +#endif + local_flush_icache_range((unsigned long)ip, + (unsigned long)ip + sizeof(*ip)); + } +} + +#define asid_insn_fixup(section, const) \ +do { \ + extern unsigned int *__start_ ## section; \ + extern unsigned int *__stop_ ## section; \ + insn_fixup(&__start_ ## section, &__stop_ ## section, const); \ +} while(0) + +/* + * Caller is assumed to flush the caches before the first context switch. + */ +static void __cpuinit setup_asid(unsigned int inc, unsigned int mask, + unsigned int version_mask, + unsigned int first_version) +{ + extern asmlinkage void handle_ri_rdhwr_vivt(void); + unsigned long *vivt_exc; + +#ifdef CONFIG_CPU_MICROMIPS + /* + * Worst case optimised microMIPS addiu instructions support + * only a 3-bit immediate value. + */ + if(inc > 7) + panic("Invalid ASID increment value!"); +#endif + asid_insn_fixup(__asid_inc, inc); + asid_insn_fixup(__asid_mask, mask); + asid_insn_fixup(__asid_version_mask, version_mask); + asid_insn_fixup(__asid_first_version, first_version); + + /* Patch up the 'handle_ri_rdhwr_vivt' handler. 
*/ + vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt; +#ifdef CONFIG_CPU_MICROMIPS + vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1); +#endif + vivt_exc++; + *vivt_exc = (*vivt_exc & ~mask) | mask; + + current_cpu_data.asid_cache = first_version; +} + static int check_for_high_segbits __cpuinitdata; static unsigned int kscratch_used_mask __cpuinitdata; @@ -2179,6 +2256,7 @@ void __cpuinit build_tlb_refill_handler(void) case CPU_TX3922: case CPU_TX3927: #ifndef CONFIG_MIPS_PGD_C0_CONTEXT + setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000); if (cpu_has_local_ebase) build_r3000_tlb_refill_handler(); if (!run_once) { @@ -2204,6 +2282,11 @@ void __cpuinit build_tlb_refill_handler(void) break; default: +#ifndef CONFIG_MIPS_MT_SMTC + setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000); +#else + setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000); +#endif if (!run_once) { scratch_reg = allocate_kscratch(); #ifdef CONFIG_MIPS_PGD_C0_CONTEXT diff --git a/trunk/arch/mips/netlogic/xlp/setup.c b/trunk/arch/mips/netlogic/xlp/setup.c index eaa99d28cb8e..af319143b591 100644 --- a/trunk/arch/mips/netlogic/xlp/setup.c +++ b/trunk/arch/mips/netlogic/xlp/setup.c @@ -37,7 +37,6 @@ #include #include -#include #include #include #include diff --git a/trunk/arch/mips/netlogic/xlr/setup.c b/trunk/arch/mips/netlogic/xlr/setup.c index 89c8c1066632..e3e094100e3e 100644 --- a/trunk/arch/mips/netlogic/xlr/setup.c +++ b/trunk/arch/mips/netlogic/xlr/setup.c @@ -36,7 +36,6 @@ #include #include -#include #include #include #include diff --git a/trunk/arch/mips/pmcs-msp71xx/msp_prom.c b/trunk/arch/mips/pmcs-msp71xx/msp_prom.c index 1c9897531660..0edb89a63516 100644 --- a/trunk/arch/mips/pmcs-msp71xx/msp_prom.c +++ b/trunk/arch/mips/pmcs-msp71xx/msp_prom.c @@ -83,7 +83,7 @@ static inline unsigned char str2hexnum(unsigned char c) return 0; /* foo */ } -int str2eaddr(unsigned char *ea, unsigned char *str) +static inline int str2eaddr(unsigned char *ea, unsigned char *str) { int index = 0; unsigned char num = 0; diff --git a/trunk/arch/mips/pmcs-msp71xx/msp_setup.c b/trunk/arch/mips/pmcs-msp71xx/msp_setup.c index 396b2967ad85..1651cfdbfe7b 100644 --- a/trunk/arch/mips/pmcs-msp71xx/msp_setup.c +++ b/trunk/arch/mips/pmcs-msp71xx/msp_setup.c @@ -12,7 +12,6 @@ #include #include -#include #include #include #include diff --git a/trunk/arch/mips/ralink/dts/rt3050.dtsi b/trunk/arch/mips/ralink/dts/rt3050.dtsi index e3203d414fee..ef7da1e227e6 100644 --- a/trunk/arch/mips/ralink/dts/rt3050.dtsi +++ b/trunk/arch/mips/ralink/dts/rt3050.dtsi @@ -55,14 +55,4 @@ reg-shift = <2>; }; }; - - usb@101c0000 { - compatible = "ralink,rt3050-usb", "snps,dwc2"; - reg = <0x101c0000 40000>; - - interrupt-parent = <&intc>; - interrupts = <18>; - - status = "disabled"; - }; }; diff --git a/trunk/arch/mips/ralink/dts/rt3052_eval.dts b/trunk/arch/mips/ralink/dts/rt3052_eval.dts index 0ac73ea28198..c18c9a84f4c4 100644 --- a/trunk/arch/mips/ralink/dts/rt3052_eval.dts +++ b/trunk/arch/mips/ralink/dts/rt3052_eval.dts @@ -43,8 +43,4 @@ reg = <0x50000 0x7b0000>; }; }; - - usb@101c0000 { - status = "ok"; - }; }; diff --git a/trunk/arch/mips/ralink/of.c b/trunk/arch/mips/ralink/of.c index 6b5f3406f414..fb1569580def 100644 --- a/trunk/arch/mips/ralink/of.c +++ b/trunk/arch/mips/ralink/of.c @@ -88,7 +88,7 @@ void __init plat_mem_setup(void) __dt_setup_arch(&__dtb_start); if (soc_info.mem_size) - add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M, + add_memory_region(soc_info.mem_base, soc_info.mem_size, BOOT_MEM_RAM); 
else detect_memory_region(soc_info.mem_base, diff --git a/trunk/arch/mips/txx9/generic/setup.c b/trunk/arch/mips/txx9/generic/setup.c index 681e7f86c080..5364aabc2102 100644 --- a/trunk/arch/mips/txx9/generic/setup.c +++ b/trunk/arch/mips/txx9/generic/setup.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/arch/mips/vr41xx/common/pmu.c b/trunk/arch/mips/vr41xx/common/pmu.c index d7f755833c3f..70a3f90131d8 100644 --- a/trunk/arch/mips/vr41xx/common/pmu.c +++ b/trunk/arch/mips/vr41xx/common/pmu.c @@ -27,7 +27,6 @@ #include #include -#include #include #include #include diff --git a/trunk/arch/mips/wrppmc/reset.c b/trunk/arch/mips/wrppmc/reset.c index 80beb188ed47..cc5474b24f06 100644 --- a/trunk/arch/mips/wrppmc/reset.c +++ b/trunk/arch/mips/wrppmc/reset.c @@ -9,7 +9,6 @@ #include #include -#include #include #include diff --git a/trunk/arch/mn10300/include/asm/irqflags.h b/trunk/arch/mn10300/include/asm/irqflags.h index 8730c0a3c37d..678f68d5f37b 100644 --- a/trunk/arch/mn10300/include/asm/irqflags.h +++ b/trunk/arch/mn10300/include/asm/irqflags.h @@ -13,8 +13,9 @@ #define _ASM_IRQFLAGS_H #include -/* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */ -#include +#ifndef __ASSEMBLY__ +#include +#endif /* * interrupt control diff --git a/trunk/arch/mn10300/include/asm/pci.h b/trunk/arch/mn10300/include/asm/pci.h index 6f31cc0f1a87..8137c25c4e15 100644 --- a/trunk/arch/mn10300/include/asm/pci.h +++ b/trunk/arch/mn10300/include/asm/pci.h @@ -103,6 +103,4 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) return channel ? 15 : 14; } -#include - #endif /* _ASM_PCI_H */ diff --git a/trunk/arch/mn10300/include/asm/pgtable.h b/trunk/arch/mn10300/include/asm/pgtable.h index 2ddaa67e7983..a1e894b5f65b 100644 --- a/trunk/arch/mn10300/include/asm/pgtable.h +++ b/trunk/arch/mn10300/include/asm/pgtable.h @@ -486,6 +486,9 @@ extern void update_mmu_cache(struct vm_area_struct *vma, #define kern_addr_valid(addr) (1) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range((vma), (vaddr), (pfn), (size), (prot)) + #define MK_IOSPACE_PFN(space, pfn) (pfn) #define GET_IOSPACE(pfn) 0 #define GET_PFN(pfn) (pfn) diff --git a/trunk/arch/mn10300/include/asm/smp.h b/trunk/arch/mn10300/include/asm/smp.h index 56c42417d428..6745dbe64944 100644 --- a/trunk/arch/mn10300/include/asm/smp.h +++ b/trunk/arch/mn10300/include/asm/smp.h @@ -24,7 +24,6 @@ #ifndef __ASSEMBLY__ #include #include -#include #endif #ifdef CONFIG_SMP @@ -86,7 +85,7 @@ extern cpumask_t cpu_boot_map; extern void smp_init_cpus(void); extern void smp_cache_interrupt(void); extern void send_IPI_allbutself(int irq); -extern int smp_nmi_call_function(void (*func)(void *), void *info, int wait); +extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); @@ -101,7 +100,6 @@ extern void __cpu_die(unsigned int cpu); #ifndef __ASSEMBLY__ static inline void smp_init_cpus(void) {} -#define raw_smp_processor_id() 0 #endif /* __ASSEMBLY__ */ #endif /* CONFIG_SMP */ diff --git a/trunk/arch/mn10300/include/asm/uaccess.h b/trunk/arch/mn10300/include/asm/uaccess.h index d7966e0f7698..780560b330d9 100644 --- a/trunk/arch/mn10300/include/asm/uaccess.h +++ b/trunk/arch/mn10300/include/asm/uaccess.h @@ -161,7 +161,7 @@ struct __large_struct { unsigned long buf[100]; }; #define __get_user_check(x, ptr, size) \ ({ \ - const 
__typeof__(*(ptr))* __guc_ptr = (ptr); \ + const __typeof__(ptr) __guc_ptr = (ptr); \ int _e; \ if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \ _e = __get_user_nocheck((x), __guc_ptr, (size)); \ diff --git a/trunk/arch/mn10300/kernel/entry.S b/trunk/arch/mn10300/kernel/entry.S index 222152a3f751..68fcab8f8f6f 100644 --- a/trunk/arch/mn10300/kernel/entry.S +++ b/trunk/arch/mn10300/kernel/entry.S @@ -60,7 +60,6 @@ ENTRY(ret_from_kernel_thread) mov (REG_D0,fp),d0 mov (REG_A0,fp),a0 calls (a0) - GET_THREAD_INFO a2 # A2 must be set on return from sys_exit() clr d0 mov d0,(REG_D0,fp) jmp syscall_exit @@ -108,10 +107,10 @@ syscall_exit_work: and EPSW_nSL,d0 beq resume_kernel # returning to supervisor mode - LOCAL_IRQ_ENABLE # could let syscall_trace_exit() call - # schedule() instead btst _TIF_SYSCALL_TRACE,d2 beq work_pending + LOCAL_IRQ_ENABLE # could let syscall_trace_exit() call + # schedule() instead mov fp,d0 call syscall_trace_exit[],0 # do_syscall_trace(regs) jmp resume_userspace @@ -124,7 +123,6 @@ work_pending: work_resched: call schedule[],0 -resume_userspace: # make sure we don't miss an interrupt setting need_resched or # sigpending between sampling and the rti LOCAL_IRQ_DISABLE @@ -133,8 +131,6 @@ resume_userspace: mov (TI_flags,a2),d2 btst _TIF_WORK_MASK,d2 beq restore_all - - LOCAL_IRQ_ENABLE btst _TIF_NEED_RESCHED,d2 bne work_resched @@ -173,6 +169,17 @@ ret_from_intr: and EPSW_nSL,d0 beq resume_kernel # returning to supervisor mode +ENTRY(resume_userspace) + # make sure we don't miss an interrupt setting need_resched or + # sigpending between sampling and the rti + LOCAL_IRQ_DISABLE + + # is there any work to be done on int/exception return? + mov (TI_flags,a2),d2 + btst _TIF_WORK_MASK,d2 + bne work_pending + jmp restore_all + #ifdef CONFIG_PREEMPT ENTRY(resume_kernel) LOCAL_IRQ_DISABLE diff --git a/trunk/arch/mn10300/kernel/setup.c b/trunk/arch/mn10300/kernel/setup.c index ebac9c11f796..33c3bd1e5c6d 100644 --- a/trunk/arch/mn10300/kernel/setup.c +++ b/trunk/arch/mn10300/kernel/setup.c @@ -38,7 +38,6 @@ struct mn10300_cpuinfo boot_cpu_data; /* For PCI or other memory-mapped resources */ unsigned long pci_mem_start = 0x18000000; -static char __initdata cmd_line[COMMAND_LINE_SIZE]; char redboot_command_line[COMMAND_LINE_SIZE] = "console=ttyS0,115200 root=/dev/mtdblock3 rw"; @@ -75,19 +74,45 @@ static const char *const mn10300_cputypes[] = { }; /* - * Pick out the memory size. 
We look for mem=size, - * where size is "size[KkMm]" + * */ -static int __init early_mem(char *p) +static void __init parse_mem_cmdline(char **cmdline_p) { - memory_size = memparse(p, &p); + char *from, *to, c; + + /* save unparsed command line copy for /proc/cmdline */ + strcpy(boot_command_line, redboot_command_line); + + /* see if there's an explicit memory size option */ + from = redboot_command_line; + to = redboot_command_line; + c = ' '; + + for (;;) { + if (c == ' ' && !memcmp(from, "mem=", 4)) { + if (to != redboot_command_line) + to--; + memory_size = memparse(from + 4, &from); + } + + c = *(from++); + if (!c) + break; + + *(to++) = c; + } + + *to = '\0'; + *cmdline_p = redboot_command_line; if (memory_size == 0) panic("Memory size not known\n"); - return 0; + memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS + + memory_size; + if (memory_end > phys_memory_end) + memory_end = phys_memory_end; } -early_param("mem", early_mem); /* * architecture specific setup @@ -100,20 +125,7 @@ void __init setup_arch(char **cmdline_p) cpu_init(); unit_setup(); smp_init_cpus(); - - /* save unparsed command line copy for /proc/cmdline */ - strlcpy(boot_command_line, redboot_command_line, COMMAND_LINE_SIZE); - - /* populate cmd_line too for later use, preserving boot_command_line */ - strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); - *cmdline_p = cmd_line; - - parse_early_param(); - - memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS + - memory_size; - if (memory_end > phys_memory_end) - memory_end = phys_memory_end; + parse_mem_cmdline(cmdline_p); init_mm.start_code = (unsigned long)&_text; init_mm.end_code = (unsigned long) &_etext; diff --git a/trunk/arch/mn10300/unit-asb2305/pci-asb2305.c b/trunk/arch/mn10300/unit-asb2305/pci-asb2305.c index febb9cd83177..c4e2e79281e8 100644 --- a/trunk/arch/mn10300/unit-asb2305/pci-asb2305.c +++ b/trunk/arch/mn10300/unit-asb2305/pci-asb2305.c @@ -221,7 +221,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, /* Leave vm_pgoff as-is, the PCI space address is the physical * address on this platform. */ - vma->vm_flags |= VM_LOCKED; + vma->vm_flags |= VM_LOCKED | VM_IO; prot = pgprot_val(vma->vm_page_prot); prot &= ~_PAGE_CACHE; diff --git a/trunk/arch/mn10300/unit-asb2305/pci.c b/trunk/arch/mn10300/unit-asb2305/pci.c index e37fac0461f3..1adcf024bb9a 100644 --- a/trunk/arch/mn10300/unit-asb2305/pci.c +++ b/trunk/arch/mn10300/unit-asb2305/pci.c @@ -19,7 +19,6 @@ #include #include #include -#include #include "pci-asb2305.h" unsigned int pci_probe = 1; diff --git a/trunk/arch/openrisc/include/asm/pgtable.h b/trunk/arch/openrisc/include/asm/pgtable.h index 37bf6a3ef8f4..14c900cfd30a 100644 --- a/trunk/arch/openrisc/include/asm/pgtable.h +++ b/trunk/arch/openrisc/include/asm/pgtable.h @@ -446,6 +446,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, #define kern_addr_valid(addr) (1) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + #include /* diff --git a/trunk/arch/parisc/Kconfig b/trunk/arch/parisc/Kconfig index 6507dabdd5dd..cad060f288cf 100644 --- a/trunk/arch/parisc/Kconfig +++ b/trunk/arch/parisc/Kconfig @@ -245,7 +245,7 @@ config SMP config IRQSTACKS bool "Use separate kernel stacks when processing interrupts" - default y + default n help If you say Y here the kernel will use separate kernel stacks for handling hard and soft interrupts. 
This can help avoid diff --git a/trunk/arch/parisc/Makefile b/trunk/arch/parisc/Makefile index 96ec3982be8d..2f967cc6649e 100644 --- a/trunk/arch/parisc/Makefile +++ b/trunk/arch/parisc/Makefile @@ -23,21 +23,24 @@ NM = sh $(srctree)/arch/parisc/nm CHECKFLAGS += -D__hppa__=1 LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) +MACHINE := $(shell uname -m) +NATIVE := $(if $(filter parisc%,$(MACHINE)),1,0) + ifdef CONFIG_64BIT UTS_MACHINE := parisc64 CHECKFLAGS += -D__LP64__=1 -m64 -CC_ARCHES = hppa64 +WIDTH := 64 else # 32-bit -CC_ARCHES = hppa hppa2.0 hppa1.1 +WIDTH := endif -ifneq ($(SUBARCH),$(UTS_MACHINE)) - ifeq ($(CROSS_COMPILE),) - CC_SUFFIXES = linux linux-gnu unknown-linux-gnu - CROSS_COMPILE := $(call cc-cross-prefix, \ - $(foreach a,$(CC_ARCHES), \ - $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-))) - endif +# attempt to help out folks who are cross-compiling +ifeq ($(NATIVE),1) +CROSS_COMPILE := hppa$(WIDTH)-linux- +else + ifeq ($(CROSS_COMPILE),) + CROSS_COMPILE := hppa$(WIDTH)-linux-gnu- + endif endif OBJCOPY_FLAGS =-O binary -R .note -R .comment -S @@ -66,7 +69,7 @@ KBUILD_CFLAGS_KERNEL += -mlong-calls endif # select which processor to optimise for -cflags-$(CONFIG_PA7000) += -march=1.1 -mschedule=7100 +cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100 cflags-$(CONFIG_PA7200) += -march=1.1 -mschedule=7200 cflags-$(CONFIG_PA7100LC) += -march=1.1 -mschedule=7100LC cflags-$(CONFIG_PA7300LC) += -march=1.1 -mschedule=7300 diff --git a/trunk/arch/parisc/hpux/fs.c b/trunk/arch/parisc/hpux/fs.c index 88d0962de65a..838b479a42c4 100644 --- a/trunk/arch/parisc/hpux/fs.c +++ b/trunk/arch/parisc/hpux/fs.c @@ -60,7 +60,6 @@ struct hpux_dirent { }; struct getdents_callback { - struct dir_context ctx; struct hpux_dirent __user *current_dir; struct hpux_dirent __user *previous; int count; @@ -111,23 +110,24 @@ int hpux_getdents(unsigned int fd, struct hpux_dirent __user *dirent, unsigned i { struct fd arg; struct hpux_dirent __user * lastdirent; - struct getdents_callback buf = { - .ctx.actor = filldir, - .current_dir = dirent, - .count = count - }; + struct getdents_callback buf; int error; arg = fdget(fd); if (!arg.file) return -EBADF; - error = iterate_dir(arg.file, &buf.ctx); + buf.current_dir = dirent; + buf.previous = NULL; + buf.count = count; + buf.error = 0; + + error = vfs_readdir(arg.file, filldir, &buf); if (error >= 0) error = buf.error; lastdirent = buf.previous; if (lastdirent) { - if (put_user(buf.ctx.pos, &lastdirent->d_off)) + if (put_user(arg.file->f_pos, &lastdirent->d_off)) error = -EFAULT; else error = count - buf.count; diff --git a/trunk/arch/parisc/include/asm/assembly.h b/trunk/arch/parisc/include/asm/assembly.h index 0da848232344..89fb40005e3f 100644 --- a/trunk/arch/parisc/include/asm/assembly.h +++ b/trunk/arch/parisc/include/asm/assembly.h @@ -438,6 +438,7 @@ SAVE_SP (%sr4, PT_SR4 (\regs)) SAVE_SP (%sr5, PT_SR5 (\regs)) SAVE_SP (%sr6, PT_SR6 (\regs)) + SAVE_SP (%sr7, PT_SR7 (\regs)) SAVE_CR (%cr17, PT_IASQ0(\regs)) mtctl %r0, %cr17 diff --git a/trunk/arch/parisc/include/asm/hardirq.h b/trunk/arch/parisc/include/asm/hardirq.h index 241c34518465..12373c4dabab 100644 --- a/trunk/arch/parisc/include/asm/hardirq.h +++ b/trunk/arch/parisc/include/asm/hardirq.h @@ -11,20 +11,15 @@ #include #include -#ifdef CONFIG_IRQSTACKS -#define __ARCH_HAS_DO_SOFTIRQ -#endif - typedef struct { unsigned int __softirq_pending; +#ifdef CONFIG_DEBUG_STACKOVERFLOW unsigned int kernel_stack_usage; - unsigned int irq_stack_usage; +#endif #ifdef CONFIG_SMP unsigned int 
irq_resched_count; unsigned int irq_call_count; #endif - unsigned int irq_unaligned_count; - unsigned int irq_fpassist_count; unsigned int irq_tlb_count; } ____cacheline_aligned irq_cpustat_t; @@ -33,7 +28,6 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); #define __ARCH_IRQ_STAT #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) #define inc_irq_stat(member) this_cpu_inc(irq_stat.member) -#define __inc_irq_stat(member) __this_cpu_inc(irq_stat.member) #define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending) #define __ARCH_SET_SOFTIRQ_PENDING diff --git a/trunk/arch/parisc/include/asm/mmzone.h b/trunk/arch/parisc/include/asm/mmzone.h index b6b34a0987e7..0e625ab9aaec 100644 --- a/trunk/arch/parisc/include/asm/mmzone.h +++ b/trunk/arch/parisc/include/asm/mmzone.h @@ -27,7 +27,7 @@ extern struct node_map_data node_data[]; #define PFNNID_SHIFT (30 - PAGE_SHIFT) #define PFNNID_MAP_MAX 512 /* support 512GB */ -extern signed char pfnnid_map[PFNNID_MAP_MAX]; +extern unsigned char pfnnid_map[PFNNID_MAP_MAX]; #ifndef CONFIG_64BIT #define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT)) @@ -39,14 +39,17 @@ extern signed char pfnnid_map[PFNNID_MAP_MAX]; static inline int pfn_to_nid(unsigned long pfn) { unsigned int i; + unsigned char r; if (unlikely(pfn_is_io(pfn))) return 0; i = pfn >> PFNNID_SHIFT; BUG_ON(i >= ARRAY_SIZE(pfnnid_map)); + r = pfnnid_map[i]; + BUG_ON(r == 0xff); - return pfnnid_map[i]; + return (int)r; } static inline int pfn_valid(int pfn) diff --git a/trunk/arch/parisc/include/asm/pci.h b/trunk/arch/parisc/include/asm/pci.h index 465154076d23..3234f492d575 100644 --- a/trunk/arch/parisc/include/asm/pci.h +++ b/trunk/arch/parisc/include/asm/pci.h @@ -225,9 +225,4 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) return channel ? 15 : 14; } -#define HAVE_PCI_MMAP - -extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, - enum pci_mmap_state mmap_state, int write_combine); - #endif /* __ASM_PARISC_PCI_H */ diff --git a/trunk/arch/parisc/include/asm/pgtable.h b/trunk/arch/parisc/include/asm/pgtable.h index 34899b5d959a..1e40d7f86be3 100644 --- a/trunk/arch/parisc/include/asm/pgtable.h +++ b/trunk/arch/parisc/include/asm/pgtable.h @@ -506,6 +506,9 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, #endif +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + #define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE) /* We provide our own get_unmapped_area to provide cache coherency */ diff --git a/trunk/arch/parisc/include/asm/processor.h b/trunk/arch/parisc/include/asm/processor.h index cc2290a3cace..064015547d1e 100644 --- a/trunk/arch/parisc/include/asm/processor.h +++ b/trunk/arch/parisc/include/asm/processor.h @@ -17,6 +17,7 @@ #include #include #include + #endif /* __ASSEMBLY__ */ /* @@ -57,6 +58,23 @@ #ifndef __ASSEMBLY__ +/* + * IRQ STACK - used for irq handler + */ +#ifdef __KERNEL__ + +#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */ + +union irq_stack_union { + unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)]; +}; + +DECLARE_PER_CPU(union irq_stack_union, irq_stack_union); + +void call_on_stack(unsigned long p1, void *func, unsigned long new_stack); + +#endif /* __KERNEL__ */ + /* * Data detected about CPUs at boot time which is the same for all CPU's. * HP boxes are SMP - ie identical processors. 
diff --git a/trunk/arch/parisc/kernel/drivers.c b/trunk/arch/parisc/kernel/drivers.c index 14285caec71a..5709c5e59be8 100644 --- a/trunk/arch/parisc/kernel/drivers.c +++ b/trunk/arch/parisc/kernel/drivers.c @@ -394,7 +394,7 @@ EXPORT_SYMBOL(print_pci_hwpath); static void setup_bus_id(struct parisc_device *padev) { struct hardware_path path; - char name[28]; + char name[20]; char *output = name; int i; diff --git a/trunk/arch/parisc/kernel/entry.S b/trunk/arch/parisc/kernel/entry.S index e8f07dd28401..4bb96ad9b0b1 100644 --- a/trunk/arch/parisc/kernel/entry.S +++ b/trunk/arch/parisc/kernel/entry.S @@ -65,11 +65,15 @@ rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation */ mtsp %r0, %sr4 mtsp %r0, %sr5 - mtsp %r0, %sr6 + mfsp %sr7, %r1 + or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */ + mtsp %r1, %sr3 tovirt_r1 %r29 load32 KERNEL_PSW, %r1 rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */ + mtsp %r0, %sr6 + mtsp %r0, %sr7 mtctl %r0, %cr17 /* Clear IIASQ tail */ mtctl %r0, %cr17 /* Clear IIASQ head */ mtctl %r1, %ipsw @@ -115,20 +119,17 @@ /* we save the registers in the task struct */ - copy %r30, %r17 mfctl %cr30, %r1 - ldo THREAD_SZ_ALGN(%r1), %r30 - mtsp %r0,%sr7 - mtsp %r16,%sr3 tophys %r1,%r9 LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */ tophys %r1,%r9 ldo TASK_REGS(%r9),%r9 - STREG %r17,PT_GR30(%r9) + STREG %r30, PT_GR30(%r9) STREG %r29,PT_GR29(%r9) STREG %r26,PT_GR26(%r9) - STREG %r16,PT_SR7(%r9) copy %r9,%r29 + mfctl %cr30, %r1 + ldo THREAD_SZ_ALGN(%r1), %r30 .endm .macro get_stack_use_r30 @@ -136,12 +137,10 @@ /* we put a struct pt_regs on the stack and save the registers there */ tophys %r30,%r9 - copy %r30,%r1 + STREG %r30,PT_GR30(%r9) ldo PT_SZ_ALGN(%r30),%r30 - STREG %r1,PT_GR30(%r9) STREG %r29,PT_GR29(%r9) STREG %r26,PT_GR26(%r9) - STREG %r16,PT_SR7(%r9) copy %r9,%r29 .endm @@ -453,41 +452,9 @@ L2_ptep \pgd,\pte,\index,\va,\fault .endm - /* Acquire pa_dbit_lock lock. */ - .macro dbit_lock spc,tmp,tmp1 -#ifdef CONFIG_SMP - cmpib,COND(=),n 0,\spc,2f - load32 PA(pa_dbit_lock),\tmp -1: LDCW 0(\tmp),\tmp1 - cmpib,COND(=) 0,\tmp1,1b - nop -2: -#endif - .endm - - /* Release pa_dbit_lock lock without reloading lock address. */ - .macro dbit_unlock0 spc,tmp -#ifdef CONFIG_SMP - or,COND(=) %r0,\spc,%r0 - stw \spc,0(\tmp) -#endif - .endm - - /* Release pa_dbit_lock lock. */ - .macro dbit_unlock1 spc,tmp -#ifdef CONFIG_SMP - load32 PA(pa_dbit_lock),\tmp - dbit_unlock0 \spc,\tmp -#endif - .endm - /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and * don't needlessly dirty the cache line if it was already set */ - .macro update_ptep spc,ptep,pte,tmp,tmp1 -#ifdef CONFIG_SMP - or,COND(=) %r0,\spc,%r0 - LDREG 0(\ptep),\pte -#endif + .macro update_ptep ptep,pte,tmp,tmp1 ldi _PAGE_ACCESSED,\tmp1 or \tmp1,\pte,\tmp and,COND(<>) \tmp1,\pte,%r0 @@ -496,11 +463,7 @@ /* Set the dirty bit (and accessed bit). 
No need to be * clever, this is only used from the dirty fault */ - .macro update_dirty spc,ptep,pte,tmp -#ifdef CONFIG_SMP - or,COND(=) %r0,\spc,%r0 - LDREG 0(\ptep),\pte -#endif + .macro update_dirty ptep,pte,tmp ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp or \tmp,\pte,\pte STREG \pte,0(\ptep) @@ -1148,13 +1111,11 @@ dtlb_miss_20w: L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + update_ptep ptp,pte,t0,t1 make_insert_tlb spc,pte,prot idtlbt pte,prot - dbit_unlock1 spc,t0 rfir nop @@ -1174,13 +1135,11 @@ nadtlb_miss_20w: L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + update_ptep ptp,pte,t0,t1 make_insert_tlb spc,pte,prot idtlbt pte,prot - dbit_unlock1 spc,t0 rfir nop @@ -1202,8 +1161,7 @@ dtlb_miss_11: L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + update_ptep ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1214,7 +1172,6 @@ dtlb_miss_11: idtlbp prot,(%sr1,va) mtsp t0, %sr1 /* Restore sr1 */ - dbit_unlock1 spc,t0 rfir nop @@ -1235,8 +1192,7 @@ nadtlb_miss_11: L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + update_ptep ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1248,7 +1204,6 @@ nadtlb_miss_11: idtlbp prot,(%sr1,va) mtsp t0, %sr1 /* Restore sr1 */ - dbit_unlock1 spc,t0 rfir nop @@ -1269,15 +1224,13 @@ dtlb_miss_20: L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + update_ptep ptp,pte,t0,t1 make_insert_tlb spc,pte,prot f_extend pte,t0 idtlbt pte,prot - dbit_unlock1 spc,t0 rfir nop @@ -1297,15 +1250,13 @@ nadtlb_miss_20: L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + update_ptep ptp,pte,t0,t1 make_insert_tlb spc,pte,prot f_extend pte,t0 idtlbt pte,prot - dbit_unlock1 spc,t0 rfir nop @@ -1406,13 +1357,11 @@ itlb_miss_20w: L3_ptep ptp,pte,t0,va,itlb_fault - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + update_ptep ptp,pte,t0,t1 make_insert_tlb spc,pte,prot iitlbt pte,prot - dbit_unlock1 spc,t0 rfir nop @@ -1430,13 +1379,11 @@ naitlb_miss_20w: L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + update_ptep ptp,pte,t0,t1 make_insert_tlb spc,pte,prot iitlbt pte,prot - dbit_unlock1 spc,t0 rfir nop @@ -1458,8 +1405,7 @@ itlb_miss_11: L2_ptep ptp,pte,t0,va,itlb_fault - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + update_ptep ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1470,7 +1416,6 @@ itlb_miss_11: iitlbp prot,(%sr1,va) mtsp t0, %sr1 /* Restore sr1 */ - dbit_unlock1 spc,t0 rfir nop @@ -1482,8 +1427,7 @@ naitlb_miss_11: L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + update_ptep ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1494,7 +1438,6 @@ naitlb_miss_11: iitlbp prot,(%sr1,va) mtsp t0, %sr1 /* Restore sr1 */ - dbit_unlock1 spc,t0 rfir nop @@ -1516,15 +1459,13 @@ itlb_miss_20: L2_ptep ptp,pte,t0,va,itlb_fault - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + update_ptep ptp,pte,t0,t1 make_insert_tlb spc,pte,prot f_extend pte,t0 iitlbt pte,prot - dbit_unlock1 spc,t0 rfir nop @@ -1536,15 +1477,13 @@ naitlb_miss_20: L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 - dbit_lock spc,t0,t1 - update_ptep spc,ptp,pte,t0,t1 + update_ptep ptp,pte,t0,t1 make_insert_tlb spc,pte,prot f_extend pte,t0 iitlbt pte,prot - dbit_unlock1 spc,t0 rfir nop @@ -1568,13 +1507,29 @@ 
dbit_trap_20w: L3_ptep ptp,pte,t0,va,dbit_fault - dbit_lock spc,t0,t1 - update_dirty spc,ptp,pte,t1 +#ifdef CONFIG_SMP + cmpib,COND(=),n 0,spc,dbit_nolock_20w + load32 PA(pa_dbit_lock),t0 + +dbit_spin_20w: + LDCW 0(t0),t1 + cmpib,COND(=) 0,t1,dbit_spin_20w + nop + +dbit_nolock_20w: +#endif + update_dirty ptp,pte,t1 make_insert_tlb spc,pte,prot idtlbt pte,prot - dbit_unlock0 spc,t0 +#ifdef CONFIG_SMP + cmpib,COND(=),n 0,spc,dbit_nounlock_20w + ldi 1,t1 + stw t1,0(t0) + +dbit_nounlock_20w: +#endif rfir nop @@ -1588,8 +1543,18 @@ dbit_trap_11: L2_ptep ptp,pte,t0,va,dbit_fault - dbit_lock spc,t0,t1 - update_dirty spc,ptp,pte,t1 +#ifdef CONFIG_SMP + cmpib,COND(=),n 0,spc,dbit_nolock_11 + load32 PA(pa_dbit_lock),t0 + +dbit_spin_11: + LDCW 0(t0),t1 + cmpib,= 0,t1,dbit_spin_11 + nop + +dbit_nolock_11: +#endif + update_dirty ptp,pte,t1 make_insert_tlb_11 spc,pte,prot @@ -1600,7 +1565,13 @@ dbit_trap_11: idtlbp prot,(%sr1,va) mtsp t1, %sr1 /* Restore sr1 */ - dbit_unlock0 spc,t0 +#ifdef CONFIG_SMP + cmpib,COND(=),n 0,spc,dbit_nounlock_11 + ldi 1,t1 + stw t1,0(t0) + +dbit_nounlock_11: +#endif rfir nop @@ -1612,15 +1583,32 @@ dbit_trap_20: L2_ptep ptp,pte,t0,va,dbit_fault - dbit_lock spc,t0,t1 - update_dirty spc,ptp,pte,t1 +#ifdef CONFIG_SMP + cmpib,COND(=),n 0,spc,dbit_nolock_20 + load32 PA(pa_dbit_lock),t0 + +dbit_spin_20: + LDCW 0(t0),t1 + cmpib,= 0,t1,dbit_spin_20 + nop + +dbit_nolock_20: +#endif + update_dirty ptp,pte,t1 make_insert_tlb spc,pte,prot f_extend pte,t1 idtlbt pte,prot - dbit_unlock0 spc,t0 + +#ifdef CONFIG_SMP + cmpib,COND(=),n 0,spc,dbit_nounlock_20 + ldi 1,t1 + stw t1,0(t0) + +dbit_nounlock_20: +#endif rfir nop diff --git a/trunk/arch/parisc/kernel/hardware.c b/trunk/arch/parisc/kernel/hardware.c index 872275659d98..f7752f6af29e 100644 --- a/trunk/arch/parisc/kernel/hardware.c +++ b/trunk/arch/parisc/kernel/hardware.c @@ -222,7 +222,6 @@ static struct hp_hardware hp_hardware_list[] = { {HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"}, {HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"}, {HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"}, - {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+? 
(rp5470)"}, {HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"}, {HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"}, {HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"}, @@ -1205,7 +1204,6 @@ static struct hp_hardware hp_hardware_list[] = { {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, - {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"}, {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, diff --git a/trunk/arch/parisc/kernel/irq.c b/trunk/arch/parisc/kernel/irq.c index 2e6443b1e922..e255db0bb761 100644 --- a/trunk/arch/parisc/kernel/irq.c +++ b/trunk/arch/parisc/kernel/irq.c @@ -27,11 +27,11 @@ #include #include #include +#include #include #include #include -#include #undef PARISC_IRQ_CR16_COUNTS @@ -166,36 +166,22 @@ int arch_show_interrupts(struct seq_file *p, int prec) seq_printf(p, "%*s: ", prec, "STK"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage); - seq_puts(p, " Kernel stack usage\n"); -# ifdef CONFIG_IRQSTACKS - seq_printf(p, "%*s: ", prec, "IST"); - for_each_online_cpu(j) - seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage); - seq_puts(p, " Interrupt stack usage\n"); -# endif + seq_printf(p, " Kernel stack usage\n"); #endif #ifdef CONFIG_SMP seq_printf(p, "%*s: ", prec, "RES"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); - seq_puts(p, " Rescheduling interrupts\n"); + seq_printf(p, " Rescheduling interrupts\n"); seq_printf(p, "%*s: ", prec, "CAL"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); - seq_puts(p, " Function call interrupts\n"); + seq_printf(p, " Function call interrupts\n"); #endif - seq_printf(p, "%*s: ", prec, "UAH"); - for_each_online_cpu(j) - seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count); - seq_puts(p, " Unaligned access handler traps\n"); - seq_printf(p, "%*s: ", prec, "FPA"); - for_each_online_cpu(j) - seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count); - seq_puts(p, " Floating point assist traps\n"); seq_printf(p, "%*s: ", prec, "TLB"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); - seq_puts(p, " TLB shootdowns\n"); + seq_printf(p, " TLB shootdowns\n"); return 0; } @@ -380,24 +366,6 @@ static inline int eirr_to_irq(unsigned long eirr) return (BITS_PER_LONG - bit) + TIMER_IRQ; } -#ifdef CONFIG_IRQSTACKS -/* - * IRQ STACK - used for irq handler - */ -#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */ - -union irq_stack_union { - unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)]; - volatile unsigned int slock[4]; - volatile unsigned int lock[1]; -}; - -DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = { - .slock = { 1,1,1,1 }, - }; -#endif - - int sysctl_panic_on_stackoverflow = 1; static inline void stack_overflow_check(struct pt_regs *regs) @@ -410,7 +378,6 @@ static inline void stack_overflow_check(struct pt_regs *regs) unsigned long sp = regs->gr[30]; unsigned long stack_usage; unsigned int *last_usage; - int cpu = smp_processor_id(); /* if sr7 != 0, we interrupted a userspace process which we do not want * to check for stack overflow. We will only check the kernel stack. 
*/ @@ -419,31 +386,7 @@ static inline void stack_overflow_check(struct pt_regs *regs) /* calculate kernel stack usage */ stack_usage = sp - stack_start; -#ifdef CONFIG_IRQSTACKS - if (likely(stack_usage <= THREAD_SIZE)) - goto check_kernel_stack; /* found kernel stack */ - - /* check irq stack usage */ - stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack; - stack_usage = sp - stack_start; - - last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu); - if (unlikely(stack_usage > *last_usage)) - *last_usage = stack_usage; - - if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN))) - return; - - pr_emerg("stackcheck: %s will most likely overflow irq stack " - "(sp:%lx, stk bottom-top:%lx-%lx)\n", - current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE); - goto panic_check; - -check_kernel_stack: -#endif - - /* check kernel stack usage */ - last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu); + last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id()); if (unlikely(stack_usage > *last_usage)) *last_usage = stack_usage; @@ -455,66 +398,31 @@ static inline void stack_overflow_check(struct pt_regs *regs) "(sp:%lx, stk bottom-top:%lx-%lx)\n", current->comm, sp, stack_start, stack_start + THREAD_SIZE); -#ifdef CONFIG_IRQSTACKS -panic_check: -#endif if (sysctl_panic_on_stackoverflow) panic("low stack detected by irq handler - check messages\n"); #endif } #ifdef CONFIG_IRQSTACKS -/* in entry.S: */ -void call_on_stack(unsigned long p1, void *func, unsigned long new_stack); +DEFINE_PER_CPU(union irq_stack_union, irq_stack_union); static void execute_on_irq_stack(void *func, unsigned long param1) { - union irq_stack_union *union_ptr; + unsigned long *irq_stack_start; unsigned long irq_stack; - volatile unsigned int *irq_stack_in_use; - - union_ptr = &per_cpu(irq_stack_union, smp_processor_id()); - irq_stack = (unsigned long) &union_ptr->stack; - irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock), - 64); /* align for stack frame usage */ + int cpu = smp_processor_id(); - /* We may be called recursive. If we are already using the irq stack, - * just continue to use it. Use spinlocks to serialize - * the irq stack usage. - */ - irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr); - if (!__ldcw(irq_stack_in_use)) { - void (*direct_call)(unsigned long p1) = func; + irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0]; + irq_stack = (unsigned long) irq_stack_start; + irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */ - /* We are using the IRQ stack already. - * Do direct call on current stack. */ - direct_call(param1); - return; - } + BUG_ON(*irq_stack_start); /* report bug if we were called recursive. */ + *irq_stack_start = 1; /* This is where we switch to the IRQ stack. */ call_on_stack(param1, func, irq_stack); - /* free up irq stack usage. 
*/ - *irq_stack_in_use = 1; -} - -asmlinkage void do_softirq(void) -{ - __u32 pending; - unsigned long flags; - - if (in_interrupt()) - return; - - local_irq_save(flags); - - pending = local_softirq_pending(); - - if (pending) - execute_on_irq_stack(__do_softirq, 0); - - local_irq_restore(flags); + *irq_stack_start = 0; } #endif /* CONFIG_IRQSTACKS */ diff --git a/trunk/arch/parisc/kernel/pacache.S b/trunk/arch/parisc/kernel/pacache.S index b743a80eaba0..5e1de6072be5 100644 --- a/trunk/arch/parisc/kernel/pacache.S +++ b/trunk/arch/parisc/kernel/pacache.S @@ -605,14 +605,14 @@ ENTRY(copy_user_page_asm) convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */ convert_phys_for_tlb_insert20 %r23 /* convert phys addr to tlb insert format */ depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */ - depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ + depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ copy %r28, %r29 depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */ #else extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ extrw,u %r23, 24,25, %r23 /* convert phys addr to tlb insert format */ depw %r24, 31,22, %r28 /* Form aliased virtual address 'to' */ - depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ + depwi 0, 31,12, %r28 /* Clear any offset bits */ copy %r28, %r29 depwi 1, 9,1, %r29 /* Form aliased virtual address 'from' */ #endif @@ -762,7 +762,7 @@ ENTRY(clear_user_page_asm) #else extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ - depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ + depwi 0, 31,12, %r28 /* Clear any offset bits */ #endif /* Purge any old translation */ @@ -846,7 +846,7 @@ ENTRY(flush_dcache_page_asm) #else extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ - depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ + depwi 0, 31,12, %r28 /* Clear any offset bits */ #endif /* Purge any old translation */ @@ -860,7 +860,7 @@ ENTRY(flush_dcache_page_asm) #endif ldil L%dcache_stride, %r1 - ldw R%dcache_stride(%r1), r31 + ldw R%dcache_stride(%r1), %r1 #ifdef CONFIG_64BIT depdi,z 1, 63-PAGE_SHIFT,1, %r25 @@ -868,26 +868,26 @@ ENTRY(flush_dcache_page_asm) depwi,z 1, 31-PAGE_SHIFT,1, %r25 #endif add %r28, %r25, %r25 - sub %r25, r31, %r25 - - -1: fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) + sub %r25, %r1, %r25 + + +1: fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) cmpb,COND(<<) %r28, %r25,1b - fdc,m r31(%r28) + fdc,m %r1(%r28) sync @@ -918,11 +918,11 @@ ENTRY(flush_icache_page_asm) #endif convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */ depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ - depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ + depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ #else extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ - depwi 0, 
31,PAGE_SHIFT, %r28 /* Clear any offset bits */ + depwi 0, 31,12, %r28 /* Clear any offset bits */ #endif /* Purge any old translation */ @@ -936,7 +936,7 @@ ENTRY(flush_icache_page_asm) #endif ldil L%icache_stride, %r1 - ldw R%icache_stride(%r1), %r31 + ldw R%icache_stride(%r1), %r1 #ifdef CONFIG_64BIT depdi,z 1, 63-PAGE_SHIFT,1, %r25 @@ -944,28 +944,28 @@ ENTRY(flush_icache_page_asm) depwi,z 1, 31-PAGE_SHIFT,1, %r25 #endif add %r28, %r25, %r25 - sub %r25, %r31, %r25 + sub %r25, %r1, %r25 /* fic only has the type 26 form on PA1.1, requiring an * explicit space specification, so use %sr4 */ -1: fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) +1: fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) cmpb,COND(<<) %r28, %r25,1b - fic,m %r31(%sr4,%r28) + fic,m %r1(%sr4,%r28) sync diff --git a/trunk/arch/parisc/kernel/pci.c b/trunk/arch/parisc/kernel/pci.c index 64f2764a8cef..60309051875e 100644 --- a/trunk/arch/parisc/kernel/pci.c +++ b/trunk/arch/parisc/kernel/pci.c @@ -220,33 +220,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, } -int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, - enum pci_mmap_state mmap_state, int write_combine) -{ - unsigned long prot; - - /* - * I/O space can be accessed via normal processor loads and stores on - * this platform but for now we elect not to do this and portable - * drivers should not do this anyway. - */ - if (mmap_state == pci_mmap_io) - return -EINVAL; - - if (write_combine) - return -EINVAL; - - /* - * Ignore write-combine; for now only return uncached mappings. - */ - prot = pgprot_val(vma->vm_page_prot); - prot |= _PAGE_NO_CACHE; - vma->vm_page_prot = __pgprot(prot); - - return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, - vma->vm_end - vma->vm_start, vma->vm_page_prot); -} - /* * A driver is enabling the device. We make sure that all the appropriate * bits are set to allow the device to operate as the driver is expecting. diff --git a/trunk/arch/parisc/kernel/setup.c b/trunk/arch/parisc/kernel/setup.c index 1e95b2000ce8..76b63e726a53 100644 --- a/trunk/arch/parisc/kernel/setup.c +++ b/trunk/arch/parisc/kernel/setup.c @@ -69,8 +69,7 @@ void __init setup_cmdline(char **cmdline_p) /* called from hpux boot loader */ boot_command_line[0] = '\0'; } else { - strlcpy(boot_command_line, (char *)__va(boot_args[1]), - COMMAND_LINE_SIZE); + strcpy(boot_command_line, (char *)__va(boot_args[1])); #ifdef CONFIG_BLK_DEV_INITRD if (boot_args[2] != 0) /* did palo pass us a ramdisk? */ diff --git a/trunk/arch/parisc/kernel/traps.c b/trunk/arch/parisc/kernel/traps.c index 04e47c6a4562..fe41a98043bb 100644 --- a/trunk/arch/parisc/kernel/traps.c +++ b/trunk/arch/parisc/kernel/traps.c @@ -646,7 +646,6 @@ void notrace handle_interruption(int code, struct pt_regs *regs) case 14: /* Assist Exception Trap, i.e. floating point exception. 
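The flush_dcache_page_asm and flush_icache_page_asm loops earlier in this hunk walk one page with fdc,m/fic,m, advancing the address by the cache stride loaded from dcache_stride or icache_stride (the diff itself only changes which scratch register holds the stride). Roughly, and with invented names and sizes (flush_line, a 4096-byte page and a 64-byte stride are assumptions of this sketch), the loop structure corresponds to:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL                 /* assumed page size for the model */

static unsigned long dcache_stride = 64; /* bytes covered per fdc,m step */

/* Stand-in for one "flush data cache line" instruction. */
static void flush_line(uintptr_t addr)
{
        printf("flush line at %#lx\n", (unsigned long)addr);
}

/* Model of the unrolled fdc,m loop: flush every cache line of one page. */
static void flush_dcache_page_model(uintptr_t page)
{
        uintptr_t addr = page;
        uintptr_t end  = page + PAGE_SIZE;

        while (addr < end) {
                flush_line(addr);
                addr += dcache_stride;  /* fdc,m post-increments the base */
        }
}

int main(void)
{
        flush_dcache_page_model(0x10000);
        return 0;
}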
*/ die_if_kernel("Floating point exception", regs, 0); /* quiet */ - __inc_irq_stat(irq_fpassist_count); handle_fpe(regs); return; diff --git a/trunk/arch/parisc/kernel/unaligned.c b/trunk/arch/parisc/kernel/unaligned.c index d7c0acb35ec2..234e3682cf09 100644 --- a/trunk/arch/parisc/kernel/unaligned.c +++ b/trunk/arch/parisc/kernel/unaligned.c @@ -27,7 +27,6 @@ #include #include #include -#include /* #define DEBUG_UNALIGNED 1 */ @@ -455,8 +454,6 @@ void handle_unaligned(struct pt_regs *regs) struct siginfo si; register int flop=0; /* true if this is a flop */ - __inc_irq_stat(irq_unaligned_count); - /* log a message with pacing */ if (user_mode(regs)) { if (current->thread.flags & PARISC_UAC_SIGBUS) { diff --git a/trunk/arch/parisc/mm/init.c b/trunk/arch/parisc/mm/init.c index 505b56c6b9b9..ce939ac8622b 100644 --- a/trunk/arch/parisc/mm/init.c +++ b/trunk/arch/parisc/mm/init.c @@ -47,7 +47,7 @@ pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pt #ifdef CONFIG_DISCONTIGMEM struct node_map_data node_data[MAX_NUMNODES] __read_mostly; -signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; +unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; #endif static struct resource data_resource = { @@ -1069,7 +1069,7 @@ void flush_tlb_all(void) { int do_recycle; - __inc_irq_stat(irq_tlb_count); + inc_irq_stat(irq_tlb_count); do_recycle = 0; spin_lock(&sid_lock); if (dirty_space_ids > RECYCLE_THRESHOLD) { @@ -1090,7 +1090,7 @@ void flush_tlb_all(void) #else void flush_tlb_all(void) { - __inc_irq_stat(irq_tlb_count); + inc_irq_stat(irq_tlb_count); spin_lock(&sid_lock); flush_tlb_all_local(NULL); recycle_sids(); diff --git a/trunk/arch/powerpc/Kconfig.debug b/trunk/arch/powerpc/Kconfig.debug index 863d877e0b5f..5416e28a7538 100644 --- a/trunk/arch/powerpc/Kconfig.debug +++ b/trunk/arch/powerpc/Kconfig.debug @@ -262,31 +262,8 @@ config PPC_EARLY_DEBUG_OPAL_HVSI Select this to enable early debugging for the PowerNV platform using an "hvsi" console -config PPC_EARLY_DEBUG_MEMCONS - bool "In memory console" - help - Select this to enable early debugging using an in memory console. - This console provides input and output buffers stored within the - kernel BSS and should be safe to select on any system. A debugger - can then be used to read kernel output or send input to the console. endchoice -config PPC_MEMCONS_OUTPUT_SIZE - int "In memory console output buffer size" - depends on PPC_EARLY_DEBUG_MEMCONS - default 4096 - help - Selects the size of the output buffer (in bytes) of the in memory - console. - -config PPC_MEMCONS_INPUT_SIZE - int "In memory console input buffer size" - depends on PPC_EARLY_DEBUG_MEMCONS - default 128 - help - Selects the size of the input buffer (in bytes) of the in memory - console. 
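The removed PPC_EARLY_DEBUG_MEMCONS help text above describes an early console whose input and output buffers live in kernel BSS so an external debugger can read output or inject input. As a loose model only (memcons_out, memcons_putc and memcons_puts are invented here and are not the kernel's udbg implementation), the output side amounts to a fixed static buffer written with wrap-around:

#include <stdio.h>

#define MEMCONS_OUTPUT_SIZE 4096        /* mirrors the removed Kconfig default */

/* Static storage: in the kernel this would sit in BSS where a debugger
 * can locate and dump it. */
static char memcons_out[MEMCONS_OUTPUT_SIZE];
static unsigned int memcons_pos;

/* Append one character, wrapping around when the buffer is full. */
static void memcons_putc(char c)
{
        memcons_out[memcons_pos] = c;
        memcons_pos = (memcons_pos + 1) % MEMCONS_OUTPUT_SIZE;
}

static void memcons_puts(const char *s)
{
        while (*s)
                memcons_putc(*s++);
}

int main(void)
{
        memcons_puts("early boot message\n");
        printf("buffer now holds: %.*s", (int)memcons_pos, memcons_out);
        return 0;
}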
- config PPC_EARLY_DEBUG_OPAL def_bool y depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI diff --git a/trunk/arch/powerpc/configs/ps3_defconfig b/trunk/arch/powerpc/configs/ps3_defconfig index 139a8308070c..f79196232917 100644 --- a/trunk/arch/powerpc/configs/ps3_defconfig +++ b/trunk/arch/powerpc/configs/ps3_defconfig @@ -136,6 +136,7 @@ CONFIG_HID_SMARTJOYPLUS=m CONFIG_USB_HIDDEV=y CONFIG_USB=m CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_SUSPEND=y CONFIG_USB_MON=m CONFIG_USB_EHCI_HCD=m # CONFIG_USB_EHCI_HCD_PPC_OF is not set diff --git a/trunk/arch/powerpc/include/asm/context_tracking.h b/trunk/arch/powerpc/include/asm/context_tracking.h deleted file mode 100644 index b6f5a33b8ee2..000000000000 --- a/trunk/arch/powerpc/include/asm/context_tracking.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _ASM_POWERPC_CONTEXT_TRACKING_H -#define _ASM_POWERPC_CONTEXT_TRACKING_H - -#ifdef CONFIG_CONTEXT_TRACKING -#define SCHEDULE_USER bl .schedule_user -#else -#define SCHEDULE_USER bl .schedule -#endif - -#endif diff --git a/trunk/arch/powerpc/include/asm/cputable.h b/trunk/arch/powerpc/include/asm/cputable.h index 6f3887d884d2..26807e5aff51 100644 --- a/trunk/arch/powerpc/include/asm/cputable.h +++ b/trunk/arch/powerpc/include/asm/cputable.h @@ -176,7 +176,6 @@ extern const char *powerpc_base_platform; #define CPU_FTR_CFAR LONG_ASM_CONST(0x0100000000000000) #define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000) #define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000) -#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000) #ifndef __ASSEMBLY__ @@ -395,20 +394,19 @@ extern const char *powerpc_base_platform; CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \ - CPU_FTR_HVMODE | CPU_FTR_DABRX) + CPU_FTR_HVMODE) #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \ - CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX) + CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB) #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \ - CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \ - CPU_FTR_DABRX) + CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR) #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ CPU_FTR_MMCRA | CPU_FTR_SMT | \ @@ -417,7 +415,7 @@ extern const char *powerpc_base_platform; CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \ - CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX) + CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR) #define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ CPU_FTR_MMCRA | CPU_FTR_SMT | \ @@ -432,15 +430,14 @@ extern const char *powerpc_base_platform; CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \ - CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX) + CPU_FTR_UNALIGNED_LD_STD) #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX) + CPU_FTR_PURR | CPU_FTR_REAL_LE) #define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) #define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \ - CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \ - CPU_FTR_ICSWX | CPU_FTR_DABRX ) + CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX) #ifdef __powerpc64__ #ifdef CONFIG_PPC_BOOK3E diff --git a/trunk/arch/powerpc/include/asm/exception-64s.h b/trunk/arch/powerpc/include/asm/exception-64s.h index 46793b58a761..8e5fae8beaf6 100644 --- a/trunk/arch/powerpc/include/asm/exception-64s.h +++ b/trunk/arch/powerpc/include/asm/exception-64s.h @@ -513,7 +513,7 @@ label##_common: \ */ #define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \ EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \ - FINISH_NAP;DISABLE_INTS;RUNLATCH_ON) + FINISH_NAP;RUNLATCH_ON;DISABLE_INTS) /* * When the idle code in power4_idle puts the CPU into NAP mode, diff --git a/trunk/arch/powerpc/include/asm/firmware.h b/trunk/arch/powerpc/include/asm/firmware.h index 681bc0314b6b..0df54646f968 100644 --- a/trunk/arch/powerpc/include/asm/firmware.h +++ b/trunk/arch/powerpc/include/asm/firmware.h @@ -52,7 +52,6 @@ #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000) #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) #define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) -#define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000) #ifndef __ASSEMBLY__ @@ -70,8 +69,7 @@ enum { FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY | FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN, FW_FEATURE_PSERIES_ALWAYS = 0, - FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 | - FW_FEATURE_OPALv3, + FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2, FW_FEATURE_POWERNV_ALWAYS = 0, FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, diff --git a/trunk/arch/powerpc/include/asm/hvcall.h b/trunk/arch/powerpc/include/asm/hvcall.h index 0c7f2bfcf134..cf4df8e2139a 100644 --- a/trunk/arch/powerpc/include/asm/hvcall.h +++ b/trunk/arch/powerpc/include/asm/hvcall.h @@ -264,7 +264,6 @@ #define H_GET_MPP 0x2D4 #define H_HOME_NODE_ASSOCIATIVITY 0x2EC #define H_BEST_ENERGY 0x2F4 -#define H_XIRR_X 0x2FC #define H_RANDOM 0x300 #define H_COP 0x304 #define H_GET_MPP_X 0x314 diff --git a/trunk/arch/powerpc/include/asm/hw_irq.h b/trunk/arch/powerpc/include/asm/hw_irq.h index ba713f166fa5..d615b28dda82 100644 --- a/trunk/arch/powerpc/include/asm/hw_irq.h +++ b/trunk/arch/powerpc/include/asm/hw_irq.h @@ -96,12 +96,11 @@ static inline bool arch_irqs_disabled(void) #endif #define hard_irq_disable() do { \ - u8 _was_enabled = get_paca()->soft_enabled; \ __hard_irq_disable(); \ + if (local_paca->soft_enabled) \ + trace_hardirqs_off(); \ get_paca()->soft_enabled = 0; \ get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \ - if (_was_enabled) \ - trace_hardirqs_off(); \ } while(0) static inline bool lazy_irq_pending(void) diff --git a/trunk/arch/powerpc/include/asm/kvm_asm.h b/trunk/arch/powerpc/include/asm/kvm_asm.h index 851bac7afa4b..b9dd382cb349 100644 --- a/trunk/arch/powerpc/include/asm/kvm_asm.h +++ b/trunk/arch/powerpc/include/asm/kvm_asm.h @@ -54,16 +54,8 @@ #define BOOKE_INTERRUPT_DEBUG 15 /* E500 */ -#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32 -#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33 -/* - * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines - */ 
-#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL -#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST -#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL -#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \ - BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST +#define BOOKE_INTERRUPT_SPE_UNAVAIL 32 +#define BOOKE_INTERRUPT_SPE_FP_DATA 33 #define BOOKE_INTERRUPT_SPE_FP_ROUND 34 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35 #define BOOKE_INTERRUPT_DOORBELL 36 @@ -75,6 +67,10 @@ #define BOOKE_INTERRUPT_HV_SYSCALL 40 #define BOOKE_INTERRUPT_HV_PRIV 41 +/* altivec */ +#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 42 +#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 43 + /* book3s */ #define BOOK3S_INTERRUPT_SYSTEM_RESET 0x100 diff --git a/trunk/arch/powerpc/include/asm/opal.h b/trunk/arch/powerpc/include/asm/opal.h index cbb9305ab15a..b6c8b58b1d76 100644 --- a/trunk/arch/powerpc/include/asm/opal.h +++ b/trunk/arch/powerpc/include/asm/opal.h @@ -243,8 +243,7 @@ enum OpalMCE_TlbErrorType { enum OpalThreadStatus { OPAL_THREAD_INACTIVE = 0x0, - OPAL_THREAD_STARTED = 0x1, - OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */ + OPAL_THREAD_STARTED = 0x1 }; enum OpalPciBusCompare { @@ -564,8 +563,6 @@ extern void opal_nvram_init(void); extern int opal_machine_check(struct pt_regs *regs); -extern void opal_shutdown(void); - #endif /* __ASSEMBLY__ */ #endif /* __OPAL_H */ diff --git a/trunk/arch/powerpc/include/asm/pci-bridge.h b/trunk/arch/powerpc/include/asm/pci-bridge.h index 2c1d8cb9b265..8b11b5bd9938 100644 --- a/trunk/arch/powerpc/include/asm/pci-bridge.h +++ b/trunk/arch/powerpc/include/asm/pci-bridge.h @@ -174,8 +174,6 @@ struct pci_dn { /* Get the pointer to a device_node's pci_dn */ #define PCI_DN(dn) ((struct pci_dn *) (dn)->data) -extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev); - extern void * update_dn_pci_info(struct device_node *dn, void *data); static inline int pci_device_from_OF_node(struct device_node *np, diff --git a/trunk/arch/powerpc/include/asm/pgalloc-64.h b/trunk/arch/powerpc/include/asm/pgalloc-64.h index b66ae722a8e9..91acb12bac92 100644 --- a/trunk/arch/powerpc/include/asm/pgalloc-64.h +++ b/trunk/arch/powerpc/include/asm/pgalloc-64.h @@ -186,7 +186,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, static inline pgtable_t pmd_pgtable(pmd_t pmd) { - return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS); + return (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE); } static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, diff --git a/trunk/arch/powerpc/include/asm/pgtable.h b/trunk/arch/powerpc/include/asm/pgtable.h index b6293d26bd39..7aeb9555f6ea 100644 --- a/trunk/arch/powerpc/include/asm/pgtable.h +++ b/trunk/arch/powerpc/include/asm/pgtable.h @@ -198,6 +198,9 @@ extern void paging_init(void); */ #define kern_addr_valid(addr) (1) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + #include diff --git a/trunk/arch/powerpc/include/asm/ppc_asm.h b/trunk/arch/powerpc/include/asm/ppc_asm.h index 2f1b6c5f8174..cea8496091ff 100644 --- a/trunk/arch/powerpc/include/asm/ppc_asm.h +++ b/trunk/arch/powerpc/include/asm/ppc_asm.h @@ -523,17 +523,6 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946) #define PPC440EP_ERR42 #endif -/* The following stops all load and store data streams associated with stream - * ID (ie. streams created explicitly). 
The embedded and server mnemonics for - * dcbt are different so we use machine "power4" here explicitly. - */ -#define DCBT_STOP_ALL_STREAM_IDS(scratch) \ -.machine push ; \ -.machine "power4" ; \ - lis scratch,0x60000000@h; \ - dcbt r0,scratch,0b01010; \ -.machine pop - /* * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them * keep the address intact to be compatible with code shared with diff --git a/trunk/arch/powerpc/include/asm/processor.h b/trunk/arch/powerpc/include/asm/processor.h index 14a658363698..d7e67ca8b4a6 100644 --- a/trunk/arch/powerpc/include/asm/processor.h +++ b/trunk/arch/powerpc/include/asm/processor.h @@ -284,12 +284,6 @@ struct thread_struct { unsigned long ebbrr; unsigned long ebbhr; unsigned long bescr; - unsigned long siar; - unsigned long sdar; - unsigned long sier; - unsigned long mmcr0; - unsigned long mmcr2; - unsigned long mmcra; #endif }; @@ -409,16 +403,21 @@ static inline void prefetchw(const void *x) #endif #ifdef CONFIG_PPC64 -static inline unsigned long get_clean_sp(unsigned long sp, int is_32) +static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) { + unsigned long sp; + if (is_32) - return sp & 0x0ffffffffUL; + sp = regs->gpr[1] & 0x0ffffffffUL; + else + sp = regs->gpr[1]; + return sp; } #else -static inline unsigned long get_clean_sp(unsigned long sp, int is_32) +static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) { - return sp; + return regs->gpr[1]; } #endif diff --git a/trunk/arch/powerpc/include/asm/pte-hash64-64k.h b/trunk/arch/powerpc/include/asm/pte-hash64-64k.h index d836d945068d..3e13e23e4fdf 100644 --- a/trunk/arch/powerpc/include/asm/pte-hash64-64k.h +++ b/trunk/arch/powerpc/include/asm/pte-hash64-64k.h @@ -47,7 +47,7 @@ * generic accessors and iterators here */ #define __real_pte(e,p) ((real_pte_t) { \ - (e), (pte_val(e) & _PAGE_COMBO) ? \ + (e), ((e) & _PAGE_COMBO) ? \ (pte_val(*((p) + PTRS_PER_PTE))) : 0 }) #define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf)) diff --git a/trunk/arch/powerpc/include/asm/reg.h b/trunk/arch/powerpc/include/asm/reg.h index 4a9e408644fe..a6136515c7f2 100644 --- a/trunk/arch/powerpc/include/asm/reg.h +++ b/trunk/arch/powerpc/include/asm/reg.h @@ -111,6 +111,17 @@ #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T) #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S) +/* Reason codes describing kernel causes for transaction aborts. By + convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if + the failure is persistent. 
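In the pte-hash64-64k.h hunk above, one side masks the entry with pte_val(e) & _PAGE_COMBO while the other uses (e) & _PAGE_COMBO; when pte_t is a wrapper struct (as with strict MM type checks), only the accessor form is valid. A minimal illustration, with an invented flag value and pte contents:

#include <stdio.h>

#define _PAGE_COMBO 0x10000000UL        /* illustrative flag value */

/* Wrapper type in the style of STRICT_MM_TYPECHECKS: the raw bits must
 * be reached through the pte_val() accessor. */
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)      ((x).pte)

int main(void)
{
        pte_t e = { 0x10000321UL };

        /* "(e) & _PAGE_COMBO" would not compile here, since e is a
         * struct; the accessor form works for either representation. */
        if (pte_val(e) & _PAGE_COMBO)
                printf("pte %#lx has _PAGE_COMBO set\n", pte_val(e));
        return 0;
}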
+*/ +#define TM_CAUSE_RESCHED 0xfe +#define TM_CAUSE_TLBI 0xfc +#define TM_CAUSE_FAC_UNAV 0xfa +#define TM_CAUSE_SYSCALL 0xf9 /* Persistent */ +#define TM_CAUSE_MISC 0xf6 +#define TM_CAUSE_SIGNAL 0xf4 + #if defined(CONFIG_PPC_BOOK3S_64) #define MSR_64BIT MSR_SF diff --git a/trunk/arch/powerpc/include/asm/rtas.h b/trunk/arch/powerpc/include/asm/rtas.h index 34fd70488d83..a8bc2bb4adc9 100644 --- a/trunk/arch/powerpc/include/asm/rtas.h +++ b/trunk/arch/powerpc/include/asm/rtas.h @@ -264,8 +264,6 @@ extern void rtas_progress(char *s, unsigned short hex); extern void rtas_initialize(void); extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data); extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); -extern int rtas_online_cpus_mask(cpumask_var_t cpus); -extern int rtas_offline_cpus_mask(cpumask_var_t cpus); extern int rtas_ibm_suspend_me(struct rtas_args *); struct rtc_time; diff --git a/trunk/arch/powerpc/include/asm/signal.h b/trunk/arch/powerpc/include/asm/signal.h index 9322c28aebd2..fbe66c463891 100644 --- a/trunk/arch/powerpc/include/asm/signal.h +++ b/trunk/arch/powerpc/include/asm/signal.h @@ -3,8 +3,5 @@ #define __ARCH_HAS_SA_RESTORER #include -#include - -extern unsigned long get_tm_stackpointer(struct pt_regs *regs); #endif /* _ASM_POWERPC_SIGNAL_H */ diff --git a/trunk/arch/powerpc/include/asm/thread_info.h b/trunk/arch/powerpc/include/asm/thread_info.h index ba7b1973866e..8ceea14d6fe4 100644 --- a/trunk/arch/powerpc/include/asm/thread_info.h +++ b/trunk/arch/powerpc/include/asm/thread_info.h @@ -97,7 +97,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SINGLESTEP 8 /* singlestepping active */ -#define TIF_NOHZ 9 /* in adaptive nohz mode */ +#define TIF_MEMDIE 9 /* is terminating due to OOM killer */ #define TIF_SECCOMP 10 /* secure computing */ #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ #define TIF_NOERROR 12 /* Force successful syscall return */ @@ -106,7 +106,6 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation for stack store? 
*/ -#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1< - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM extern void do_load_up_transact_fpu(struct thread_struct *thread); extern void do_load_up_transact_altivec(struct thread_struct *thread); diff --git a/trunk/arch/powerpc/include/asm/udbg.h b/trunk/arch/powerpc/include/asm/udbg.h index dc590919f8eb..5a7510e9d09d 100644 --- a/trunk/arch/powerpc/include/asm/udbg.h +++ b/trunk/arch/powerpc/include/asm/udbg.h @@ -52,7 +52,6 @@ extern void __init udbg_init_40x_realmode(void); extern void __init udbg_init_cpm(void); extern void __init udbg_init_usbgecko(void); extern void __init udbg_init_wsp(void); -extern void __init udbg_init_memcons(void); extern void __init udbg_init_ehv_bc(void); extern void __init udbg_init_ps3gelic(void); extern void __init udbg_init_debug_opal_raw(void); diff --git a/trunk/arch/powerpc/include/uapi/asm/Kbuild b/trunk/arch/powerpc/include/uapi/asm/Kbuild index 5182c8622b54..f7bca6370745 100644 --- a/trunk/arch/powerpc/include/uapi/asm/Kbuild +++ b/trunk/arch/powerpc/include/uapi/asm/Kbuild @@ -40,7 +40,6 @@ header-y += statfs.h header-y += swab.h header-y += termbits.h header-y += termios.h -header-y += tm.h header-y += types.h header-y += ucontext.h header-y += unistd.h diff --git a/trunk/arch/powerpc/include/uapi/asm/tm.h b/trunk/arch/powerpc/include/uapi/asm/tm.h deleted file mode 100644 index 85059a00f560..000000000000 --- a/trunk/arch/powerpc/include/uapi/asm/tm.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef _ASM_POWERPC_TM_H -#define _ASM_POWERPC_TM_H - -/* Reason codes describing kernel causes for transaction aborts. By - * convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if - * the failure is persistent. PAPR saves 0xff-0xe0 for the hypervisor. - */ -#define TM_CAUSE_PERSISTENT 0x01 -#define TM_CAUSE_RESCHED 0xde -#define TM_CAUSE_TLBI 0xdc -#define TM_CAUSE_FAC_UNAV 0xda -#define TM_CAUSE_SYSCALL 0xd8 /* future use */ -#define TM_CAUSE_MISC 0xd6 /* future use */ -#define TM_CAUSE_SIGNAL 0xd4 -#define TM_CAUSE_ALIGNMENT 0xd2 -#define TM_CAUSE_EMULATE 0xd0 - -#endif diff --git a/trunk/arch/powerpc/kernel/asm-offsets.c b/trunk/arch/powerpc/kernel/asm-offsets.c index 6f16ffafa6f0..b51a97cfedf8 100644 --- a/trunk/arch/powerpc/kernel/asm-offsets.c +++ b/trunk/arch/powerpc/kernel/asm-offsets.c @@ -127,12 +127,6 @@ int main(void) DEFINE(THREAD_BESCR, offsetof(struct thread_struct, bescr)); DEFINE(THREAD_EBBHR, offsetof(struct thread_struct, ebbhr)); DEFINE(THREAD_EBBRR, offsetof(struct thread_struct, ebbrr)); - DEFINE(THREAD_SIAR, offsetof(struct thread_struct, siar)); - DEFINE(THREAD_SDAR, offsetof(struct thread_struct, sdar)); - DEFINE(THREAD_SIER, offsetof(struct thread_struct, sier)); - DEFINE(THREAD_MMCR0, offsetof(struct thread_struct, mmcr0)); - DEFINE(THREAD_MMCR2, offsetof(struct thread_struct, mmcr2)); - DEFINE(THREAD_MMCRA, offsetof(struct thread_struct, mmcra)); #endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch)); diff --git a/trunk/arch/powerpc/kernel/cpu_setup_power.S b/trunk/arch/powerpc/kernel/cpu_setup_power.S index 18b5b9cf8e37..a283b6442b26 100644 --- a/trunk/arch/powerpc/kernel/cpu_setup_power.S +++ b/trunk/arch/powerpc/kernel/cpu_setup_power.S @@ -135,12 +135,8 @@ __init_HFSCR: blr __init_TLB: - /* - * Clear the TLB using the "IS 3" form of tlbiel instruction - * (invalidate by congruence class). 
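Both encodings of the transaction-abort cause codes that appear in the hunks above (the 0xf? values added to reg.h and the 0xd? values in the deleted uapi tm.h, where persistence is OR-ed in as TM_CAUSE_PERSISTENT 0x01) share one convention: bit 0 of the cause, copied into TEXASR, marks the failure as persistent. A short check of that convention, using the values quoted in the diff:

#include <stdio.h>

/* Cause codes as added to reg.h in the hunk above. */
#define TM_CAUSE_RESCHED        0xfe
#define TM_CAUSE_SYSCALL        0xf9    /* persistent */
#define TM_CAUSE_SIGNAL         0xf4

/* Bit 0 set => the failure is reported as persistent. */
static int tm_cause_is_persistent(unsigned int cause)
{
        return cause & 0x1;
}

int main(void)
{
        printf("RESCHED persistent? %d\n", tm_cause_is_persistent(TM_CAUSE_RESCHED)); /* 0 */
        printf("SYSCALL persistent? %d\n", tm_cause_is_persistent(TM_CAUSE_SYSCALL)); /* 1 */
        printf("SIGNAL  persistent? %d\n", tm_cause_is_persistent(TM_CAUSE_SIGNAL));  /* 0 */
        return 0;
}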
P7 has 128 CCs, P8 has 512 - * so we just always do 512 - */ - li r6,512 + /* Clear the TLB */ + li r6,128 mtctr r6 li r7,0xc00 /* IS field = 0b11 */ ptesync diff --git a/trunk/arch/powerpc/kernel/cputable.c b/trunk/arch/powerpc/kernel/cputable.c index 2a45d0f04385..c60bbec25c1f 100644 --- a/trunk/arch/powerpc/kernel/cputable.c +++ b/trunk/arch/powerpc/kernel/cputable.c @@ -452,7 +452,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .mmu_features = MMU_FTRS_POWER8, .icache_bsize = 128, .dcache_bsize = 128, - .oprofile_type = PPC_OPROFILE_INVALID, + .oprofile_type = PPC_OPROFILE_POWER4, .oprofile_cpu_type = "ppc64/ibm-compat-v1", .cpu_setup = __setup_cpu_power8, .cpu_restore = __restore_cpu_power8, @@ -482,7 +482,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER7+ (raw)", .cpu_features = CPU_FTRS_POWER7, .cpu_user_features = COMMON_USER_POWER7, - .cpu_user_features2 = COMMON_USER2_POWER7, + .cpu_user_features = COMMON_USER2_POWER7, .mmu_features = MMU_FTRS_POWER7, .icache_bsize = 128, .dcache_bsize = 128, @@ -507,7 +507,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power8", - .oprofile_type = PPC_OPROFILE_INVALID, + .oprofile_type = PPC_OPROFILE_POWER4, .cpu_setup = __setup_cpu_power8, .cpu_restore = __restore_cpu_power8, .platform = "power8", diff --git a/trunk/arch/powerpc/kernel/entry_32.S b/trunk/arch/powerpc/kernel/entry_32.S index 22b45a4955cd..e514de57a125 100644 --- a/trunk/arch/powerpc/kernel/entry_32.S +++ b/trunk/arch/powerpc/kernel/entry_32.S @@ -439,6 +439,8 @@ ret_from_fork: ret_from_kernel_thread: REST_NVGPRS(r1) bl schedule_tail + li r3,0 + stw r3,0(r1) mtlr r14 mr r3,r15 PPC440EP_ERR42 @@ -849,7 +851,7 @@ resume_kernel: /* check current_thread_info, _TIF_EMULATE_STACK_STORE */ CURRENT_THREAD_INFO(r9, r1) lwz r8,TI_FLAGS(r9) - andis. r0,r8,_TIF_EMULATE_STACK_STORE@h + andis. r8,r8,_TIF_EMULATE_STACK_STORE@h beq+ 1f addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */ diff --git a/trunk/arch/powerpc/kernel/entry_64.S b/trunk/arch/powerpc/kernel/entry_64.S index 8741c854e03d..915fbb4fc2fe 100644 --- a/trunk/arch/powerpc/kernel/entry_64.S +++ b/trunk/arch/powerpc/kernel/entry_64.S @@ -33,7 +33,6 @@ #include #include #include -#include /* * System calls. @@ -377,6 +376,8 @@ _GLOBAL(ret_from_fork) _GLOBAL(ret_from_kernel_thread) bl .schedule_tail REST_NVGPRS(r1) + li r3,0 + std r3,0(r1) ld r14, 0(r14) mtlr r14 mr r3,r15 @@ -487,13 +488,6 @@ BEGIN_FTR_SECTION ldarx r6,0,r1 END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS) -#ifdef CONFIG_PPC_BOOK3S -/* Cancel all explict user streams as they will have no use after context - * switch and will stop the HW from creating streams itself - */ - DCBT_STOP_ALL_STREAM_IDS(r6) -#endif - addi r6,r4,-THREAD /* Convert THREAD to 'current' */ std r6,PACACURRENT(r13) /* Set new 'current' */ @@ -640,7 +634,7 @@ _GLOBAL(ret_from_except_lite) andi. r0,r4,_TIF_NEED_RESCHED beq 1f bl .restore_interrupts - SCHEDULE_USER + bl .schedule b .ret_from_except_lite 1: bl .save_nvgprs diff --git a/trunk/arch/powerpc/kernel/exceptions-64e.S b/trunk/arch/powerpc/kernel/exceptions-64e.S index 645170a07ada..42a756eec9ff 100644 --- a/trunk/arch/powerpc/kernel/exceptions-64e.S +++ b/trunk/arch/powerpc/kernel/exceptions-64e.S @@ -489,7 +489,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) */ mfspr r14,SPRN_DBSR /* check single-step/branch taken */ - andis. r15,r14,(DBSR_IC|DBSR_BT)@h + andis. 
r15,r14,DBSR_IC@h beq+ 1f LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) @@ -500,7 +500,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) bge+ cr1,1f /* here it looks like we got an inappropriate debug exception. */ - lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */ + lis r14,DBSR_IC@h /* clear the IC event */ rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */ mtspr SPRN_DBSR,r14 mtspr SPRN_CSRR1,r11 @@ -555,7 +555,7 @@ kernel_dbg_exc: */ mfspr r14,SPRN_DBSR /* check single-step/branch taken */ - andis. r15,r14,(DBSR_IC|DBSR_BT)@h + andis. r15,r14,DBSR_IC@h beq+ 1f LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) @@ -566,7 +566,7 @@ kernel_dbg_exc: bge+ cr1,1f /* here it looks like we got an inappropriate debug exception. */ - lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */ + lis r14,DBSR_IC@h /* clear the IC event */ rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */ mtspr SPRN_DBSR,r14 mtspr SPRN_DSRR1,r11 diff --git a/trunk/arch/powerpc/kernel/exceptions-64s.S b/trunk/arch/powerpc/kernel/exceptions-64s.S index 40e4a17c8ba0..e6eba1bf61ad 100644 --- a/trunk/arch/powerpc/kernel/exceptions-64s.S +++ b/trunk/arch/powerpc/kernel/exceptions-64s.S @@ -454,14 +454,38 @@ BEGIN_FTR_SECTION xori r10,r10,(MSR_FE0|MSR_FE1) mtmsrd r10 sync - -#define FMR2(n) fmr (n), (n) ; fmr n+1, n+1 -#define FMR4(n) FMR2(n) ; FMR2(n+2) -#define FMR8(n) FMR4(n) ; FMR4(n+4) -#define FMR16(n) FMR8(n) ; FMR8(n+8) -#define FMR32(n) FMR16(n) ; FMR16(n+16) - FMR32(0) - + fmr 0,0 + fmr 1,1 + fmr 2,2 + fmr 3,3 + fmr 4,4 + fmr 5,5 + fmr 6,6 + fmr 7,7 + fmr 8,8 + fmr 9,9 + fmr 10,10 + fmr 11,11 + fmr 12,12 + fmr 13,13 + fmr 14,14 + fmr 15,15 + fmr 16,16 + fmr 17,17 + fmr 18,18 + fmr 19,19 + fmr 20,20 + fmr 21,21 + fmr 22,22 + fmr 23,23 + fmr 24,24 + fmr 25,25 + fmr 26,26 + fmr 27,27 + fmr 28,28 + fmr 29,29 + fmr 30,30 + fmr 31,31 FTR_SECTION_ELSE /* * To denormalise we need to move a copy of the register to itself. @@ -471,25 +495,39 @@ FTR_SECTION_ELSE oris r10,r10,MSR_VSX@h mtmsrd r10 sync - -#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1) -#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2) -#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4) -#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8) -#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16) - XVCPSGNDP32(0) - + XVCPSGNDP(0,0,0) + XVCPSGNDP(1,1,1) + XVCPSGNDP(2,2,2) + XVCPSGNDP(3,3,3) + XVCPSGNDP(4,4,4) + XVCPSGNDP(5,5,5) + XVCPSGNDP(6,6,6) + XVCPSGNDP(7,7,7) + XVCPSGNDP(8,8,8) + XVCPSGNDP(9,9,9) + XVCPSGNDP(10,10,10) + XVCPSGNDP(11,11,11) + XVCPSGNDP(12,12,12) + XVCPSGNDP(13,13,13) + XVCPSGNDP(14,14,14) + XVCPSGNDP(15,15,15) + XVCPSGNDP(16,16,16) + XVCPSGNDP(17,17,17) + XVCPSGNDP(18,18,18) + XVCPSGNDP(19,19,19) + XVCPSGNDP(20,20,20) + XVCPSGNDP(21,21,21) + XVCPSGNDP(22,22,22) + XVCPSGNDP(23,23,23) + XVCPSGNDP(24,24,24) + XVCPSGNDP(25,25,25) + XVCPSGNDP(26,26,26) + XVCPSGNDP(27,27,27) + XVCPSGNDP(28,28,28) + XVCPSGNDP(29,29,29) + XVCPSGNDP(30,30,30) + XVCPSGNDP(31,31,31) ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206) - -BEGIN_FTR_SECTION - b denorm_done -END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) -/* - * To denormalise we need to move a copy of the register to itself. 
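One side of the exceptions-64s.S hunk above writes all 32 fmr (and xvcpsgndp) instructions out by hand; the other generates them with doubling macros, where FMR2 emits two copies, FMR4 two FMR2s, and so on up to FMR32(0). The same trick works in the C preprocessor; STEP and its printf body below are placeholders for the repeated instruction, chosen only to make the expansion visible:

#include <stdio.h>

#define STEP(n)         printf("step %d\n", (n));
#define STEP2(n)        STEP(n)    STEP((n) + 1)
#define STEP4(n)        STEP2(n)   STEP2((n) + 2)
#define STEP8(n)        STEP4(n)   STEP4((n) + 4)
#define STEP16(n)       STEP8(n)   STEP8((n) + 8)
#define STEP32(n)       STEP16(n)  STEP16((n) + 16)

int main(void)
{
        STEP32(0)       /* expands to 32 printf statements, steps 0..31 */
        return 0;
}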
- * For POWER8 we need to do that for all 64 VSX registers - */ - XVCPSGNDP32(32) -denorm_done: mtspr SPRN_HSRR0,r11 mtcrf 0x80,r9 ld r9,PACA_EXGEN+EX_R9(r13) @@ -683,7 +721,7 @@ machine_check_common: STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) - STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt) + STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception) STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) #ifdef CONFIG_PPC_DOORBELL STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception) diff --git a/trunk/arch/powerpc/kernel/irq.c b/trunk/arch/powerpc/kernel/irq.c index ea185e0b3cae..5cbcf4d5a808 100644 --- a/trunk/arch/powerpc/kernel/irq.c +++ b/trunk/arch/powerpc/kernel/irq.c @@ -162,7 +162,7 @@ notrace unsigned int __check_irq_replay(void) * in case we also had a rollover while hard disabled */ local_paca->irq_happened &= ~PACA_IRQ_DEC; - if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow()) + if (decrementer_check_overflow()) return 0x900; /* Finally check if an external interrupt happened */ diff --git a/trunk/arch/powerpc/kernel/machine_kexec_64.c b/trunk/arch/powerpc/kernel/machine_kexec_64.c index 611acdf30096..466a2908bb63 100644 --- a/trunk/arch/powerpc/kernel/machine_kexec_64.c +++ b/trunk/arch/powerpc/kernel/machine_kexec_64.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -336,13 +335,10 @@ void default_machine_kexec(struct kimage *image) pr_debug("kexec: Starting switchover sequence.\n"); /* switch to a staticly allocated stack. Based on irq stack code. - * We setup preempt_count to avoid using VMX in memcpy. * XXX: the task struct will likely be invalid once we do the copy! */ kexec_stack.thread_info.task = current_thread_info()->task; kexec_stack.thread_info.flags = 0; - kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET; - kexec_stack.thread_info.cpu = current_thread_info()->cpu; /* We need a static PACA, too; copy this CPU's PACA over and switch to * it. 
Also poison per_cpu_offset to catch anyone using non-static diff --git a/trunk/arch/powerpc/kernel/misc_32.S b/trunk/arch/powerpc/kernel/misc_32.S index e469f30e6eeb..19e096bd0e73 100644 --- a/trunk/arch/powerpc/kernel/misc_32.S +++ b/trunk/arch/powerpc/kernel/misc_32.S @@ -657,17 +657,6 @@ _GLOBAL(__ucmpdi2) li r3,2 blr -_GLOBAL(__bswapdi2) - rotlwi r9,r4,8 - rotlwi r10,r3,8 - rlwimi r9,r4,24,0,7 - rlwimi r10,r3,24,0,7 - rlwimi r9,r4,24,16,23 - rlwimi r10,r3,24,16,23 - mr r3,r9 - mr r4,r10 - blr - _GLOBAL(abs) srawi r4,r3,31 xor r3,r3,r4 diff --git a/trunk/arch/powerpc/kernel/misc_64.S b/trunk/arch/powerpc/kernel/misc_64.S index 6820e45f557b..5cfa8008693b 100644 --- a/trunk/arch/powerpc/kernel/misc_64.S +++ b/trunk/arch/powerpc/kernel/misc_64.S @@ -234,17 +234,6 @@ _GLOBAL(__flush_dcache_icache) isync blr -_GLOBAL(__bswapdi2) - srdi r8,r3,32 - rlwinm r7,r3,8,0xffffffff - rlwimi r7,r3,24,0,7 - rlwinm r9,r8,8,0xffffffff - rlwimi r7,r3,24,16,23 - rlwimi r9,r8,24,0,7 - rlwimi r9,r8,24,16,23 - sldi r7,r7,32 - or r3,r7,r9 - blr #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) /* diff --git a/trunk/arch/powerpc/kernel/pci-common.c b/trunk/arch/powerpc/kernel/pci-common.c index f46914a0f33e..f5c5c90799a7 100644 --- a/trunk/arch/powerpc/kernel/pci-common.c +++ b/trunk/arch/powerpc/kernel/pci-common.c @@ -359,6 +359,7 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, enum pci_mmap_state mmap_state, int write_combine) { + unsigned long prot = pgprot_val(protection); /* Write combine is always 0 on non-memory space mappings. On * memory space, if the user didn't pass 1, we check for a @@ -375,9 +376,9 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, /* XXX would be nice to have a way to ask for write-through */ if (write_combine) - return pgprot_noncached_wc(protection); + return pgprot_noncached_wc(prot); else - return pgprot_noncached(protection); + return pgprot_noncached(prot); } /* @@ -657,6 +658,15 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar, * ranges. However, some machines (thanks Apple !) tend to split their * space into lots of small contiguous ranges. So we have to coalesce. * + * - We can only cope with all memory ranges having the same offset + * between CPU addresses and PCI addresses. Unfortunately, some bridges + * are setup for a large 1:1 mapping along with a small "window" which + * maps PCI address 0 to some arbitrary high address of the CPU space in + * order to give access to the ISA memory hole. + * The way out of here that I've chosen for now is to always set the + * offset based on the first resource found, then override it if we + * have a different offset and the previous was set by an ISA hole. + * * - Some busses have IO space not starting at 0, which causes trouble with * the way we do our IO resource renumbering. The code somewhat deals with * it for 64 bits but I would expect problems on 32 bits. 
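The __bswapdi2 routines removed from misc_32.S and misc_64.S above byte-swap a 64-bit value by swapping each 32-bit half and exchanging the halves. For reference, an equivalent in portable C; bswap32 and bswap64 are local helpers written for this sketch, not kernel symbols:

#include <stdio.h>
#include <stdint.h>

/* Swap the bytes of one 32-bit word. */
static uint32_t bswap32(uint32_t x)
{
        return  (x >> 24) |
                ((x >> 8)  & 0x0000ff00u) |
                ((x << 8)  & 0x00ff0000u) |
                 (x << 24);
}

/* 64-bit byte swap built from two 32-bit swaps of the exchanged halves,
 * which is what the removed rotate-and-insert assembly computes. */
static uint64_t bswap64(uint64_t x)
{
        return ((uint64_t)bswap32((uint32_t)x) << 32) |
                bswap32((uint32_t)(x >> 32));
}

int main(void)
{
        uint64_t v = 0x0102030405060708ULL;

        printf("%016llx -> %016llx\n",
               (unsigned long long)v, (unsigned long long)bswap64(v));
        /* expected: 0102030405060708 -> 0807060504030201 */
        return 0;
}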
@@ -671,9 +681,10 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose, int rlen; int pna = of_n_addr_cells(dev); int np = pna + 5; - int memno = 0; + int memno = 0, isa_hole = -1; u32 pci_space; unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size; + unsigned long long isa_mb = 0; struct resource *res; printk(KERN_INFO "PCI host bridge %s %s ranges:\n", @@ -767,6 +778,8 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose, } /* Handles ISA memory hole space here */ if (pci_addr == 0) { + isa_mb = cpu_addr; + isa_hole = memno; if (primary || isa_mem_base == 0) isa_mem_base = cpu_addr; hose->isa_mem_phys = cpu_addr; @@ -827,7 +840,6 @@ static void pcibios_fixup_resources(struct pci_dev *dev) } for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { struct resource *res = dev->resource + i; - struct pci_bus_region reg; if (!res->flags) continue; @@ -836,9 +848,8 @@ static void pcibios_fixup_resources(struct pci_dev *dev) * at 0 as unset as well, except if PCI_PROBE_ONLY is also set * since in that case, we don't want to re-assign anything */ - pcibios_resource_to_bus(dev, ®, res); if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) || - (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) { + (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) { /* Only print message if not re-assigning */ if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] " @@ -994,7 +1005,7 @@ void pcibios_setup_bus_self(struct pci_bus *bus) ppc_md.pci_dma_bus_setup(bus); } -static void pcibios_setup_device(struct pci_dev *dev) +void pcibios_setup_device(struct pci_dev *dev) { /* Fixup NUMA node as it may not be setup yet by the generic * code and is needed by the DMA init @@ -1015,17 +1026,6 @@ static void pcibios_setup_device(struct pci_dev *dev) ppc_md.pci_irq_fixup(dev); } -int pcibios_add_device(struct pci_dev *dev) -{ - /* - * We can only call pcibios_setup_device() after bus setup is complete, - * since some of the platform specific DMA setup code depends on it. 
- */ - if (dev->bus->is_added) - pcibios_setup_device(dev); - return 0; -} - void pcibios_setup_bus_devices(struct pci_bus *bus) { struct pci_dev *dev; @@ -1480,6 +1480,10 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) if (ppc_md.pcibios_enable_device_hook(dev)) return -EINVAL; + /* avoid pcie irq fix up impact on cardbus */ + if (dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) + pcibios_setup_device(dev); + return pci_enable_resources(dev, mask); } @@ -1517,10 +1521,9 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose, for (i = 0; i < 3; ++i) { res = &hose->mem_resources[i]; if (!res->flags) { - if (i == 0) - printk(KERN_ERR "PCI: Memory resource 0 not set for " - "host bridge %s (domain %d)\n", - hose->dn->full_name, hose->global_number); + printk(KERN_ERR "PCI: Memory resource 0 not set for " + "host bridge %s (domain %d)\n", + hose->dn->full_name, hose->global_number); continue; } offset = hose->mem_offset[i]; diff --git a/trunk/arch/powerpc/kernel/pci_64.c b/trunk/arch/powerpc/kernel/pci_64.c index 2e8629654ca8..873050d26840 100644 --- a/trunk/arch/powerpc/kernel/pci_64.c +++ b/trunk/arch/powerpc/kernel/pci_64.c @@ -266,13 +266,3 @@ int pcibus_to_node(struct pci_bus *bus) } EXPORT_SYMBOL(pcibus_to_node); #endif - -static void quirk_radeon_32bit_msi(struct pci_dev *dev) -{ - struct pci_dn *pdn = pci_get_pdn(dev); - - if (pdn) - pdn->force_32bit_msi = 1; -} -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi); diff --git a/trunk/arch/powerpc/kernel/pci_dn.c b/trunk/arch/powerpc/kernel/pci_dn.c index df038442548a..e7af165f8b9d 100644 --- a/trunk/arch/powerpc/kernel/pci_dn.c +++ b/trunk/arch/powerpc/kernel/pci_dn.c @@ -32,14 +32,6 @@ #include #include -struct pci_dn *pci_get_pdn(struct pci_dev *pdev) -{ - struct device_node *dn = pci_device_to_OF_node(pdev); - if (!dn) - return NULL; - return PCI_DN(dn); -} - /* * Traverse_func that inits the PCI fields of the device node. * NOTE: this *must* be done before read/write config to the device. diff --git a/trunk/arch/powerpc/kernel/ppc_ksyms.c b/trunk/arch/powerpc/kernel/ppc_ksyms.c index c29666586998..78b8766fd79e 100644 --- a/trunk/arch/powerpc/kernel/ppc_ksyms.c +++ b/trunk/arch/powerpc/kernel/ppc_ksyms.c @@ -143,8 +143,7 @@ EXPORT_SYMBOL(__lshrdi3); int __ucmpdi2(unsigned long long, unsigned long long); EXPORT_SYMBOL(__ucmpdi2); #endif -long long __bswapdi2(long long); -EXPORT_SYMBOL(__bswapdi2); + EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memmove); diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c index 076d1242507a..ceb4e7b62cf4 100644 --- a/trunk/arch/powerpc/kernel/process.c +++ b/trunk/arch/powerpc/kernel/process.c @@ -339,13 +339,6 @@ static void set_debug_reg_defaults(struct thread_struct *thread) static void prime_debug_regs(struct thread_struct *thread) { - /* - * We could have inherited MSR_DE from userspace, since - * it doesn't get cleared on exception entry. Make sure - * MSR_DE is clear before we enable any debug events. 
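The removed comment above, together with the mtmsr() line that opens the next chunk, makes one point: a debug-enable bit inherited in the MSR must be cleared before the individual debug registers are reprogrammed, so a stale enable cannot fire mid-update. A toy model of that ordering; the register names, the 0x200 bit value and the contents below are illustrative only:

#include <stdio.h>

#define MSR_DE 0x200                    /* illustrative debug-enable bit */

static unsigned long msr = 0x1200;      /* pretend MSR with debug enabled */
static unsigned long iac1, dbcr0;       /* stand-ins for the debug SPRs */

/* Clear the global enable first, then program the event registers. */
static void prime_debug_regs_model(unsigned long new_iac1,
                                   unsigned long new_dbcr0)
{
        msr &= ~MSR_DE;         /* models mtmsr(mfmsr() & ~MSR_DE) */
        iac1  = new_iac1;
        dbcr0 = new_dbcr0;
}

int main(void)
{
        prime_debug_regs_model(0x10000400, 0x40000000);
        printf("msr=%#lx iac1=%#lx dbcr0=%#lx\n", msr, iac1, dbcr0);
        return 0;
}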
- */ - mtmsr(mfmsr() & ~MSR_DE); - mtspr(SPRN_IAC1, thread->iac1); mtspr(SPRN_IAC2, thread->iac2); #if CONFIG_PPC_ADV_DEBUG_IACS > 2 @@ -399,8 +392,7 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) { mtspr(SPRN_DABR, dabr); - if (cpu_has_feature(CPU_FTR_DABRX)) - mtspr(SPRN_DABRX, dabrx); + mtspr(SPRN_DABRX, dabrx); return 0; } #else @@ -979,7 +971,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, * do some house keeping and then return from the fork or clone * system call, using the stack frame created above. */ - ((unsigned long *)sp)[0] = 0; sp -= sizeof(struct pt_regs); kregs = (struct pt_regs *) sp; sp -= STACK_FRAME_OVERHEAD; @@ -1369,7 +1360,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) #ifdef CONFIG_PPC64 /* Called with hard IRQs off */ -void notrace __ppc64_runlatch_on(void) +void __ppc64_runlatch_on(void) { struct thread_info *ti = current_thread_info(); unsigned long ctrl; @@ -1382,7 +1373,7 @@ void notrace __ppc64_runlatch_on(void) } /* Called with hard IRQs off */ -void notrace __ppc64_runlatch_off(void) +void __ppc64_runlatch_off(void) { struct thread_info *ti = current_thread_info(); unsigned long ctrl; diff --git a/trunk/arch/powerpc/kernel/ptrace.c b/trunk/arch/powerpc/kernel/ptrace.c index 98c2fc198712..3b14d320e69f 100644 --- a/trunk/arch/powerpc/kernel/ptrace.c +++ b/trunk/arch/powerpc/kernel/ptrace.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include @@ -1789,8 +1788,6 @@ long do_syscall_trace_enter(struct pt_regs *regs) { long ret = 0; - user_exit(); - secure_computing_strict(regs->gpr[0]); if (test_thread_flag(TIF_SYSCALL_TRACE) && @@ -1835,6 +1832,4 @@ void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, step); - - user_enter(); } diff --git a/trunk/arch/powerpc/kernel/rtas.c b/trunk/arch/powerpc/kernel/rtas.c index 52add6f3e201..1fd6e7b2f390 100644 --- a/trunk/arch/powerpc/kernel/rtas.c +++ b/trunk/arch/powerpc/kernel/rtas.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -808,95 +807,6 @@ static void rtas_percpu_suspend_me(void *info) __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); } -enum rtas_cpu_state { - DOWN, - UP, -}; - -#ifndef CONFIG_SMP -static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, - cpumask_var_t cpus) -{ - if (!cpumask_empty(cpus)) { - cpumask_clear(cpus); - return -EINVAL; - } else - return 0; -} -#else -/* On return cpumask will be altered to indicate CPUs changed. - * CPUs with states changed will be set in the mask, - * CPUs with status unchanged will be unset in the mask. */ -static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, - cpumask_var_t cpus) -{ - int cpu; - int cpuret = 0; - int ret = 0; - - if (cpumask_empty(cpus)) - return 0; - - for_each_cpu(cpu, cpus) { - switch (state) { - case DOWN: - cpuret = cpu_down(cpu); - break; - case UP: - cpuret = cpu_up(cpu); - break; - } - if (cpuret) { - pr_debug("%s: cpu_%s for cpu#%d returned %d.\n", - __func__, - ((state == UP) ? 
"up" : "down"), - cpu, cpuret); - if (!ret) - ret = cpuret; - if (state == UP) { - /* clear bits for unchanged cpus, return */ - cpumask_shift_right(cpus, cpus, cpu); - cpumask_shift_left(cpus, cpus, cpu); - break; - } else { - /* clear bit for unchanged cpu, continue */ - cpumask_clear_cpu(cpu, cpus); - } - } - } - - return ret; -} -#endif - -int rtas_online_cpus_mask(cpumask_var_t cpus) -{ - int ret; - - ret = rtas_cpu_state_change_mask(UP, cpus); - - if (ret) { - cpumask_var_t tmp_mask; - - if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY)) - return ret; - - /* Use tmp_mask to preserve cpus mask from first failure */ - cpumask_copy(tmp_mask, cpus); - rtas_offline_cpus_mask(tmp_mask); - free_cpumask_var(tmp_mask); - } - - return ret; -} -EXPORT_SYMBOL(rtas_online_cpus_mask); - -int rtas_offline_cpus_mask(cpumask_var_t cpus) -{ - return rtas_cpu_state_change_mask(DOWN, cpus); -} -EXPORT_SYMBOL(rtas_offline_cpus_mask); - int rtas_ibm_suspend_me(struct rtas_args *args) { long state; @@ -904,8 +814,6 @@ int rtas_ibm_suspend_me(struct rtas_args *args) unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; struct rtas_suspend_me_data data; DECLARE_COMPLETION_ONSTACK(done); - cpumask_var_t offline_mask; - int cpuret; if (!rtas_service_present("ibm,suspend-me")) return -ENOSYS; @@ -929,24 +837,11 @@ int rtas_ibm_suspend_me(struct rtas_args *args) return 0; } - if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) - return -ENOMEM; - atomic_set(&data.working, 0); atomic_set(&data.done, 0); atomic_set(&data.error, 0); data.token = rtas_token("ibm,suspend-me"); data.complete = &done; - - /* All present CPUs must be online */ - cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask); - cpuret = rtas_online_cpus_mask(offline_mask); - if (cpuret) { - pr_err("%s: Could not bring present CPUs online.\n", __func__); - atomic_set(&data.error, cpuret); - goto out; - } - stop_topology_update(); /* Call function on all CPUs. 
One of us will make the @@ -962,14 +857,6 @@ int rtas_ibm_suspend_me(struct rtas_args *args) start_topology_update(); - /* Take down CPUs not online prior to suspend */ - cpuret = rtas_offline_cpus_mask(offline_mask); - if (cpuret) - pr_warn("%s: Could not restore CPUs to offline state.\n", - __func__); - -out: - free_cpumask_var(offline_mask); return atomic_read(&data.error); } #else /* CONFIG_PPC_PSERIES */ diff --git a/trunk/arch/powerpc/kernel/rtas_flash.c b/trunk/arch/powerpc/kernel/rtas_flash.c index 2f3cdb01506d..5b3022470126 100644 --- a/trunk/arch/powerpc/kernel/rtas_flash.c +++ b/trunk/arch/powerpc/kernel/rtas_flash.c @@ -89,7 +89,6 @@ /* Array sizes */ #define VALIDATE_BUF_SIZE 4096 -#define VALIDATE_MSG_LEN 256 #define RTAS_MSG_MAXLEN 64 /* Quirk - RTAS requires 4k list length and block size */ @@ -467,7 +466,7 @@ static void validate_flash(struct rtas_validate_flash_t *args_buf) } static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, - char *msg, int msglen) + char *msg) { int n; @@ -475,8 +474,7 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, n = sprintf(msg, "%d\n", args_buf->update_results); if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) || (args_buf->update_results == VALIDATE_TMP_UPDATE)) - n += snprintf(msg + n, msglen - n, "%s\n", - args_buf->buf); + n += sprintf(msg + n, "%s\n", args_buf->buf); } else { n = sprintf(msg, "%d\n", args_buf->status); } @@ -488,11 +486,11 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf, { struct rtas_validate_flash_t *const args_buf = &rtas_validate_flash_data; - char msg[VALIDATE_MSG_LEN]; + char msg[RTAS_MSG_MAXLEN]; int msglen; mutex_lock(&rtas_validate_flash_mutex); - msglen = get_validate_flash_msg(args_buf, msg, VALIDATE_MSG_LEN); + msglen = get_validate_flash_msg(args_buf, msg); mutex_unlock(&rtas_validate_flash_mutex); return simple_read_from_buffer(buf, count, ppos, msg, msglen); diff --git a/trunk/arch/powerpc/kernel/signal.c b/trunk/arch/powerpc/kernel/signal.c index 457e97aa2945..cf12eae02de5 100644 --- a/trunk/arch/powerpc/kernel/signal.c +++ b/trunk/arch/powerpc/kernel/signal.c @@ -13,12 +13,10 @@ #include #include #include -#include #include #include #include #include -#include #include "signal.h" @@ -26,18 +24,18 @@ * through debug.exception-trace sysctl. */ -int show_unhandled_signals = 1; +int show_unhandled_signals = 0; /* * Allocate space for the signal frame */ -void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp, +void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, int is_32) { unsigned long oldsp, newsp; /* Default to using normal stack */ - oldsp = get_clean_sp(sp, is_32); + oldsp = get_clean_sp(regs, is_32); /* Check for alt stack */ if ((ka->sa.sa_flags & SA_ONSTACK) && @@ -161,8 +159,6 @@ static int do_signal(struct pt_regs *regs) void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) { - user_exit(); - if (thread_info_flags & _TIF_UPROBE) uprobe_notify_resume(regs); @@ -173,41 +169,4 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); } - - user_enter(); -} - -unsigned long get_tm_stackpointer(struct pt_regs *regs) -{ - /* When in an active transaction that takes a signal, we need to be - * careful with the stack. It's possible that the stack has moved back - * up after the tbegin. 
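rtas_online_cpus_mask() and rtas_offline_cpus_mask(), removed in the rtas.c hunk above, bring every CPU in a mask online before a firmware suspend and undo the ones that succeeded if any cpu_up() fails. Stripped of the cpumask and hotplug machinery (bring_up, take_down, the item count and the simulated failure are all invented), the rollback pattern is:

#include <stdio.h>

#define NR_ITEMS 4

/* 0 = down, 1 = up; item 2 is wired to fail so the rollback path runs. */
static int state[NR_ITEMS];

static int bring_up(int i)
{
        if (i == 2)
                return -1;      /* simulated cpu_up() failure */
        state[i] = 1;
        return 0;
}

static void take_down(int i)
{
        state[i] = 0;
}

/* On failure, undo whatever had already been brought up. */
static int bring_up_all(void)
{
        int i, ret;

        for (i = 0; i < NR_ITEMS; i++) {
                ret = bring_up(i);
                if (ret) {
                        while (--i >= 0)
                                take_down(i);   /* rollback */
                        return ret;
                }
        }
        return 0;
}

int main(void)
{
        int ret = bring_up_all();

        printf("bring_up_all() = %d, states:", ret);
        for (int i = 0; i < NR_ITEMS; i++)
                printf(" %d", state[i]);
        printf("\n");   /* expected: -1, states: 0 0 0 0 */
        return 0;
}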
The obvious case here is when the tbegin is - * called inside a function that returns before a tend. In this case, - * the stack is part of the checkpointed transactional memory state. - * If we write over this non transactionally or in suspend, we are in - * trouble because if we get a tm abort, the program counter and stack - * pointer will be back at the tbegin but our in memory stack won't be - * valid anymore. - * - * To avoid this, when taking a signal in an active transaction, we - * need to use the stack pointer from the checkpointed state, rather - * than the speculated state. This ensures that the signal context - * (written tm suspended) will be written below the stack required for - * the rollback. The transaction is aborted becuase of the treclaim, - * so any memory written between the tbegin and the signal will be - * rolled back anyway. - * - * For signals taken in non-TM or suspended mode, we use the - * normal/non-checkpointed stack pointer. - */ - -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM - if (MSR_TM_ACTIVE(regs->msr)) { - tm_enable(); - tm_reclaim(¤t->thread, regs->msr, TM_CAUSE_SIGNAL); - if (MSR_TM_TRANSACTIONAL(regs->msr)) - return current->thread.ckpt_regs.gpr[1]; - } -#endif - return regs->gpr[1]; } diff --git a/trunk/arch/powerpc/kernel/signal.h b/trunk/arch/powerpc/kernel/signal.h index c69b9aeb9f23..ec84c901ceab 100644 --- a/trunk/arch/powerpc/kernel/signal.h +++ b/trunk/arch/powerpc/kernel/signal.h @@ -12,7 +12,7 @@ extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags); -extern void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp, +extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, int is_32); extern int handle_signal32(unsigned long sig, struct k_sigaction *ka, diff --git a/trunk/arch/powerpc/kernel/signal_32.c b/trunk/arch/powerpc/kernel/signal_32.c index 201385c3a1ae..95068bf569ad 100644 --- a/trunk/arch/powerpc/kernel/signal_32.c +++ b/trunk/arch/powerpc/kernel/signal_32.c @@ -503,6 +503,12 @@ static int save_tm_user_regs(struct pt_regs *regs, { unsigned long msr = regs->msr; + /* tm_reclaim rolls back all reg states, updating thread.ckpt_regs, + * thread.transact_fpr[], thread.transact_vr[], etc. 
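The comment above, and the get_tm_stackpointer() body removed with it, reduce to one decision: when a signal is taken inside an active transaction, the frame must be placed relative to the checkpointed stack pointer rather than the speculative one (after the reclaim has run). A plain-C caricature of just that choice, with invented values:

#include <stdio.h>

/* Pick the stack pointer a signal frame should be written below. */
static unsigned long signal_sp(int tm_active, unsigned long live_sp,
                               unsigned long ckpt_sp)
{
        /* In an active transaction the live sp is speculative and may be
         * rolled back, so the checkpointed value is used instead. */
        return tm_active ? ckpt_sp : live_sp;
}

int main(void)
{
        unsigned long live = 0x7fffd000, ckpt = 0x7fffe000;

        printf("no transaction:     sp = %#lx\n", signal_sp(0, live, ckpt));
        printf("active transaction: sp = %#lx\n", signal_sp(1, live, ckpt));
        return 0;
}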
+ */ + tm_enable(); + tm_reclaim(¤t->thread, msr, TM_CAUSE_SIGNAL); + /* Make sure floating point registers are stored in regs */ flush_fp_to_thread(current); @@ -959,7 +965,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, /* Set up Signal Frame */ /* Put a Real Time Context onto stack */ - rt_sf = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*rt_sf), 1); + rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1); addr = rt_sf; if (unlikely(rt_sf == NULL)) goto badframe; @@ -1397,7 +1403,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, unsigned long tramp; /* Set up Signal Frame */ - frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 1); + frame = get_sigframe(ka, regs, sizeof(*frame), 1); if (unlikely(frame == NULL)) goto badframe; sc = (struct sigcontext __user *) &frame->sctx; diff --git a/trunk/arch/powerpc/kernel/signal_64.c b/trunk/arch/powerpc/kernel/signal_64.c index 345947367ec0..c1794286098c 100644 --- a/trunk/arch/powerpc/kernel/signal_64.c +++ b/trunk/arch/powerpc/kernel/signal_64.c @@ -154,12 +154,11 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, * As above, but Transactional Memory is in use, so deliver sigcontexts * containing checkpointed and transactional register states. * - * To do this, we treclaim (done before entering here) to gather both sets of - * registers and set up the 'normal' sigcontext registers with rolled-back - * register values such that a simple signal handler sees a correct - * checkpointed register state. If interested, a TM-aware sighandler can - * examine the transactional registers in the 2nd sigcontext to determine the - * real origin of the signal. + * To do this, we treclaim to gather both sets of registers and set up the + * 'normal' sigcontext registers with rolled-back register values such that a + * simple signal handler sees a correct checkpointed register state. + * If interested, a TM-aware sighandler can examine the transactional registers + * in the 2nd sigcontext to determine the real origin of the signal. */ static long setup_tm_sigcontexts(struct sigcontext __user *sc, struct sigcontext __user *tm_sc, @@ -185,6 +184,16 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc, BUG_ON(!MSR_TM_ACTIVE(regs->msr)); + /* tm_reclaim rolls back all reg states, saving checkpointed (older) + * GPRs to thread.ckpt_regs and (if used) FPRs to (newer) + * thread.transact_fp and/or VRs to (newer) thread.transact_vr. + * THEN we save out FP/VRs, if necessary, to the checkpointed (older) + * thread.fr[]/vr[]s. The transactional (newer) GPRs are on the + * stack, in *regs. 
+ */ + tm_enable(); + tm_reclaim(¤t->thread, msr, TM_CAUSE_SIGNAL); + flush_fp_to_thread(current); #ifdef CONFIG_ALTIVEC @@ -702,7 +711,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, unsigned long newsp = 0; long err = 0; - frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 0); + frame = get_sigframe(ka, regs, sizeof(*frame), 0); if (unlikely(frame == NULL)) goto badframe; diff --git a/trunk/arch/powerpc/kernel/traps.c b/trunk/arch/powerpc/kernel/traps.c index c0e5caf8ccc7..83efa2f7d926 100644 --- a/trunk/arch/powerpc/kernel/traps.c +++ b/trunk/arch/powerpc/kernel/traps.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include @@ -53,7 +52,6 @@ #ifdef CONFIG_PPC64 #include #include -#include #endif #include #include @@ -669,7 +667,6 @@ int machine_check_generic(struct pt_regs *regs) void machine_check_exception(struct pt_regs *regs) { - enum ctx_state prev_state = exception_enter(); int recover = 0; __get_cpu_var(irq_stat).mce_exceptions++; @@ -686,7 +683,7 @@ void machine_check_exception(struct pt_regs *regs) recover = cur_cpu_spec->machine_check(regs); if (recover > 0) - goto bail; + return; #if defined(CONFIG_8xx) && defined(CONFIG_PCI) /* the qspan pci read routines can cause machine checks -- Cort @@ -696,23 +693,20 @@ void machine_check_exception(struct pt_regs *regs) * -- BenH */ bad_page_fault(regs, regs->dar, SIGBUS); - goto bail; + return; #endif if (debugger_fault_handler(regs)) - goto bail; + return; if (check_io_access(regs)) - goto bail; + return; die("Machine check", regs, SIGBUS); /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) panic("Unrecoverable Machine check"); - -bail: - exception_exit(prev_state); } void SMIException(struct pt_regs *regs) @@ -722,29 +716,20 @@ void SMIException(struct pt_regs *regs) void unknown_exception(struct pt_regs *regs) { - enum ctx_state prev_state = exception_enter(); - printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", regs->nip, regs->msr, regs->trap); _exception(SIGTRAP, regs, 0, 0); - - exception_exit(prev_state); } void instruction_breakpoint_exception(struct pt_regs *regs) { - enum ctx_state prev_state = exception_enter(); - if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) - goto bail; + return; if (debugger_iabr_match(regs)) - goto bail; + return; _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); - -bail: - exception_exit(prev_state); } void RunModeException(struct pt_regs *regs) @@ -754,20 +739,15 @@ void RunModeException(struct pt_regs *regs) void __kprobes single_step_exception(struct pt_regs *regs) { - enum ctx_state prev_state = exception_enter(); - clear_single_step(regs); if (notify_die(DIE_SSTEP, "single_step", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) - goto bail; + return; if (debugger_sstep(regs)) - goto bail; + return; _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); - -bail: - exception_exit(prev_state); } /* @@ -933,28 +913,6 @@ static int emulate_isel(struct pt_regs *regs, u32 instword) return 0; } -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM -static inline bool tm_abort_check(struct pt_regs *regs, int cause) -{ - /* If we're emulating a load/store in an active transaction, we cannot - * emulate it as the kernel operates in transaction suspended context. - * We need to abort the transaction. This creates a persistent TM - * abort so tell the user what caused it with a new code. 
- */ - if (MSR_TM_TRANSACTIONAL(regs->msr)) { - tm_enable(); - tm_abort(cause); - return true; - } - return false; -} -#else -static inline bool tm_abort_check(struct pt_regs *regs, int reason) -{ - return false; -} -#endif - static int emulate_instruction(struct pt_regs *regs) { u32 instword; @@ -994,9 +952,6 @@ static int emulate_instruction(struct pt_regs *regs) /* Emulate load/store string insn. */ if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { - if (tm_abort_check(regs, - TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) - return -EINVAL; PPC_WARN_EMULATED(string, regs); return emulate_string_inst(regs, instword); } @@ -1050,7 +1005,6 @@ int is_valid_bugaddr(unsigned long addr) void __kprobes program_check_exception(struct pt_regs *regs) { - enum ctx_state prev_state = exception_enter(); unsigned int reason = get_reason(regs); extern int do_mathemu(struct pt_regs *regs); @@ -1060,26 +1014,26 @@ void __kprobes program_check_exception(struct pt_regs *regs) if (reason & REASON_FP) { /* IEEE FP exception */ parse_fpe(regs); - goto bail; + return; } if (reason & REASON_TRAP) { /* Debugger is first in line to stop recursive faults in * rcu_lock, notify_die, or atomic_notifier_call_chain */ if (debugger_bpt(regs)) - goto bail; + return; /* trap exception */ if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) - goto bail; + return; if (!(regs->msr & MSR_PR) && /* not user-mode */ report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { regs->nip += 4; - goto bail; + return; } _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); - goto bail; + return; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (reason & REASON_TM) { @@ -1095,7 +1049,7 @@ void __kprobes program_check_exception(struct pt_regs *regs) if (!user_mode(regs) && report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { regs->nip += 4; - goto bail; + return; } /* If usermode caused this, it's done something illegal and * gets a SIGILL slap on the wrist. We call it an illegal @@ -1105,7 +1059,7 @@ void __kprobes program_check_exception(struct pt_regs *regs) */ if (user_mode(regs)) { _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); - goto bail; + return; } else { printk(KERN_EMERG "Unexpected TM Bad Thing exception " "at %lx (msr 0x%x)\n", regs->nip, reason); @@ -1129,16 +1083,16 @@ void __kprobes program_check_exception(struct pt_regs *regs) switch (do_mathemu(regs)) { case 0: emulate_single_step(regs); - goto bail; + return; case 1: { int code = 0; code = __parse_fpscr(current->thread.fpscr.val); _exception(SIGFPE, regs, code, regs->nip); - goto bail; + return; } case -EFAULT: _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); - goto bail; + return; } /* fall through on any other errors */ #endif /* CONFIG_MATH_EMULATION */ @@ -1149,10 +1103,10 @@ void __kprobes program_check_exception(struct pt_regs *regs) case 0: regs->nip += 4; emulate_single_step(regs); - goto bail; + return; case -EFAULT: _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); - goto bail; + return; } } @@ -1160,33 +1114,16 @@ void __kprobes program_check_exception(struct pt_regs *regs) _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); else _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); - -bail: - exception_exit(prev_state); -} - -/* - * This occurs when running in hypervisor mode on POWER6 or later - * and an illegal instruction is encountered. 
- */ -void __kprobes emulation_assist_interrupt(struct pt_regs *regs) -{ - regs->msr |= REASON_ILLEGAL; - program_check_exception(regs); } void alignment_exception(struct pt_regs *regs) { - enum ctx_state prev_state = exception_enter(); int sig, code, fixed = 0; /* We restore the interrupt state now */ if (!arch_irq_disabled_regs(regs)) local_irq_enable(); - if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) - goto bail; - /* we don't implement logging of alignment exceptions */ if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) fixed = fix_alignment(regs); @@ -1194,7 +1131,7 @@ void alignment_exception(struct pt_regs *regs) if (fixed == 1) { regs->nip += 4; /* skip over emulated instruction */ emulate_single_step(regs); - goto bail; + return; } /* Operand address was bad */ @@ -1209,9 +1146,6 @@ void alignment_exception(struct pt_regs *regs) _exception(sig, regs, code, regs->dar); else bad_page_fault(regs, regs->dar, sig); - -bail: - exception_exit(prev_state); } void StackOverflow(struct pt_regs *regs) @@ -1240,32 +1174,23 @@ void trace_syscall(struct pt_regs *regs) void kernel_fp_unavailable_exception(struct pt_regs *regs) { - enum ctx_state prev_state = exception_enter(); - printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " "%lx at %lx\n", regs->trap, regs->nip); die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); - - exception_exit(prev_state); } void altivec_unavailable_exception(struct pt_regs *regs) { - enum ctx_state prev_state = exception_enter(); - if (user_mode(regs)) { /* A user program has executed an altivec instruction, but this kernel doesn't support altivec. */ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); - goto bail; + return; } printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " "%lx at %lx\n", regs->trap, regs->nip); die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); - -bail: - exception_exit(prev_state); } void vsx_unavailable_exception(struct pt_regs *regs) diff --git a/trunk/arch/powerpc/kernel/udbg.c b/trunk/arch/powerpc/kernel/udbg.c index 9d3fdcd66290..13b867093499 100644 --- a/trunk/arch/powerpc/kernel/udbg.c +++ b/trunk/arch/powerpc/kernel/udbg.c @@ -64,9 +64,6 @@ void __init udbg_early_init(void) udbg_init_usbgecko(); #elif defined(CONFIG_PPC_EARLY_DEBUG_WSP) udbg_init_wsp(); -#elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS) - /* In memory console */ - udbg_init_memcons(); #elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC) udbg_init_ehv_bc(); #elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC) diff --git a/trunk/arch/powerpc/kvm/44x_tlb.c b/trunk/arch/powerpc/kvm/44x_tlb.c index ed0385448148..5dd3ab469976 100644 --- a/trunk/arch/powerpc/kvm/44x_tlb.c +++ b/trunk/arch/powerpc/kvm/44x_tlb.c @@ -441,7 +441,6 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); struct kvmppc_44x_tlbe *tlbe; unsigned int gtlb_index; - int idx; gtlb_index = kvmppc_get_gpr(vcpu, ra); if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) { @@ -474,8 +473,6 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) return EMULATE_FAIL; } - idx = srcu_read_lock(&vcpu->kvm->srcu); - if (tlbe_is_host_safe(vcpu, tlbe)) { gva_t eaddr; gpa_t gpaddr; @@ -492,8 +489,6 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); } - srcu_read_unlock(&vcpu->kvm->srcu, idx); - trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1, tlbe->word2); diff --git 
a/trunk/arch/powerpc/kvm/book3s_hv.c b/trunk/arch/powerpc/kvm/book3s_hv.c index 550f5928b394..9de24f8e03c7 100644 --- a/trunk/arch/powerpc/kvm/book3s_hv.c +++ b/trunk/arch/powerpc/kvm/book3s_hv.c @@ -562,8 +562,6 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) case H_CPPR: case H_EOI: case H_IPI: - case H_IPOLL: - case H_XIRR_X: if (kvmppc_xics_enabled(vcpu)) { ret = kvmppc_xics_hcall(vcpu, req); break; diff --git a/trunk/arch/powerpc/kvm/book3s_pr_papr.c b/trunk/arch/powerpc/kvm/book3s_pr_papr.c index da0e0bc268bd..b24309c6c2d5 100644 --- a/trunk/arch/powerpc/kvm/book3s_pr_papr.c +++ b/trunk/arch/powerpc/kvm/book3s_pr_papr.c @@ -257,8 +257,6 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) case H_CPPR: case H_EOI: case H_IPI: - case H_IPOLL: - case H_XIRR_X: if (kvmppc_xics_enabled(vcpu)) return kvmppc_h_pr_xics_hcall(vcpu, cmd); break; diff --git a/trunk/arch/powerpc/kvm/book3s_xics.c b/trunk/arch/powerpc/kvm/book3s_xics.c index 94c1dd46b83d..f7a103756618 100644 --- a/trunk/arch/powerpc/kvm/book3s_xics.c +++ b/trunk/arch/powerpc/kvm/book3s_xics.c @@ -650,23 +650,6 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, return H_SUCCESS; } -static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server) -{ - union kvmppc_icp_state state; - struct kvmppc_icp *icp; - - icp = vcpu->arch.icp; - if (icp->server_num != server) { - icp = kvmppc_xics_find_server(vcpu->kvm, server); - if (!icp) - return H_PARAMETER; - } - state = ACCESS_ONCE(icp->state); - kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr); - kvmppc_set_gpr(vcpu, 5, state.mfrr); - return H_SUCCESS; -} - static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) { union kvmppc_icp_state old_state, new_state; @@ -804,18 +787,6 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req) if (!xics || !vcpu->arch.icp) return H_HARDWARE; - /* These requests don't have real-mode implementations at present */ - switch (req) { - case H_XIRR_X: - res = kvmppc_h_xirr(vcpu); - kvmppc_set_gpr(vcpu, 4, res); - kvmppc_set_gpr(vcpu, 5, get_tb()); - return rc; - case H_IPOLL: - rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4)); - return rc; - } - /* Check for real mode returning too hard */ if (xics->real_mode) return kvmppc_xics_rm_complete(vcpu, req); diff --git a/trunk/arch/powerpc/kvm/booke.c b/trunk/arch/powerpc/kvm/booke.c index 1a1b51189773..1020119226db 100644 --- a/trunk/arch/powerpc/kvm/booke.c +++ b/trunk/arch/powerpc/kvm/booke.c @@ -673,6 +673,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ret = s; goto out; } + kvmppc_lazy_ee_enable(); kvm_guest_enter(); @@ -698,8 +699,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) kvmppc_load_guest_fp(vcpu); #endif - kvmppc_lazy_ee_enable(); - ret = __kvmppc_vcpu_run(kvm_run, vcpu); /* No need for kvm_guest_exit. It's done in handle_exit. @@ -833,18 +832,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, { int r = RESUME_HOST; int s; - int idx; - -#ifdef CONFIG_PPC64 - WARN_ON(local_paca->irq_happened != 0); -#endif - - /* - * We enter with interrupts disabled in hardware, but - * we need to call hard_irq_disable anyway to ensure that - * the software state is kept in sync. 
- */ - hard_irq_disable(); /* update before a new last_exit_type is rewritten */ kvmppc_update_timing_stats(vcpu); @@ -1066,8 +1053,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, break; } - idx = srcu_read_lock(&vcpu->kvm->srcu); - gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); gfn = gpaddr >> PAGE_SHIFT; @@ -1090,7 +1075,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_account_exit(vcpu, MMIO_EXITS); } - srcu_read_unlock(&vcpu->kvm->srcu, idx); break; } @@ -1114,8 +1098,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS); - idx = srcu_read_lock(&vcpu->kvm->srcu); - gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); gfn = gpaddr >> PAGE_SHIFT; @@ -1132,7 +1114,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK); } - srcu_read_unlock(&vcpu->kvm->srcu, idx); break; } diff --git a/trunk/arch/powerpc/kvm/e500_mmu.c b/trunk/arch/powerpc/kvm/e500_mmu.c index 6d6f153b6c1d..c41a5a96b558 100644 --- a/trunk/arch/powerpc/kvm/e500_mmu.c +++ b/trunk/arch/powerpc/kvm/e500_mmu.c @@ -396,7 +396,6 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) struct kvm_book3e_206_tlb_entry *gtlbe; int tlbsel, esel; int recal = 0; - int idx; tlbsel = get_tlb_tlbsel(vcpu); esel = get_tlb_esel(vcpu, tlbsel); @@ -431,8 +430,6 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) kvmppc_set_tlb1map_range(vcpu, gtlbe); } - idx = srcu_read_lock(&vcpu->kvm->srcu); - /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ if (tlbe_is_host_safe(vcpu, gtlbe)) { u64 eaddr = get_tlb_eaddr(gtlbe); @@ -447,8 +444,6 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel)); } - srcu_read_unlock(&vcpu->kvm->srcu, idx); - kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); return EMULATE_DONE; } diff --git a/trunk/arch/powerpc/kvm/e500mc.c b/trunk/arch/powerpc/kvm/e500mc.c index 19c8379575f7..753cc99eff2b 100644 --- a/trunk/arch/powerpc/kvm/e500mc.c +++ b/trunk/arch/powerpc/kvm/e500mc.c @@ -177,6 +177,8 @@ int kvmppc_core_check_processor_compat(void) r = 0; else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0) r = 0; + else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0) + r = 0; else r = -ENOTSUPP; diff --git a/trunk/arch/powerpc/lib/copypage_power7.S b/trunk/arch/powerpc/lib/copypage_power7.S index 395c594722a2..0ef75bf0695c 100644 --- a/trunk/arch/powerpc/lib/copypage_power7.S +++ b/trunk/arch/powerpc/lib/copypage_power7.S @@ -28,14 +28,13 @@ _GLOBAL(copypage_power7) * aligned we don't need to clear the bottom 7 bits of either * address. 
*/ - ori r9,r3,1 /* stream=1 => to */ + ori r9,r3,1 /* stream=1 */ #ifdef CONFIG_PPC_64K_PAGES - lis r7,0x0E01 /* depth=7 - * units/cachelines=512 */ + lis r7,0x0E01 /* depth=7, units=512 */ #else lis r7,0x0E00 /* depth=7 */ - ori r7,r7,0x1000 /* units/cachelines=32 */ + ori r7,r7,0x1000 /* units=32 */ #endif ori r10,r7,1 /* stream=1 */ @@ -44,14 +43,12 @@ _GLOBAL(copypage_power7) .machine push .machine "power4" - /* setup read stream 0 */ - dcbt r0,r4,0b01000 /* addr from */ - dcbt r0,r7,0b01010 /* length and depth from */ - /* setup write stream 1 */ - dcbtst r0,r9,0b01000 /* addr to */ - dcbtst r0,r10,0b01010 /* length and depth to */ + dcbt r0,r4,0b01000 + dcbt r0,r7,0b01010 + dcbtst r0,r9,0b01000 + dcbtst r0,r10,0b01010 eieio - dcbt r0,r8,0b01010 /* all streams GO */ + dcbt r0,r8,0b01010 /* GO */ .machine pop #ifdef CONFIG_ALTIVEC diff --git a/trunk/arch/powerpc/lib/copyuser_power7.S b/trunk/arch/powerpc/lib/copyuser_power7.S index d1f11795a7ad..0d24ff15f5f6 100644 --- a/trunk/arch/powerpc/lib/copyuser_power7.S +++ b/trunk/arch/powerpc/lib/copyuser_power7.S @@ -318,14 +318,12 @@ err1; stb r0,0(r3) .machine push .machine "power4" - /* setup read stream 0 */ - dcbt r0,r6,0b01000 /* addr from */ - dcbt r0,r7,0b01010 /* length and depth from */ - /* setup write stream 1 */ - dcbtst r0,r9,0b01000 /* addr to */ - dcbtst r0,r10,0b01010 /* length and depth to */ + dcbt r0,r6,0b01000 + dcbt r0,r7,0b01010 + dcbtst r0,r9,0b01000 + dcbtst r0,r10,0b01010 eieio - dcbt r0,r8,0b01010 /* all streams GO */ + dcbt r0,r8,0b01010 /* GO */ .machine pop beq cr1,.Lunwind_stack_nonvmx_copy diff --git a/trunk/arch/powerpc/mm/fault.c b/trunk/arch/powerpc/mm/fault.c index 8726779e1409..229951ffc351 100644 --- a/trunk/arch/powerpc/mm/fault.c +++ b/trunk/arch/powerpc/mm/fault.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include @@ -197,7 +196,6 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, unsigned long error_code) { - enum ctx_state prev_state = exception_enter(); struct vm_area_struct * vma; struct mm_struct *mm = current->mm; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; @@ -206,7 +204,6 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, int trap = TRAP(regs); int is_exec = trap == 0x400; int fault; - int rc = 0; #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) /* @@ -233,30 +230,28 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, * look at it */ if (error_code & ICSWX_DSI_UCT) { - rc = acop_handle_fault(regs, address, error_code); + int rc = acop_handle_fault(regs, address, error_code); if (rc) - goto bail; + return rc; } #endif /* CONFIG_PPC_ICSWX */ if (notify_page_fault(regs)) - goto bail; + return 0; if (unlikely(debugger_fault_handler(regs))) - goto bail; + return 0; /* On a kernel SLB miss we can only check for a valid exception entry */ - if (!user_mode(regs) && (address >= TASK_SIZE)) { - rc = SIGSEGV; - goto bail; - } + if (!user_mode(regs) && (address >= TASK_SIZE)) + return SIGSEGV; #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \ defined(CONFIG_PPC_BOOK3S_64)) if (error_code & DSISR_DABRMATCH) { /* breakpoint match */ do_break(regs, address, error_code); - goto bail; + return 0; } #endif @@ -265,10 +260,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, local_irq_enable(); if (in_atomic() || mm == NULL) { - if (!user_mode(regs)) { - rc = SIGSEGV; - goto bail; - 
} + if (!user_mode(regs)) + return SIGSEGV; /* in_atomic() in user mode is really bad, as is current->mm == NULL. */ printk(KERN_EMERG "Page fault in user mode with " @@ -424,11 +417,9 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, */ fault = handle_mm_fault(mm, vma, address, flags); if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { - rc = mm_fault_error(regs, address, fault); + int rc = mm_fault_error(regs, address, fault); if (rc >= MM_FAULT_RETURN) - goto bail; - else - rc = 0; + return rc; } /* @@ -463,7 +454,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, } up_read(&mm->mmap_sem); - goto bail; + return 0; bad_area: up_read(&mm->mmap_sem); @@ -472,7 +463,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, /* User mode accesses cause a SIGSEGV */ if (user_mode(regs)) { _exception(SIGSEGV, regs, code, address); - goto bail; + return 0; } if (is_exec && (error_code & DSISR_PROTFAULT)) @@ -480,11 +471,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, " page (%lx) - exploit attempt? (uid: %d)\n", address, from_kuid(&init_user_ns, current_uid())); - rc = SIGSEGV; - -bail: - exception_exit(prev_state); - return rc; + return SIGSEGV; } diff --git a/trunk/arch/powerpc/mm/hash_native_64.c b/trunk/arch/powerpc/mm/hash_native_64.c index 4c122c3f1623..6a2aead5b0e5 100644 --- a/trunk/arch/powerpc/mm/hash_native_64.c +++ b/trunk/arch/powerpc/mm/hash_native_64.c @@ -336,18 +336,11 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, hpte_v = hptep->v; actual_psize = hpte_actual_psize(hptep, psize); - /* - * We need to invalidate the TLB always because hpte_remove doesn't do - * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less - * random entry from it. When we do that we don't invalidate the TLB - * (hpte_remove) because we assume the old translation is still - * technically "valid". - */ if (actual_psize < 0) { - actual_psize = psize; - ret = -1; - goto err_out; + native_unlock_hpte(hptep); + return -1; } + /* Even if we miss, we need to invalidate the TLB */ if (!HPTE_V_COMPARE(hpte_v, want_v)) { DBG_LOW(" -> miss\n"); ret = -1; @@ -357,7 +350,6 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) | (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)); } -err_out: native_unlock_hpte(hptep); /* Ensure it is out of the tlb too. */ @@ -417,7 +409,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, hptep = htab_address + slot; actual_psize = hpte_actual_psize(hptep, psize); if (actual_psize < 0) - actual_psize = psize; + return; /* Update the HPTE */ hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) | @@ -445,27 +437,21 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn, hpte_v = hptep->v; actual_psize = hpte_actual_psize(hptep, psize); - /* - * We need to invalidate the TLB always because hpte_remove doesn't do - * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less - * random entry from it. When we do that we don't invalidate the TLB - * (hpte_remove) because we assume the old translation is still - * technically "valid". - */ if (actual_psize < 0) { - actual_psize = psize; native_unlock_hpte(hptep); - goto err_out; + local_irq_restore(flags); + return; } + /* Even if we miss, we need to invalidate the TLB */ if (!HPTE_V_COMPARE(hpte_v, want_v)) native_unlock_hpte(hptep); else /* Invalidate the hpte. 
NOTE: this also unlocks it */ hptep->v = 0; -err_out: /* Invalidate the TLB */ tlbie(vpn, psize, actual_psize, ssize, local); + local_irq_restore(flags); } diff --git a/trunk/arch/powerpc/mm/hash_utils_64.c b/trunk/arch/powerpc/mm/hash_utils_64.c index e303a6d74e3a..88ac0eeaadde 100644 --- a/trunk/arch/powerpc/mm/hash_utils_64.c +++ b/trunk/arch/powerpc/mm/hash_utils_64.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include @@ -955,7 +954,6 @@ void hash_failure_debug(unsigned long ea, unsigned long access, */ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) { - enum ctx_state prev_state = exception_enter(); pgd_t *pgdir; unsigned long vsid; struct mm_struct *mm; @@ -975,8 +973,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) mm = current->mm; if (! mm) { DBG_LOW(" user region with no mm !\n"); - rc = 1; - goto bail; + return 1; } psize = get_slice_psize(mm, ea); ssize = user_segment_size(ea); @@ -995,23 +992,19 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) /* Not a valid range * Send the problem up to do_page_fault */ - rc = 1; - goto bail; + return 1; } DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); /* Bad address. */ if (!vsid) { DBG_LOW("Bad address!\n"); - rc = 1; - goto bail; + return 1; } /* Get pgdir */ pgdir = mm->pgd; - if (pgdir == NULL) { - rc = 1; - goto bail; - } + if (pgdir == NULL) + return 1; /* Check CPU locality */ tmp = cpumask_of(smp_processor_id()); @@ -1034,8 +1027,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift); if (ptep == NULL || !pte_present(*ptep)) { DBG_LOW(" no PTE !\n"); - rc = 1; - goto bail; + return 1; } /* Add _PAGE_PRESENT to the required access perm */ @@ -1046,16 +1038,13 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) */ if (access & ~pte_val(*ptep)) { DBG_LOW(" no access !\n"); - rc = 1; - goto bail; + return 1; } #ifdef CONFIG_HUGETLB_PAGE - if (hugeshift) { - rc = __hash_page_huge(ea, access, vsid, ptep, trap, local, + if (hugeshift) + return __hash_page_huge(ea, access, vsid, ptep, trap, local, ssize, hugeshift, psize); - goto bail; - } #endif /* CONFIG_HUGETLB_PAGE */ #ifndef CONFIG_PPC_64K_PAGES @@ -1135,9 +1124,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) pte_val(*(ptep + PTRS_PER_PTE))); #endif DBG_LOW(" -> rc=%d\n", rc); - -bail: - exception_exit(prev_state); return rc; } EXPORT_SYMBOL_GPL(hash_page); @@ -1273,8 +1259,6 @@ void flush_hash_range(unsigned long number, int local) */ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc) { - enum ctx_state prev_state = exception_enter(); - if (user_mode(regs)) { #ifdef CONFIG_PPC_SUBPAGE_PROT if (rc == -2) @@ -1284,8 +1268,6 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc) _exception(SIGBUS, regs, BUS_ADRERR, address); } else bad_page_fault(regs, address, SIGBUS); - - exception_exit(prev_state); } long hpte_insert_repeating(unsigned long hash, unsigned long vpn, diff --git a/trunk/arch/powerpc/mm/hugetlbpage.c b/trunk/arch/powerpc/mm/hugetlbpage.c index 77fdd2cef33b..237c8e5f2640 100644 --- a/trunk/arch/powerpc/mm/hugetlbpage.c +++ b/trunk/arch/powerpc/mm/hugetlbpage.c @@ -592,14 +592,8 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, do { pmd = pmd_offset(pud, addr); next = pmd_addr_end(addr, end); - if (!is_hugepd(pmd)) { - /* - * if it is not hugepd 
pointer, we should already find - * it cleared. - */ - WARN_ON(!pmd_none_or_clear_bad(pmd)); + if (pmd_none_or_clear_bad(pmd)) continue; - } #ifdef CONFIG_PPC_FSL_BOOK3E /* * Increment next by the size of the huge mapping since diff --git a/trunk/arch/powerpc/mm/init_64.c b/trunk/arch/powerpc/mm/init_64.c index a90b9c458990..c2787bf779ca 100644 --- a/trunk/arch/powerpc/mm/init_64.c +++ b/trunk/arch/powerpc/mm/init_64.c @@ -215,8 +215,7 @@ static void __meminit vmemmap_create_mapping(unsigned long start, unsigned long phys) { int mapped = htab_bolt_mapping(start, start + page_size, phys, - pgprot_val(PAGE_KERNEL), - mmu_vmemmap_psize, + PAGE_KERNEL, mmu_vmemmap_psize, mmu_kernel_ssize); BUG_ON(mapped < 0); } diff --git a/trunk/arch/powerpc/perf/core-book3s.c b/trunk/arch/powerpc/perf/core-book3s.c index 29c6482890c8..c627843c5b2e 100644 --- a/trunk/arch/powerpc/perf/core-book3s.c +++ b/trunk/arch/powerpc/perf/core-book3s.c @@ -13,13 +13,11 @@ #include #include #include -#include #include #include #include #include #include -#include #define BHRB_MAX_ENTRIES 32 #define BHRB_TARGET 0x0000000000000002 @@ -102,15 +100,11 @@ static inline int siar_valid(struct pt_regs *regs) return 1; } -static inline void power_pmu_bhrb_enable(struct perf_event *event) {} -static inline void power_pmu_bhrb_disable(struct perf_event *event) {} -void power_pmu_flush_branch_stack(void) {} -static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {} #endif /* CONFIG_PPC32 */ static bool regs_use_siar(struct pt_regs *regs) { - return !!regs->result; + return !!(regs->result & 1); } /* @@ -136,30 +130,22 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs) * If we're not doing instruction sampling, give them the SDAR * (sampled data address). If we are doing instruction sampling, then * only give them the SDAR if it corresponds to the instruction - * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the - * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER. + * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or + * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA. 
*/ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { unsigned long mmcra = regs->dsisr; - bool sdar_valid; + unsigned long sdsync; - if (ppmu->flags & PPMU_HAS_SIER) - sdar_valid = regs->dar & SIER_SDAR_VALID; - else { - unsigned long sdsync; - - if (ppmu->flags & PPMU_SIAR_VALID) - sdsync = POWER7P_MMCRA_SDAR_VALID; - else if (ppmu->flags & PPMU_ALT_SIPR) - sdsync = POWER6_MMCRA_SDSYNC; - else - sdsync = MMCRA_SDSYNC; - - sdar_valid = mmcra & sdsync; - } + if (ppmu->flags & PPMU_SIAR_VALID) + sdsync = POWER7P_MMCRA_SDAR_VALID; + else if (ppmu->flags & PPMU_ALT_SIPR) + sdsync = POWER6_MMCRA_SDSYNC; + else + sdsync = MMCRA_SDSYNC; - if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid) + if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync)) *addrp = mfspr(SPRN_SDAR); } @@ -189,6 +175,11 @@ static bool regs_sipr(struct pt_regs *regs) return !!(regs->dsisr & sipr); } +static bool regs_no_sipr(struct pt_regs *regs) +{ + return !!(regs->result & 2); +} + static inline u32 perf_flags_from_msr(struct pt_regs *regs) { if (regs->msr & MSR_PR) @@ -211,7 +202,7 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs) * SIAR which should give slightly more reliable * results */ - if (ppmu->flags & PPMU_NO_SIPR) { + if (regs_no_sipr(regs)) { unsigned long siar = mfspr(SPRN_SIAR); if (siar >= PAGE_OFFSET) return PERF_RECORD_MISC_KERNEL; @@ -242,9 +233,22 @@ static inline void perf_read_regs(struct pt_regs *regs) int use_siar; regs->dsisr = mmcra; + regs->result = 0; + + if (ppmu->flags & PPMU_NO_SIPR) + regs->result |= 2; + + /* + * On power8 if we're in random sampling mode, the SIER is updated. + * If we're in continuous sampling mode, we don't have SIPR. + */ + if (ppmu->flags & PPMU_HAS_SIER) { + if (marked) + regs->dar = mfspr(SPRN_SIER); + else + regs->result |= 2; + } - if (ppmu->flags & PPMU_HAS_SIER) - regs->dar = mfspr(SPRN_SIER); /* * If this isn't a PMU exception (eg a software event) the SIAR is @@ -269,12 +273,12 @@ static inline void perf_read_regs(struct pt_regs *regs) use_siar = 1; else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING)) use_siar = 0; - else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs)) + else if (!regs_no_sipr(regs) && regs_sipr(regs)) use_siar = 0; else use_siar = 1; - regs->result = use_siar; + regs->result |= use_siar; } /* @@ -298,170 +302,12 @@ static inline int siar_valid(struct pt_regs *regs) unsigned long mmcra = regs->dsisr; int marked = mmcra & MMCRA_SAMPLE_ENABLE; - if (marked) { - if (ppmu->flags & PPMU_HAS_SIER) - return regs->dar & SIER_SIAR_VALID; - - if (ppmu->flags & PPMU_SIAR_VALID) - return mmcra & POWER7P_MMCRA_SIAR_VALID; - } + if ((ppmu->flags & PPMU_SIAR_VALID) && marked) + return mmcra & POWER7P_MMCRA_SIAR_VALID; return 1; } - -/* Reset all possible BHRB entries */ -static void power_pmu_bhrb_reset(void) -{ - asm volatile(PPC_CLRBHRB); -} - -static void power_pmu_bhrb_enable(struct perf_event *event) -{ - struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); - - if (!ppmu->bhrb_nr) - return; - - /* Clear BHRB if we changed task context to avoid data leaks */ - if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { - power_pmu_bhrb_reset(); - cpuhw->bhrb_context = event->ctx; - } - cpuhw->bhrb_users++; -} - -static void power_pmu_bhrb_disable(struct perf_event *event) -{ - struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); - - if (!ppmu->bhrb_nr) - return; - - cpuhw->bhrb_users--; - WARN_ON_ONCE(cpuhw->bhrb_users < 0); - - if (!cpuhw->disabled && !cpuhw->bhrb_users) { - /* BHRB cannot be turned 
off when other - * events are active on the PMU. - */ - - /* avoid stale pointer */ - cpuhw->bhrb_context = NULL; - } -} - -/* Called from ctxsw to prevent one process's branch entries to - * mingle with the other process's entries during context switch. - */ -void power_pmu_flush_branch_stack(void) -{ - if (ppmu->bhrb_nr) - power_pmu_bhrb_reset(); -} -/* Calculate the to address for a branch */ -static __u64 power_pmu_bhrb_to(u64 addr) -{ - unsigned int instr; - int ret; - __u64 target; - - if (is_kernel_addr(addr)) - return branch_target((unsigned int *)addr); - - /* Userspace: need copy instruction here then translate it */ - pagefault_disable(); - ret = __get_user_inatomic(instr, (unsigned int __user *)addr); - if (ret) { - pagefault_enable(); - return 0; - } - pagefault_enable(); - - target = branch_target(&instr); - if ((!target) || (instr & BRANCH_ABSOLUTE)) - return target; - - /* Translate relative branch target from kernel to user address */ - return target - (unsigned long)&instr + addr; -} - -/* Processing BHRB entries */ -void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) -{ - u64 val; - u64 addr; - int r_index, u_index, pred; - - r_index = 0; - u_index = 0; - while (r_index < ppmu->bhrb_nr) { - /* Assembly read function */ - val = read_bhrb(r_index++); - if (!val) - /* Terminal marker: End of valid BHRB entries */ - break; - else { - addr = val & BHRB_EA; - pred = val & BHRB_PREDICTION; - - if (!addr) - /* invalid entry */ - continue; - - /* Branches are read most recent first (ie. mfbhrb 0 is - * the most recent branch). - * There are two types of valid entries: - * 1) a target entry which is the to address of a - * computed goto like a blr,bctr,btar. The next - * entry read from the bhrb will be branch - * corresponding to this target (ie. the actual - * blr/bctr/btar instruction). - * 2) a from address which is an actual branch. If a - * target entry proceeds this, then this is the - * matching branch for that target. If this is not - * following a target entry, then this is a branch - * where the target is given as an immediate field - * in the instruction (ie. an i or b form branch). - * In this case we need to read the instruction from - * memory to determine the target/to address. - */ - - if (val & BHRB_TARGET) { - /* Target branches use two entries - * (ie. computed gotos/XL form) - */ - cpuhw->bhrb_entries[u_index].to = addr; - cpuhw->bhrb_entries[u_index].mispred = pred; - cpuhw->bhrb_entries[u_index].predicted = ~pred; - - /* Get from address in next entry */ - val = read_bhrb(r_index++); - addr = val & BHRB_EA; - if (val & BHRB_TARGET) { - /* Shouldn't have two targets in a - row.. 
Reset index and try again */ - r_index--; - addr = 0; - } - cpuhw->bhrb_entries[u_index].from = addr; - } else { - /* Branches to immediate field - (ie I or B form) */ - cpuhw->bhrb_entries[u_index].from = addr; - cpuhw->bhrb_entries[u_index].to = - power_pmu_bhrb_to(addr); - cpuhw->bhrb_entries[u_index].mispred = pred; - cpuhw->bhrb_entries[u_index].predicted = ~pred; - } - u_index++; - - } - } - cpuhw->bhrb_stack.nr = u_index; - return; -} - #endif /* CONFIG_PPC64 */ static void perf_event_interrupt(struct pt_regs *regs); @@ -1058,6 +904,47 @@ static int collect_events(struct perf_event *group, int max_count, return n; } +/* Reset all possible BHRB entries */ +static void power_pmu_bhrb_reset(void) +{ + asm volatile(PPC_CLRBHRB); +} + +void power_pmu_bhrb_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + + if (!ppmu->bhrb_nr) + return; + + /* Clear BHRB if we changed task context to avoid data leaks */ + if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { + power_pmu_bhrb_reset(); + cpuhw->bhrb_context = event->ctx; + } + cpuhw->bhrb_users++; +} + +void power_pmu_bhrb_disable(struct perf_event *event) +{ + struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + + if (!ppmu->bhrb_nr) + return; + + cpuhw->bhrb_users--; + WARN_ON_ONCE(cpuhw->bhrb_users < 0); + + if (!cpuhw->disabled && !cpuhw->bhrb_users) { + /* BHRB cannot be turned off when other + * events are active on the PMU. + */ + + /* avoid stale pointer */ + cpuhw->bhrb_context = NULL; + } +} + /* * Add a event to the PMU. * If all events are not already frozen, then we disable and @@ -1293,6 +1180,15 @@ int power_pmu_commit_txn(struct pmu *pmu) return 0; } +/* Called from ctxsw to prevent one process's branch entries to + * mingle with the other process's entries during context switch. + */ +void power_pmu_flush_branch_stack(void) +{ + if (ppmu->bhrb_nr) + power_pmu_bhrb_reset(); +} + /* * Return 1 if we might be able to put event on a limited PMC, * or 0 if not. 
@@ -1562,6 +1458,77 @@ struct pmu power_pmu = { .flush_branch_stack = power_pmu_flush_branch_stack, }; +/* Processing BHRB entries */ +void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) +{ + u64 val; + u64 addr; + int r_index, u_index, target, pred; + + r_index = 0; + u_index = 0; + while (r_index < ppmu->bhrb_nr) { + /* Assembly read function */ + val = read_bhrb(r_index); + + /* Terminal marker: End of valid BHRB entries */ + if (val == 0) { + break; + } else { + /* BHRB field break up */ + addr = val & BHRB_EA; + pred = val & BHRB_PREDICTION; + target = val & BHRB_TARGET; + + /* Probable Missed entry: Not applicable for POWER8 */ + if ((addr == 0) && (target == 0) && (pred == 1)) { + r_index++; + continue; + } + + /* Real Missed entry: Power8 based missed entry */ + if ((addr == 0) && (target == 1) && (pred == 1)) { + r_index++; + continue; + } + + /* Reserved condition: Not a valid entry */ + if ((addr == 0) && (target == 1) && (pred == 0)) { + r_index++; + continue; + } + + /* Is a target address */ + if (val & BHRB_TARGET) { + /* First address cannot be a target address */ + if (r_index == 0) { + r_index++; + continue; + } + + /* Update target address for the previous entry */ + cpuhw->bhrb_entries[u_index - 1].to = addr; + cpuhw->bhrb_entries[u_index - 1].mispred = pred; + cpuhw->bhrb_entries[u_index - 1].predicted = ~pred; + + /* Dont increment u_index */ + r_index++; + } else { + /* Update address, flags for current entry */ + cpuhw->bhrb_entries[u_index].from = addr; + cpuhw->bhrb_entries[u_index].mispred = pred; + cpuhw->bhrb_entries[u_index].predicted = ~pred; + + /* Successfully popullated one entry */ + u_index++; + r_index++; + } + } + } + cpuhw->bhrb_stack.nr = u_index; + return; +} + /* * A counter has overflowed; update its count and record * things if requested. 
Note that interrupts are hard-disabled @@ -1758,7 +1725,7 @@ static void perf_event_interrupt(struct pt_regs *regs) } } } - if (!found && !nmi && printk_ratelimit()) + if ((!found) && printk_ratelimit()) printk(KERN_WARNING "Can't find PMC that caused IRQ\n"); /* diff --git a/trunk/arch/powerpc/platforms/Kconfig b/trunk/arch/powerpc/platforms/Kconfig index b62aab3e22ec..a881232a3cce 100644 --- a/trunk/arch/powerpc/platforms/Kconfig +++ b/trunk/arch/powerpc/platforms/Kconfig @@ -128,7 +128,7 @@ config PPC_RTAS_DAEMON config RTAS_PROC bool "Proc interface to RTAS" - depends on PPC_RTAS && PROC_FS + depends on PPC_RTAS default y config RTAS_FLASH diff --git a/trunk/arch/powerpc/platforms/cell/spufs/inode.c b/trunk/arch/powerpc/platforms/cell/spufs/inode.c index f3900427ffab..35f77a42bedf 100644 --- a/trunk/arch/powerpc/platforms/cell/spufs/inode.c +++ b/trunk/arch/powerpc/platforms/cell/spufs/inode.c @@ -238,7 +238,7 @@ const struct file_operations spufs_context_fops = { .release = spufs_dir_close, .llseek = dcache_dir_lseek, .read = generic_read_dir, - .iterate = dcache_readdir, + .readdir = dcache_readdir, .fsync = noop_fsync, }; EXPORT_SYMBOL_GPL(spufs_context_fops); diff --git a/trunk/arch/powerpc/platforms/powernv/Kconfig b/trunk/arch/powerpc/platforms/powernv/Kconfig index c24684c818ab..d3e840d643af 100644 --- a/trunk/arch/powerpc/platforms/powernv/Kconfig +++ b/trunk/arch/powerpc/platforms/powernv/Kconfig @@ -6,7 +6,6 @@ config PPC_POWERNV select PPC_ICP_NATIVE select PPC_P7_NAP select PPC_PCI_CHOICE if EMBEDDED - select EPAPR_BOOT default y config POWERNV_MSI diff --git a/trunk/arch/powerpc/platforms/powernv/opal.c b/trunk/arch/powerpc/platforms/powernv/opal.c index 628c564ceadb..ade4463226c6 100644 --- a/trunk/arch/powerpc/platforms/powernv/opal.c +++ b/trunk/arch/powerpc/platforms/powernv/opal.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include @@ -29,8 +28,6 @@ struct opal { static struct device_node *opal_node; static DEFINE_SPINLOCK(opal_write_lock); extern u64 opal_mc_secondary_handler[]; -static unsigned int *opal_irqs; -static unsigned int opal_irq_count; int __init early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data) @@ -56,11 +53,7 @@ int __init early_init_dt_scan_opal(unsigned long node, opal.entry, entryp, entrysz); powerpc_firmware_features |= FW_FEATURE_OPAL; - if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) { - powerpc_firmware_features |= FW_FEATURE_OPALv2; - powerpc_firmware_features |= FW_FEATURE_OPALv3; - printk("OPAL V3 detected !\n"); - } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) { + if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) { powerpc_firmware_features |= FW_FEATURE_OPALv2; printk("OPAL V2 detected !\n"); } else { @@ -151,13 +144,6 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len) rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) { len = total_len; rc = opal_console_write(vtermno, &len, data); - - /* Closed or other error drop */ - if (rc != OPAL_SUCCESS && rc != OPAL_BUSY && - rc != OPAL_BUSY_EVENT) { - written = total_len; - break; - } if (rc == OPAL_SUCCESS) { total_len -= len; data += len; @@ -330,8 +316,6 @@ static int __init opal_init(void) irqs = of_get_property(opal_node, "opal-interrupts", &irqlen); pr_debug("opal: Found %d interrupts reserved for OPAL\n", irqs ? 
(irqlen / 4) : 0); - opal_irq_count = irqlen / 4; - opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL); for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) { unsigned int hwirq = be32_to_cpup(irqs); unsigned int irq = irq_create_mapping(NULL, hwirq); @@ -343,19 +327,7 @@ static int __init opal_init(void) if (rc) pr_warning("opal: Error %d requesting irq %d" " (0x%x)\n", rc, irq, hwirq); - opal_irqs[i] = irq; } return 0; } subsys_initcall(opal_init); - -void opal_shutdown(void) -{ - unsigned int i; - - for (i = 0; i < opal_irq_count; i++) { - if (opal_irqs[i]) - free_irq(opal_irqs[i], 0); - opal_irqs[i] = 0; - } -} diff --git a/trunk/arch/powerpc/platforms/powernv/pci-ioda.c b/trunk/arch/powerpc/platforms/powernv/pci-ioda.c index 9c9d15e4cdf2..1da578b7c1bf 100644 --- a/trunk/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/trunk/arch/powerpc/platforms/powernv/pci-ioda.c @@ -68,6 +68,16 @@ define_pe_printk_level(pe_err, KERN_ERR); define_pe_printk_level(pe_warn, KERN_WARNING); define_pe_printk_level(pe_info, KERN_INFO); +static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev) +{ + struct device_node *np; + + np = pci_device_to_OF_node(dev); + if (!np) + return NULL; + return PCI_DN(np); +} + static int pnv_ioda_alloc_pe(struct pnv_phb *phb) { unsigned long pe; @@ -100,7 +110,7 @@ static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; - struct pci_dn *pdn = pci_get_pdn(dev); + struct pci_dn *pdn = pnv_ioda_get_pdn(dev); if (!pdn) return NULL; @@ -163,7 +173,7 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) /* Add to all parents PELT-V */ while (parent) { - struct pci_dn *pdn = pci_get_pdn(parent); + struct pci_dn *pdn = pnv_ioda_get_pdn(parent); if (pdn && pdn->pe_number != IODA_INVALID_PE) { rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number, pe->pe_number, OPAL_ADD_PE_TO_DOMAIN); @@ -242,7 +252,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; - struct pci_dn *pdn = pci_get_pdn(dev); + struct pci_dn *pdn = pnv_ioda_get_pdn(dev); struct pnv_ioda_pe *pe; int pe_num; @@ -313,7 +323,7 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe) struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { - struct pci_dn *pdn = pci_get_pdn(dev); + struct pci_dn *pdn = pnv_ioda_get_pdn(dev); if (pdn == NULL) { pr_warn("%s: No device node associated with device !\n", @@ -426,7 +436,7 @@ static void pnv_pci_ioda_setup_PEs(void) static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev) { - struct pci_dn *pdn = pci_get_pdn(pdev); + struct pci_dn *pdn = pnv_ioda_get_pdn(pdev); struct pnv_ioda_pe *pe; /* @@ -758,7 +768,6 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, unsigned int is_64, struct msi_msg *msg) { struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); - struct pci_dn *pdn = pci_get_pdn(dev); struct irq_data *idata; struct irq_chip *ichip; unsigned int xive_num = hwirq - phb->msi_base; @@ -774,10 +783,6 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, if (pe->mve_number < 0) return -ENXIO; - /* Force 32-bit MSI on some broken devices */ - if (pdn && pdn->force_32bit_msi) - is_64 = 0; - /* Assign XIVE to PE */ rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); if (rc) { @@ -1030,7 
+1035,7 @@ static int pnv_pci_enable_device_hook(struct pci_dev *dev) if (!phb->initialized) return 0; - pdn = pci_get_pdn(dev); + pdn = pnv_ioda_get_pdn(dev); if (!pdn || pdn->pe_number == IODA_INVALID_PE) return -EINVAL; @@ -1043,12 +1048,6 @@ static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus, return phb->ioda.pe_rmap[(bus->number << 8) | devfn]; } -static void pnv_pci_ioda_shutdown(struct pnv_phb *phb) -{ - opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET, - OPAL_ASSERT_RESET); -} - void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) { struct pci_controller *hose; @@ -1179,9 +1178,6 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) /* Setup TCEs */ phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; - /* Setup shutdown function for kexec */ - phb->shutdown = pnv_pci_ioda_shutdown; - /* Setup MSI support */ pnv_pci_init_ioda_msis(phb); diff --git a/trunk/arch/powerpc/platforms/powernv/pci.c b/trunk/arch/powerpc/platforms/powernv/pci.c index 277343cc6a3d..55dfca844ddf 100644 --- a/trunk/arch/powerpc/platforms/powernv/pci.c +++ b/trunk/arch/powerpc/platforms/powernv/pci.c @@ -47,10 +47,6 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); struct pnv_phb *phb = hose->private_data; - struct pci_dn *pdn = pci_get_pdn(pdev); - - if (pdn && pdn->force_32bit_msi && !phb->msi32_support) - return -ENODEV; return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV; } @@ -371,7 +367,7 @@ static void pnv_tce_free(struct iommu_table *tbl, long index, long npages) while (npages--) *(tcep++) = 0; - if (tbl->it_type & TCE_PCI_SWINV_FREE) + if (tbl->it_type & TCE_PCI_SWINV_CREATE) pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1); } @@ -454,18 +450,6 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev) pnv_pci_dma_fallback_setup(hose, pdev); } -void pnv_pci_shutdown(void) -{ - struct pci_controller *hose; - - list_for_each_entry(hose, &hose_list, list_node) { - struct pnv_phb *phb = hose->private_data; - - if (phb && phb->shutdown) - phb->shutdown(phb); - } -} - /* Fixup wrong class code in p7ioc and p8 root complex */ static void pnv_p7ioc_rc_quirk(struct pci_dev *dev) { diff --git a/trunk/arch/powerpc/platforms/powernv/pci.h b/trunk/arch/powerpc/platforms/powernv/pci.h index 25d76c4df50b..48dc4bb856a1 100644 --- a/trunk/arch/powerpc/platforms/powernv/pci.h +++ b/trunk/arch/powerpc/platforms/powernv/pci.h @@ -86,7 +86,6 @@ struct pnv_phb { void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); void (*fixup_phb)(struct pci_controller *hose); u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); - void (*shutdown)(struct pnv_phb *phb); union { struct { @@ -159,5 +158,4 @@ extern void pnv_pci_init_ioda_hub(struct device_node *np); extern void pnv_pci_init_ioda2_phb(struct device_node *np); extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, u64 *startp, u64 *endp); - #endif /* __POWERNV_PCI_H */ diff --git a/trunk/arch/powerpc/platforms/powernv/powernv.h b/trunk/arch/powerpc/platforms/powernv/powernv.h index a1c6f83fc391..8a9df7f9667e 100644 --- a/trunk/arch/powerpc/platforms/powernv/powernv.h +++ b/trunk/arch/powerpc/platforms/powernv/powernv.h @@ -9,10 +9,8 @@ static inline void pnv_smp_init(void) { } #ifdef CONFIG_PCI extern void pnv_pci_init(void); -extern void pnv_pci_shutdown(void); #else static inline void pnv_pci_init(void) { } -static inline void pnv_pci_shutdown(void) { } #endif #endif /* _POWERNV_H */ 
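The PowerNV hunks above reintroduce a local pnv_ioda_get_pdn() helper in place of the generic pci_get_pdn(); both resolve a struct pci_dev to its firmware-described struct pci_dn by going through the device-tree node, and the pseries MSI code further below uses the same pattern in its get_pdn(). A minimal sketch of that lookup follows for reference; the helper name is illustrative only, the calls (pci_device_to_OF_node(), PCI_DN(), dev_dbg()) are the kernel ones already used in the surrounding hunks, and the include list is approximate.

#include <linux/pci.h>
#include <asm/pci-bridge.h>	/* struct pci_dn, PCI_DN() */

/* Illustrative sketch: walk pci_dev -> device-tree node -> pci_dn. */
static struct pci_dn *example_get_pdn(struct pci_dev *pdev)
{
	struct device_node *dn;

	/* Every PCI device handled here is described by an OF node. */
	dn = pci_device_to_OF_node(pdev);
	if (!dn) {
		dev_dbg(&pdev->dev, "no OF device node\n");
		return NULL;
	}

	/* Platform PCI data (PE number, force_32bit_msi, ...) hangs off the node. */
	if (!PCI_DN(dn)) {
		dev_dbg(&pdev->dev, "no pci_dn attached to OF node\n");
		return NULL;
	}

	return PCI_DN(dn);
}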
diff --git a/trunk/arch/powerpc/platforms/powernv/setup.c b/trunk/arch/powerpc/platforms/powernv/setup.c index d4459bfc92f7..db1ad1c8f68f 100644 --- a/trunk/arch/powerpc/platforms/powernv/setup.c +++ b/trunk/arch/powerpc/platforms/powernv/setup.c @@ -78,9 +78,7 @@ static void pnv_show_cpuinfo(struct seq_file *m) if (root) model = of_get_property(root, "model", NULL); seq_printf(m, "machine\t\t: PowerNV %s\n", model); - if (firmware_has_feature(FW_FEATURE_OPALv3)) - seq_printf(m, "firmware\t: OPAL v3\n"); - else if (firmware_has_feature(FW_FEATURE_OPALv2)) + if (firmware_has_feature(FW_FEATURE_OPALv2)) seq_printf(m, "firmware\t: OPAL v2\n"); else if (firmware_has_feature(FW_FEATURE_OPAL)) seq_printf(m, "firmware\t: OPAL v1\n"); @@ -128,17 +126,6 @@ static void pnv_progress(char *s, unsigned short hex) { } -static void pnv_shutdown(void) -{ - /* Let the PCI code clear up IODA tables */ - pnv_pci_shutdown(); - - /* And unregister all OPAL interrupts so they don't fire - * up while we kexec - */ - opal_shutdown(); -} - #ifdef CONFIG_KEXEC static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) { @@ -200,7 +187,6 @@ define_machine(powernv) { .init_IRQ = pnv_init_IRQ, .show_cpuinfo = pnv_show_cpuinfo, .progress = pnv_progress, - .machine_shutdown = pnv_shutdown, .power_save = power7_idle, .calibrate_decr = generic_calibrate_decr, #ifdef CONFIG_KEXEC diff --git a/trunk/arch/powerpc/platforms/powernv/smp.c b/trunk/arch/powerpc/platforms/powernv/smp.c index 88c9459c3e07..6a3ecca5b725 100644 --- a/trunk/arch/powerpc/platforms/powernv/smp.c +++ b/trunk/arch/powerpc/platforms/powernv/smp.c @@ -71,68 +71,18 @@ int pnv_smp_kick_cpu(int nr) BUG_ON(nr < 0 || nr >= NR_CPUS); - /* - * If we already started or OPALv2 is not supported, we just - * kick the CPU via the PACA + /* On OPAL v2 the CPU are still spinning inside OPAL itself, + * get them back now */ - if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2)) - goto kick; - - /* - * At this point, the CPU can either be spinning on the way in - * from kexec or be inside OPAL waiting to be started for the - * first time. OPAL v3 allows us to query OPAL to know if it - * has the CPUs, so we do that - */ - if (firmware_has_feature(FW_FEATURE_OPALv3)) { - uint8_t status; - - rc = opal_query_cpu_status(pcpu, &status); + if (!paca[nr].cpu_start && firmware_has_feature(FW_FEATURE_OPALv2)) { + pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu); + rc = opal_start_cpu(pcpu, start_here); if (rc != OPAL_SUCCESS) { - pr_warn("OPAL Error %ld querying CPU %d state\n", + pr_warn("OPAL Error %ld starting CPU %d\n", rc, nr); return -ENODEV; } - - /* - * Already started, just kick it, probably coming from - * kexec and spinning - */ - if (status == OPAL_THREAD_STARTED) - goto kick; - - /* - * Available/inactive, let's kick it - */ - if (status == OPAL_THREAD_INACTIVE) { - pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", - nr, pcpu); - rc = opal_start_cpu(pcpu, start_here); - if (rc != OPAL_SUCCESS) { - pr_warn("OPAL Error %ld starting CPU %d\n", - rc, nr); - return -ENODEV; - } - } else { - /* - * An unavailable CPU (or any other unknown status) - * shouldn't be started. It should also - * not be in the possible map but currently it can - * happen - */ - pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable" - " (status %d)...\n", nr, pcpu, status); - return -ENODEV; - } - } else { - /* - * On OPAL v2, we just kick it and hope for the best, - * we must not test the error from opal_start_cpu() or - * we would fail to get CPUs from kexec. 
- */ - opal_start_cpu(pcpu, start_here); } - kick: return smp_generic_kick_cpu(nr); } diff --git a/trunk/arch/powerpc/platforms/pseries/Kconfig b/trunk/arch/powerpc/platforms/pseries/Kconfig index 4459eff7a75a..9a0941bc4d31 100644 --- a/trunk/arch/powerpc/platforms/pseries/Kconfig +++ b/trunk/arch/powerpc/platforms/pseries/Kconfig @@ -18,9 +18,6 @@ config PPC_PSERIES select PPC_PCI_CHOICE if EXPERT select ZLIB_DEFLATE select PPC_DOORBELL - select HAVE_CONTEXT_TRACKING - select HOTPLUG if SMP - select HOTPLUG_CPU if SMP default y config PPC_SPLPAR diff --git a/trunk/arch/powerpc/platforms/pseries/eeh_cache.c b/trunk/arch/powerpc/platforms/pseries/eeh_cache.c index 5ce3ba7ad137..5a4c87903057 100644 --- a/trunk/arch/powerpc/platforms/pseries/eeh_cache.c +++ b/trunk/arch/powerpc/platforms/pseries/eeh_cache.c @@ -294,6 +294,8 @@ void __init eeh_addr_cache_build(void) spin_lock_init(&pci_io_addr_cache_root.piar_lock); for_each_pci_dev(dev) { + eeh_addr_cache_insert_dev(dev); + dn = pci_device_to_OF_node(dev); if (!dn) continue; @@ -306,8 +308,6 @@ void __init eeh_addr_cache_build(void) dev->dev.archdata.edev = edev; edev->pdev = dev; - eeh_addr_cache_insert_dev(dev); - eeh_sysfs_add_device(dev); } diff --git a/trunk/arch/powerpc/platforms/pseries/eeh_pe.c b/trunk/arch/powerpc/platforms/pseries/eeh_pe.c index 9d4a9e8562b2..fe43d1aa2cf1 100644 --- a/trunk/arch/powerpc/platforms/pseries/eeh_pe.c +++ b/trunk/arch/powerpc/platforms/pseries/eeh_pe.c @@ -639,8 +639,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe) if (pe->type & EEH_PE_PHB) { bus = pe->phb->bus; - } else if (pe->type & EEH_PE_BUS || - pe->type & EEH_PE_DEVICE) { + } else if (pe->type & EEH_PE_BUS) { edev = list_first_entry(&pe->edevs, struct eeh_dev, list); pdev = eeh_dev_to_pci_dev(edev); if (pdev) diff --git a/trunk/arch/powerpc/platforms/pseries/eeh_pseries.c b/trunk/arch/powerpc/platforms/pseries/eeh_pseries.c index b456b157d33d..19506f935737 100644 --- a/trunk/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/trunk/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -83,11 +83,7 @@ static int pseries_eeh_init(void) ibm_configure_pe = rtas_token("ibm,configure-pe"); ibm_configure_bridge = rtas_token("ibm,configure-bridge"); - /* - * Necessary sanity check. We needn't check "get-config-addr-info" - * and its variant since the old firmware probably support address - * of domain/bus/slot/function for EEH RTAS operations. 
- */ + /* necessary sanity check */ if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) { pr_warning("%s: RTAS service invalid\n", __func__); @@ -106,6 +102,12 @@ static int pseries_eeh_init(void) pr_warning("%s: RTAS service invalid\n", __func__); return -EINVAL; + } else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE && + ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) { + pr_warning("%s: RTAS service and " + " invalid\n", + __func__); + return -EINVAL; } else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE && ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) { pr_warning("%s: RTAS service and " diff --git a/trunk/arch/powerpc/platforms/pseries/msi.c b/trunk/arch/powerpc/platforms/pseries/msi.c index 6d2f0abce6fa..420524e6f8c9 100644 --- a/trunk/arch/powerpc/platforms/pseries/msi.c +++ b/trunk/arch/powerpc/platforms/pseries/msi.c @@ -26,6 +26,26 @@ static int query_token, change_token; #define RTAS_CHANGE_MSIX_FN 4 #define RTAS_CHANGE_32MSI_FN 5 +static struct pci_dn *get_pdn(struct pci_dev *pdev) +{ + struct device_node *dn; + struct pci_dn *pdn; + + dn = pci_device_to_OF_node(pdev); + if (!dn) { + dev_dbg(&pdev->dev, "rtas_msi: No OF device node\n"); + return NULL; + } + + pdn = PCI_DN(dn); + if (!pdn) { + dev_dbg(&pdev->dev, "rtas_msi: No PCI DN\n"); + return NULL; + } + + return pdn; +} + /* RTAS Helpers */ static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs) @@ -71,7 +91,7 @@ static void rtas_disable_msi(struct pci_dev *pdev) { struct pci_dn *pdn; - pdn = pci_get_pdn(pdev); + pdn = get_pdn(pdev); if (!pdn) return; @@ -132,7 +152,7 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) struct pci_dn *pdn; const u32 *req_msi; - pdn = pci_get_pdn(pdev); + pdn = get_pdn(pdev); if (!pdn) return -ENODEV; @@ -374,23 +394,6 @@ static int check_msix_entries(struct pci_dev *pdev) return 0; } -static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev) -{ - u32 addr_hi, addr_lo; - - /* - * We should only get in here for IODA1 configs. This is based on the - * fact that we using RTAS for MSIs, we don't have the 32 bit MSI RTAS - * support, and we are in a PCIe Gen2 slot. 
- */ - dev_info(&pdev->dev, - "rtas_msi: No 32 bit MSI firmware support, forcing 32 bit MSI\n"); - pci_read_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, &addr_hi); - addr_lo = 0xffff0000 | ((addr_hi >> (48 - 32)) << 4); - pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_LO, addr_lo); - pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, 0); -} - static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) { struct pci_dn *pdn; @@ -398,9 +401,8 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) struct msi_desc *entry; struct msi_msg msg; int nvec = nvec_in; - int use_32bit_msi_hack = 0; - pdn = pci_get_pdn(pdev); + pdn = get_pdn(pdev); if (!pdn) return -ENODEV; @@ -426,31 +428,15 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) */ again: if (type == PCI_CAP_ID_MSI) { - if (pdn->force_32bit_msi) { + if (pdn->force_32bit_msi) rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec); - if (rc < 0) { - /* - * We only want to run the 32 bit MSI hack below if - * the max bus speed is Gen2 speed - */ - if (pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) - return rc; - - use_32bit_msi_hack = 1; - } - } else - rc = -1; - - if (rc < 0) + else rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec); - if (rc < 0) { + if (rc < 0 && !pdn->force_32bit_msi) { pr_debug("rtas_msi: trying the old firmware call.\n"); rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec); } - - if (use_32bit_msi_hack && rc > 0) - rtas_hack_32bit_msi_gen2(pdev); } else rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec); @@ -532,3 +518,12 @@ static int rtas_msi_init(void) } arch_initcall(rtas_msi_init); +static void quirk_radeon(struct pci_dev *dev) +{ + struct pci_dn *pdn = get_pdn(dev); + + if (pdn) + pdn->force_32bit_msi = 1; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon); diff --git a/trunk/arch/powerpc/platforms/pseries/suspend.c b/trunk/arch/powerpc/platforms/pseries/suspend.c index 5f997e79d570..47226e04126d 100644 --- a/trunk/arch/powerpc/platforms/pseries/suspend.c +++ b/trunk/arch/powerpc/platforms/pseries/suspend.c @@ -16,7 +16,6 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include #include #include #include @@ -127,15 +126,11 @@ static ssize_t store_hibernate(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - cpumask_var_t offline_mask; int rc; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) - return -ENOMEM; - stream_id = simple_strtoul(buf, NULL, 16); do { @@ -145,32 +140,15 @@ static ssize_t store_hibernate(struct device *dev, } while (rc == -EAGAIN); if (!rc) { - /* All present CPUs must be online */ - cpumask_andnot(offline_mask, cpu_present_mask, - cpu_online_mask); - rc = rtas_online_cpus_mask(offline_mask); - if (rc) { - pr_err("%s: Could not bring present CPUs online.\n", - __func__); - goto out; - } - stop_topology_update(); rc = pm_suspend(PM_SUSPEND_MEM); start_topology_update(); - - /* Take down CPUs not online prior to suspend */ - if (!rtas_offline_cpus_mask(offline_mask)) - pr_warn("%s: Could not restore CPUs to offline " - "state.\n", __func__); } stream_id = 0; if (!rc) rc = count; -out: - free_cpumask_var(offline_mask); return rc; } diff --git a/trunk/arch/powerpc/platforms/wsp/ics.c b/trunk/arch/powerpc/platforms/wsp/ics.c index 2d3b1dd9571d..97fe82ee8633 100644 --- 
a/trunk/arch/powerpc/platforms/wsp/ics.c +++ b/trunk/arch/powerpc/platforms/wsp/ics.c @@ -361,7 +361,7 @@ static int wsp_chip_set_affinity(struct irq_data *d, xive = xive_set_server(xive, get_irq_server(ics, hw_irq)); wsp_ics_set_xive(ics, hw_irq, xive); - return IRQ_SET_MASK_OK; + return 0; } static struct irq_chip wsp_irq_chip = { diff --git a/trunk/arch/powerpc/sysdev/Makefile b/trunk/arch/powerpc/sysdev/Makefile index 99464a7bdb3b..b0a518e97599 100644 --- a/trunk/arch/powerpc/sysdev/Makefile +++ b/trunk/arch/powerpc/sysdev/Makefile @@ -64,8 +64,6 @@ endif obj-$(CONFIG_PPC_SCOM) += scom.o -obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS) += udbg_memcons.o - subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror obj-$(CONFIG_PPC_XICS) += xics/ diff --git a/trunk/arch/powerpc/sysdev/ehv_pic.c b/trunk/arch/powerpc/sysdev/ehv_pic.c index 9cd0e60716fe..6e0e1005227f 100644 --- a/trunk/arch/powerpc/sysdev/ehv_pic.c +++ b/trunk/arch/powerpc/sysdev/ehv_pic.c @@ -81,7 +81,7 @@ int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest, ev_int_set_config(src, config, prio, cpuid); spin_unlock_irqrestore(&ehv_pic_lock, flags); - return IRQ_SET_MASK_OK; + return 0; } static unsigned int ehv_pic_type_to_vecpri(unsigned int type) diff --git a/trunk/arch/powerpc/sysdev/fsl_pci.c b/trunk/arch/powerpc/sysdev/fsl_pci.c index 46ac1ddea683..028ac1f71b51 100644 --- a/trunk/arch/powerpc/sysdev/fsl_pci.c +++ b/trunk/arch/powerpc/sysdev/fsl_pci.c @@ -97,14 +97,22 @@ static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn, return indirect_read_config(bus, devfn, offset, len, val); } -#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) - -static struct pci_ops fsl_indirect_pcie_ops = +static struct pci_ops fsl_indirect_pci_ops = { .read = fsl_indirect_read_config, .write = indirect_write_config, }; +static void __init fsl_setup_indirect_pci(struct pci_controller* hose, + resource_size_t cfg_addr, + resource_size_t cfg_data, u32 flags) +{ + setup_indirect_pci(hose, cfg_addr, cfg_data, flags); + hose->ops = &fsl_indirect_pci_ops; +} + +#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) + #define MAX_PHYS_ADDR_BITS 40 static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS; @@ -496,15 +504,13 @@ int __init fsl_add_bridge(struct platform_device *pdev, int is_primary) if (!hose->private_data) goto no_bridge; - setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4, - PPC_INDIRECT_TYPE_BIG_ENDIAN); + fsl_setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4, + PPC_INDIRECT_TYPE_BIG_ENDIAN); if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0) hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK; if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) { - /* use fsl_indirect_read_config for PCIe */ - hose->ops = &fsl_indirect_pcie_ops; /* For PCIE read HEADER_TYPE to identify controler mode */ early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type); if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) @@ -808,8 +814,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev) if (ret) goto err0; } else { - setup_indirect_pci(hose, rsrc_cfg.start, - rsrc_cfg.start + 4, 0); + fsl_setup_indirect_pci(hose, rsrc_cfg.start, + rsrc_cfg.start + 4, 0); } printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. 
" diff --git a/trunk/arch/powerpc/sysdev/mpic.c b/trunk/arch/powerpc/sysdev/mpic.c index 3cc2f9159ab1..ee21b5e71aec 100644 --- a/trunk/arch/powerpc/sysdev/mpic.c +++ b/trunk/arch/powerpc/sysdev/mpic.c @@ -54,7 +54,7 @@ static DEFINE_RAW_SPINLOCK(mpic_lock); #ifdef CONFIG_PPC32 /* XXX for now */ #ifdef CONFIG_IRQ_ALL_CPUS -#define distribute_irqs (1) +#define distribute_irqs (!(mpic->flags & MPIC_SINGLE_DEST_CPU)) #else #define distribute_irqs (0) #endif @@ -836,7 +836,7 @@ int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, mpic_physmask(mask)); } - return IRQ_SET_MASK_OK; + return 0; } static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) @@ -1703,7 +1703,7 @@ void mpic_setup_this_cpu(void) * it differently, then we should make sure we also change the default * values of irq_desc[].affinity in irq.c. */ - if (distribute_irqs && !(mpic->flags & MPIC_SINGLE_DEST_CPU)) { + if (distribute_irqs) { for (i = 0; i < mpic->num_sources ; i++) mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk); diff --git a/trunk/arch/powerpc/sysdev/udbg_memcons.c b/trunk/arch/powerpc/sysdev/udbg_memcons.c deleted file mode 100644 index ce5a7b489e4b..000000000000 --- a/trunk/arch/powerpc/sysdev/udbg_memcons.c +++ /dev/null @@ -1,105 +0,0 @@ -/* - * A udbg backend which logs messages and reads input from in memory - * buffers. - * - * The console output can be read from memcons_output which is a - * circular buffer whose next write position is stored in memcons.output_pos. - * - * Input may be passed by writing into the memcons_input buffer when it is - * empty. The input buffer is empty when both input_pos == input_start and - * *input_start == '\0'. - * - * Copyright (C) 2003-2005 Anton Blanchard and Milton Miller, IBM Corp - * Copyright (C) 2013 Alistair Popple, IBM Corp - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include - -struct memcons { - char *output_start; - char *output_pos; - char *output_end; - char *input_start; - char *input_pos; - char *input_end; -}; - -static char memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE]; -static char memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE]; - -struct memcons memcons = { - .output_start = memcons_output, - .output_pos = memcons_output, - .output_end = &memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE], - .input_start = memcons_input, - .input_pos = memcons_input, - .input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE], -}; - -void memcons_putc(char c) -{ - char *new_output_pos; - - *memcons.output_pos = c; - wmb(); - new_output_pos = memcons.output_pos + 1; - if (new_output_pos >= memcons.output_end) - new_output_pos = memcons.output_start; - - memcons.output_pos = new_output_pos; -} - -int memcons_getc_poll(void) -{ - char c; - char *new_input_pos; - - if (*memcons.input_pos) { - c = *memcons.input_pos; - - new_input_pos = memcons.input_pos + 1; - if (new_input_pos >= memcons.input_end) - new_input_pos = memcons.input_start; - else if (*new_input_pos == '\0') - new_input_pos = memcons.input_start; - - *memcons.input_pos = '\0'; - wmb(); - memcons.input_pos = new_input_pos; - return c; - } - - return -1; -} - -int memcons_getc(void) -{ - int c; - - while (1) { - c = memcons_getc_poll(); - if (c == -1) - cpu_relax(); - else - break; - } - - return c; -} - -void udbg_init_memcons(void) -{ - udbg_putc = memcons_putc; - udbg_getc = memcons_getc; - udbg_getc_poll = memcons_getc_poll; -} diff --git a/trunk/arch/powerpc/sysdev/xics/ics-opal.c b/trunk/arch/powerpc/sysdev/xics/ics-opal.c index 39d72212655e..f7e8609df0d5 100644 --- a/trunk/arch/powerpc/sysdev/xics/ics-opal.c +++ b/trunk/arch/powerpc/sysdev/xics/ics-opal.c @@ -148,7 +148,7 @@ static int ics_opal_set_affinity(struct irq_data *d, __func__, d->irq, hw_irq, server, rc); return -1; } - return IRQ_SET_MASK_OK; + return 0; } static struct irq_chip ics_opal_irq_chip = { diff --git a/trunk/arch/s390/Kconfig b/trunk/arch/s390/Kconfig index da183c5a103c..2c9789da0e24 100644 --- a/trunk/arch/s390/Kconfig +++ b/trunk/arch/s390/Kconfig @@ -98,6 +98,7 @@ config S390 select CLONE_BACKWARDS2 select GENERIC_CLOCKEVENTS select GENERIC_CPU_DEVICES if !SMP + select GENERIC_KERNEL_THREAD select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL_OLD select HAVE_ALIGNED_STRUCT_PAGE if SLUB diff --git a/trunk/arch/s390/appldata/appldata_base.c b/trunk/arch/s390/appldata/appldata_base.c index 87a22092b68f..bae0f402bf2a 100644 --- a/trunk/arch/s390/appldata/appldata_base.c +++ b/trunk/arch/s390/appldata/appldata_base.c @@ -212,9 +212,7 @@ appldata_timer_handler(ctl_table *ctl, int write, return 0; } if (!write) { - strncpy(buf, appldata_timer_active ? "1\n" : "0\n", - ARRAY_SIZE(buf)); - len = strnlen(buf, ARRAY_SIZE(buf)); + len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n"); if (len > *lenp) len = *lenp; if (copy_to_user(buffer, buf, len)) @@ -319,8 +317,7 @@ appldata_generic_handler(ctl_table *ctl, int write, return 0; } if (!write) { - strncpy(buf, ops->active ? "1\n" : "0\n", ARRAY_SIZE(buf)); - len = strnlen(buf, ARRAY_SIZE(buf)); + len = sprintf(buf, ops->active ? 
"1\n" : "0\n"); if (len > *lenp) len = *lenp; if (copy_to_user(buffer, buf, len)) { diff --git a/trunk/arch/s390/include/asm/dma-mapping.h b/trunk/arch/s390/include/asm/dma-mapping.h index 2f8c1abeb086..9411db653bac 100644 --- a/trunk/arch/s390/include/asm/dma-mapping.h +++ b/trunk/arch/s390/include/asm/dma-mapping.h @@ -50,10 +50,9 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { struct dma_map_ops *dma_ops = get_dma_ops(dev); - debug_dma_mapping_error(dev, dma_addr); if (dma_ops->mapping_error) return dma_ops->mapping_error(dev, dma_addr); - return (dma_addr == DMA_ERROR_CODE); + return (dma_addr == 0UL); } static inline void *dma_alloc_coherent(struct device *dev, size_t size, @@ -72,8 +71,8 @@ static inline void dma_free_coherent(struct device *dev, size_t size, { struct dma_map_ops *dma_ops = get_dma_ops(dev); - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); dma_ops->free(dev, size, cpu_addr, dma_handle, NULL); + debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); } #endif /* _ASM_S390_DMA_MAPPING_H */ diff --git a/trunk/arch/s390/include/asm/ftrace.h b/trunk/arch/s390/include/asm/ftrace.h index bf246dae1367..b7931faaef6d 100644 --- a/trunk/arch/s390/include/asm/ftrace.h +++ b/trunk/arch/s390/include/asm/ftrace.h @@ -9,6 +9,11 @@ struct dyn_arch_ftrace { }; #define MCOUNT_ADDR ((long)_mcount) +#ifdef CONFIG_64BIT +#define MCOUNT_INSN_SIZE 12 +#else +#define MCOUNT_INSN_SIZE 20 +#endif static inline unsigned long ftrace_call_adjust(unsigned long addr) { @@ -16,11 +21,4 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) } #endif /* __ASSEMBLY__ */ - -#ifdef CONFIG_64BIT -#define MCOUNT_INSN_SIZE 12 -#else -#define MCOUNT_INSN_SIZE 22 -#endif - #endif /* _ASM_S390_FTRACE_H */ diff --git a/trunk/arch/s390/include/asm/io.h b/trunk/arch/s390/include/asm/io.h index fd9be010f9b2..379d96e2105e 100644 --- a/trunk/arch/s390/include/asm/io.h +++ b/trunk/arch/s390/include/asm/io.h @@ -36,7 +36,6 @@ static inline void * phys_to_virt(unsigned long address) } void *xlate_dev_mem_ptr(unsigned long phys); -#define xlate_dev_mem_ptr xlate_dev_mem_ptr void unxlate_dev_mem_ptr(unsigned long phys, void *addr); /* diff --git a/trunk/arch/s390/include/asm/page.h b/trunk/arch/s390/include/asm/page.h index 5d64fb7619cc..75ce9b065f9f 100644 --- a/trunk/arch/s390/include/asm/page.h +++ b/trunk/arch/s390/include/asm/page.h @@ -32,7 +32,7 @@ void storage_key_init_range(unsigned long start, unsigned long end); -static inline unsigned long pfmf(unsigned long function, unsigned long address) +static unsigned long pfmf(unsigned long function, unsigned long address) { asm volatile( " .insn rre,0xb9af0000,%[function],%[address]" @@ -44,13 +44,17 @@ static inline unsigned long pfmf(unsigned long function, unsigned long address) static inline void clear_page(void *page) { - register unsigned long reg1 asm ("1") = 0; - register void *reg2 asm ("2") = page; - register unsigned long reg3 asm ("3") = 4096; - asm volatile( - " mvcl 2,0" - : "+d" (reg2), "+d" (reg3) : "d" (reg1) - : "memory", "cc"); + if (MACHINE_HAS_PFMF) { + pfmf(0x10000, (unsigned long)page); + } else { + register unsigned long reg1 asm ("1") = 0; + register void *reg2 asm ("2") = page; + register unsigned long reg3 asm ("3") = 4096; + asm volatile( + " mvcl 2,0" + : "+d" (reg2), "+d" (reg3) : "d" (reg1) + : "memory", "cc"); + } } static inline void copy_page(void *to, void *from) diff --git a/trunk/arch/s390/include/asm/pgtable.h b/trunk/arch/s390/include/asm/pgtable.h index 
9aefa3c64eb2..4105b8221fdd 100644 --- a/trunk/arch/s390/include/asm/pgtable.h +++ b/trunk/arch/s390/include/asm/pgtable.h @@ -58,6 +58,9 @@ extern unsigned long zero_page_mask; #define __HAVE_COLOR_ZERO_PAGE /* TODO: s390 cannot support io_remap_pfn_range... */ +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + #endif /* !__ASSEMBLY__ */ /* @@ -303,7 +306,7 @@ extern unsigned long MODULES_END; #define RCP_HC_BIT 0x00200000UL #define RCP_GR_BIT 0x00040000UL #define RCP_GC_BIT 0x00020000UL -#define RCP_IN_BIT 0x00002000UL /* IPTE notify bit */ +#define RCP_IN_BIT 0x00008000UL /* IPTE notify bit */ /* User dirty / referenced bit for KVM's migration feature */ #define KVM_UR_BIT 0x00008000UL @@ -371,7 +374,7 @@ extern unsigned long MODULES_END; #define RCP_HC_BIT 0x0020000000000000UL #define RCP_GR_BIT 0x0004000000000000UL #define RCP_GC_BIT 0x0002000000000000UL -#define RCP_IN_BIT 0x0000200000000000UL /* IPTE notify bit */ +#define RCP_IN_BIT 0x0000800000000000UL /* IPTE notify bit */ /* User dirty / referenced bit for KVM's migration feature */ #define KVM_UR_BIT 0x0000800000000000UL @@ -620,7 +623,7 @@ static inline pgste_t pgste_get_lock(pte_t *ptep) " csg %0,%1,%2\n" " jl 0b\n" : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE]) - : "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory"); + : "Q" (ptep[PTRS_PER_PTE]) : "cc"); #endif return __pgste(new); } @@ -632,26 +635,18 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste) " nihh %1,0xff7f\n" /* clear RCP_PCL_BIT */ " stg %1,%0\n" : "=Q" (ptep[PTRS_PER_PTE]) - : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) - : "cc", "memory"); + : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc"); preempt_enable(); #endif } -static inline void pgste_set(pte_t *ptep, pgste_t pgste) -{ -#ifdef CONFIG_PGSTE - *(pgste_t *)(ptep + PTRS_PER_PTE) = pgste; -#endif -} - static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) { #ifdef CONFIG_PGSTE unsigned long address, bits; unsigned char skey; - if (pte_val(*ptep) & _PAGE_INVALID) + if (!pte_present(*ptep)) return pgste; address = pte_val(*ptep) & PAGE_MASK; skey = page_get_storage_key(address); @@ -685,7 +680,7 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste) #ifdef CONFIG_PGSTE int young; - if (pte_val(*ptep) & _PAGE_INVALID) + if (!pte_present(*ptep)) return pgste; /* Get referenced bit from storage key */ young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK); @@ -709,19 +704,17 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry) { #ifdef CONFIG_PGSTE unsigned long address; - unsigned long nkey; + unsigned long okey, nkey; - if (pte_val(entry) & _PAGE_INVALID) + if (!pte_present(entry)) return; - VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID)); address = pte_val(entry) & PAGE_MASK; - /* - * Set page access key and fetch protection bit from pgste. - * The guest C/R information is still in the PGSTE, set real - * key C/R to 0. 
- */ - nkey = (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56; - page_set_storage_key(address, nkey, 0); + okey = nkey = page_get_storage_key(address); + nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT); + /* Set page access key and fetch protection bit from pgste */ + nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56; + if (okey != nkey) + page_set_storage_key(address, nkey, 0); #endif } @@ -1105,11 +1098,6 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, pte = *ptep; if (!mm_exclusive(mm)) __ptep_ipte(address, ptep); - - if (mm_has_pgste(mm)) { - pgste = pgste_update_all(&pte, pgste); - pgste_set(ptep, pgste); - } return pte; } @@ -1117,13 +1105,9 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long address, pte_t *ptep, pte_t pte) { - pgste_t pgste; - if (mm_has_pgste(mm)) { - pgste = *(pgste_t *)(ptep + PTRS_PER_PTE); - pgste_set_key(ptep, pgste, pte); pgste_set_pte(ptep, pte); - pgste_set_unlock(ptep, pgste); + pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE)); } else *ptep = pte; } diff --git a/trunk/arch/s390/kernel/dis.c b/trunk/arch/s390/kernel/dis.c index be87d3e05a5b..7f4a4a8c847c 100644 --- a/trunk/arch/s390/kernel/dis.c +++ b/trunk/arch/s390/kernel/dis.c @@ -1862,8 +1862,6 @@ void print_fn_code(unsigned char *code, unsigned long len) while (len) { ptr = buffer; opsize = insn_length(*code); - if (opsize > len) - break; ptr += sprintf(ptr, "%p: ", code); for (i = 0; i < opsize; i++) ptr += sprintf(ptr, "%02x", code[i]); diff --git a/trunk/arch/s390/kernel/dumpstack.c b/trunk/arch/s390/kernel/dumpstack.c index 87acc38f73c6..298297477257 100644 --- a/trunk/arch/s390/kernel/dumpstack.c +++ b/trunk/arch/s390/kernel/dumpstack.c @@ -74,8 +74,6 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high) static void show_trace(struct task_struct *task, unsigned long *stack) { - const unsigned long frame_size = - STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); register unsigned long __r15 asm ("15"); unsigned long sp; @@ -84,13 +82,11 @@ static void show_trace(struct task_struct *task, unsigned long *stack) sp = task ? task->thread.ksp : __r15; printk("Call Trace:\n"); #ifdef CONFIG_CHECK_STACK - sp = __show_trace(sp, - S390_lowcore.panic_stack + frame_size - 4096, - S390_lowcore.panic_stack + frame_size); + sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, + S390_lowcore.panic_stack); #endif - sp = __show_trace(sp, - S390_lowcore.async_stack + frame_size - ASYNC_SIZE, - S390_lowcore.async_stack + frame_size); + sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, + S390_lowcore.async_stack); if (task) __show_trace(sp, (unsigned long) task_stack_page(task), (unsigned long) task_stack_page(task) + THREAD_SIZE); diff --git a/trunk/arch/s390/kernel/ftrace.c b/trunk/arch/s390/kernel/ftrace.c index e3043aef87a9..78bdf0e5dff7 100644 --- a/trunk/arch/s390/kernel/ftrace.c +++ b/trunk/arch/s390/kernel/ftrace.c @@ -16,6 +16,12 @@ #include #include +#ifdef CONFIG_64BIT +#define MCOUNT_OFFSET_RET 12 +#else +#define MCOUNT_OFFSET_RET 22 +#endif + #ifdef CONFIG_DYNAMIC_FTRACE void ftrace_disable_code(void); @@ -149,10 +155,9 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent, if (unlikely(atomic_read(¤t->tracing_graph_pause))) goto out; - ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE; if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY) goto out; - trace.func = ip; + trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET; /* Only trace if the calling function expects to. 
*/ if (!ftrace_graph_entry(&trace)) { current->curr_ret_stack--; diff --git a/trunk/arch/s390/kernel/ipl.c b/trunk/arch/s390/kernel/ipl.c index feb719d3c851..d8a6a385d048 100644 --- a/trunk/arch/s390/kernel/ipl.c +++ b/trunk/arch/s390/kernel/ipl.c @@ -754,9 +754,9 @@ static struct bin_attribute sys_reipl_fcp_scp_data_attr = { .write = reipl_fcp_scpdata_write, }; -DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n", +DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", reipl_block_fcp->ipl_info.fcp.wwpn); -DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n", +DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n", reipl_block_fcp->ipl_info.fcp.lun); DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n", reipl_block_fcp->ipl_info.fcp.bootprog); @@ -1323,9 +1323,9 @@ static struct shutdown_action __refdata reipl_action = { /* FCP dump device attributes */ -DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n", +DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n", dump_block_fcp->ipl_info.fcp.wwpn); -DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n", +DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n", dump_block_fcp->ipl_info.fcp.lun); DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n", dump_block_fcp->ipl_info.fcp.bootprog); diff --git a/trunk/arch/s390/kernel/irq.c b/trunk/arch/s390/kernel/irq.c index dd3c1994b8bd..f7fb58903f6a 100644 --- a/trunk/arch/s390/kernel/irq.c +++ b/trunk/arch/s390/kernel/irq.c @@ -311,69 +311,3 @@ void measurement_alert_subclass_unregister(void) spin_unlock(&ma_subclass_lock); } EXPORT_SYMBOL(measurement_alert_subclass_unregister); - -#ifdef CONFIG_SMP -void synchronize_irq(unsigned int irq) -{ - /* - * Not needed, the handler is protected by a lock and IRQs that occur - * after the handler is deleted are just NOPs. 
- */ -} -EXPORT_SYMBOL_GPL(synchronize_irq); -#endif - -#ifndef CONFIG_PCI - -/* Only PCI devices have dynamically-defined IRQ handlers */ - -int request_irq(unsigned int irq, irq_handler_t handler, - unsigned long irqflags, const char *devname, void *dev_id) -{ - return -EINVAL; -} -EXPORT_SYMBOL_GPL(request_irq); - -void free_irq(unsigned int irq, void *dev_id) -{ - WARN_ON(1); -} -EXPORT_SYMBOL_GPL(free_irq); - -void enable_irq(unsigned int irq) -{ - WARN_ON(1); -} -EXPORT_SYMBOL_GPL(enable_irq); - -void disable_irq(unsigned int irq) -{ - WARN_ON(1); -} -EXPORT_SYMBOL_GPL(disable_irq); - -#endif /* !CONFIG_PCI */ - -void disable_irq_nosync(unsigned int irq) -{ - disable_irq(irq); -} -EXPORT_SYMBOL_GPL(disable_irq_nosync); - -unsigned long probe_irq_on(void) -{ - return 0; -} -EXPORT_SYMBOL_GPL(probe_irq_on); - -int probe_irq_off(unsigned long val) -{ - return 0; -} -EXPORT_SYMBOL_GPL(probe_irq_off); - -unsigned int probe_irq_mask(unsigned long val) -{ - return val; -} -EXPORT_SYMBOL_GPL(probe_irq_mask); diff --git a/trunk/arch/s390/kernel/mcount.S b/trunk/arch/s390/kernel/mcount.S index 08dcf21cb8df..4567ce20d900 100644 --- a/trunk/arch/s390/kernel/mcount.S +++ b/trunk/arch/s390/kernel/mcount.S @@ -7,7 +7,6 @@ #include #include -#include .section .kprobes.text, "ax" @@ -34,7 +33,6 @@ ENTRY(ftrace_caller) la %r2,0(%r14) st %r0,__SF_BACKCHAIN(%r15) la %r3,0(%r3) - ahi %r2,-MCOUNT_INSN_SIZE l %r14,0b-0b(%r1) l %r14,0(%r14) basr %r14,%r14 diff --git a/trunk/arch/s390/kernel/mcount64.S b/trunk/arch/s390/kernel/mcount64.S index 1c52eae3396a..11332193db30 100644 --- a/trunk/arch/s390/kernel/mcount64.S +++ b/trunk/arch/s390/kernel/mcount64.S @@ -7,7 +7,6 @@ #include #include -#include .section .kprobes.text, "ax" @@ -30,7 +29,6 @@ ENTRY(ftrace_caller) stg %r1,__SF_BACKCHAIN(%r15) lgr %r2,%r14 lg %r3,168(%r15) - aghi %r2,-MCOUNT_INSN_SIZE larl %r14,ftrace_trace_function lg %r14,0(%r14) basr %r14,%r14 diff --git a/trunk/arch/s390/kernel/sclp.S b/trunk/arch/s390/kernel/sclp.S index 29bd7bec4176..b6506ee32a36 100644 --- a/trunk/arch/s390/kernel/sclp.S +++ b/trunk/arch/s390/kernel/sclp.S @@ -225,7 +225,7 @@ _sclp_print: ahi %r2,1 ltr %r0,%r0 # end of string? jz .LfinalizemtoS4 - chi %r0,0x0a # end of line (NL)? + chi %r0,0x15 # end of line (NL)? jz .LfinalizemtoS4 stc %r0,0(%r6,%r7) # copy to mto la %r11,0(%r6,%r7) diff --git a/trunk/arch/s390/kernel/smp.c b/trunk/arch/s390/kernel/smp.c index 4f977d0d25c2..8074cb4b7cbf 100644 --- a/trunk/arch/s390/kernel/smp.c +++ b/trunk/arch/s390/kernel/smp.c @@ -428,27 +428,34 @@ void smp_stop_cpu(void) * This is the main routine where commands issued by other * cpus are handled. 
*/ -static void smp_handle_ext_call(void) +static void do_ext_call_interrupt(struct ext_code ext_code, + unsigned int param32, unsigned long param64) { unsigned long bits; + int cpu; + + cpu = smp_processor_id(); + if (ext_code.code == 0x1202) + inc_irq_stat(IRQEXT_EXC); + else + inc_irq_stat(IRQEXT_EMS); + /* + * handle bit signal external calls + */ + bits = xchg(&pcpu_devices[cpu].ec_mask, 0); - /* handle bit signal external calls */ - bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0); if (test_bit(ec_stop_cpu, &bits)) smp_stop_cpu(); + if (test_bit(ec_schedule, &bits)) scheduler_ipi(); + if (test_bit(ec_call_function, &bits)) generic_smp_call_function_interrupt(); + if (test_bit(ec_call_function_single, &bits)) generic_smp_call_function_single_interrupt(); -} -static void do_ext_call_interrupt(struct ext_code ext_code, - unsigned int param32, unsigned long param64) -{ - inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS); - smp_handle_ext_call(); } void arch_send_call_function_ipi_mask(const struct cpumask *mask) @@ -638,7 +645,7 @@ static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info, continue; pcpu = pcpu_devices + cpu; pcpu->address = info->cpu[i].address; - pcpu->state = (i >= info->configured) ? + pcpu->state = (cpu >= info->configured) ? CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); set_cpu_present(cpu, true); @@ -753,8 +760,6 @@ int __cpu_disable(void) { unsigned long cregs[16]; - /* Handle possible pending IPIs */ - smp_handle_ext_call(); set_cpu_online(smp_processor_id(), false); /* Disable pseudo page faults on this cpu. */ pfault_fini(); diff --git a/trunk/arch/s390/mm/mem_detect.c b/trunk/arch/s390/mm/mem_detect.c index cca388253a39..3cbd3b8bf311 100644 --- a/trunk/arch/s390/mm/mem_detect.c +++ b/trunk/arch/s390/mm/mem_detect.c @@ -123,8 +123,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr, continue; } else if ((addr <= chunk->addr) && (addr + size >= chunk->addr + chunk->size)) { - memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk)); - memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk)); + memset(chunk, 0 , sizeof(*chunk)); } else if (addr + size < chunk->addr + chunk->size) { chunk->size = chunk->addr + chunk->size - addr - size; chunk->addr = addr + size; diff --git a/trunk/arch/s390/mm/pgtable.c b/trunk/arch/s390/mm/pgtable.c index a938b548f07e..7805ddca833d 100644 --- a/trunk/arch/s390/mm/pgtable.c +++ b/trunk/arch/s390/mm/pgtable.c @@ -492,7 +492,7 @@ static int gmap_connect_pgtable(unsigned long address, unsigned long segment, mp = (struct gmap_pgtable *) page->index; rmap->gmap = gmap; rmap->entry = segment_ptr; - rmap->vmaddr = address & PMD_MASK; + rmap->vmaddr = address; spin_lock(&mm->page_table_lock); if (*segment_ptr == segment) { list_add(&rmap->list, &mp->mapper); @@ -677,7 +677,8 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len) break; } /* Get the page mapped */ - if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) { + if (get_user_pages(current, gmap->mm, addr, 1, 1, 0, + NULL, NULL) != 1) { rc = -EFAULT; break; } diff --git a/trunk/arch/s390/pci/pci.c b/trunk/arch/s390/pci/pci.c index f1e5be85d592..e6f15b5d8b7d 100644 --- a/trunk/arch/s390/pci/pci.c +++ b/trunk/arch/s390/pci/pci.c @@ -302,6 +302,15 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len) return rc; } +void synchronize_irq(unsigned int irq) +{ + /* + * Not needed, the handler is protected by a lock 
and IRQs that occur + * after the handler is deleted are just NOPs. + */ +} +EXPORT_SYMBOL_GPL(synchronize_irq); + void enable_irq(unsigned int irq) { struct msi_desc *msi = irq_get_msi_desc(irq); @@ -318,6 +327,30 @@ void disable_irq(unsigned int irq) } EXPORT_SYMBOL_GPL(disable_irq); +void disable_irq_nosync(unsigned int irq) +{ + disable_irq(irq); +} +EXPORT_SYMBOL_GPL(disable_irq_nosync); + +unsigned long probe_irq_on(void) +{ + return 0; +} +EXPORT_SYMBOL_GPL(probe_irq_on); + +int probe_irq_off(unsigned long val) +{ + return 0; +} +EXPORT_SYMBOL_GPL(probe_irq_off); + +unsigned int probe_irq_mask(unsigned long val) +{ + return val; +} +EXPORT_SYMBOL_GPL(probe_irq_mask); + void pcibios_fixup_bus(struct pci_bus *bus) { } diff --git a/trunk/arch/score/include/asm/pgtable.h b/trunk/arch/score/include/asm/pgtable.h index db96ad9afc03..2fd469807683 100644 --- a/trunk/arch/score/include/asm/pgtable.h +++ b/trunk/arch/score/include/asm/pgtable.h @@ -113,6 +113,9 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } #define pte_clear(mm, addr, xp) \ do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + /* * The "pgd_xxx()" functions here are trivial for a folded two-level * setup: the pgd is never bad, and a pmd always exists (as it's folded diff --git a/trunk/arch/score/mm/init.c b/trunk/arch/score/mm/init.c index 0940682ab38b..d8f988a37d16 100644 --- a/trunk/arch/score/mm/init.c +++ b/trunk/arch/score/mm/init.c @@ -41,6 +41,8 @@ unsigned long empty_zero_page; EXPORT_SYMBOL_GPL(empty_zero_page); +static struct kcore_list kcore_mem, kcore_vmalloc; + static void setup_zero_page(void) { struct page *page; diff --git a/trunk/arch/sh/include/asm/pgtable.h b/trunk/arch/sh/include/asm/pgtable.h index cf434c64408d..9210e93a92c3 100644 --- a/trunk/arch/sh/include/asm/pgtable.h +++ b/trunk/arch/sh/include/asm/pgtable.h @@ -124,6 +124,9 @@ typedef pte_t *pte_addr_t; #define kern_addr_valid(addr) (1) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + #define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT))) /* diff --git a/trunk/arch/sparc/include/asm/Kbuild b/trunk/arch/sparc/include/asm/Kbuild index 7e4a97fbded4..ff18e3cfb6b1 100644 --- a/trunk/arch/sparc/include/asm/Kbuild +++ b/trunk/arch/sparc/include/asm/Kbuild @@ -6,7 +6,6 @@ generic-y += cputime.h generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h -generic-y += linkage.h generic-y += local64.h generic-y += mutex.h generic-y += irq_regs.h diff --git a/trunk/arch/sparc/include/asm/leon.h b/trunk/arch/sparc/include/asm/leon.h index b836e9297f2a..15a716934e4d 100644 --- a/trunk/arch/sparc/include/asm/leon.h +++ b/trunk/arch/sparc/include/asm/leon.h @@ -135,7 +135,7 @@ static inline int sparc_leon3_cpuid(void) #ifdef CONFIG_SMP # define LEON3_IRQ_IPI_DEFAULT 13 -# define LEON3_IRQ_TICKER (leon3_gptimer_irq) +# define LEON3_IRQ_TICKER (leon3_ticker_irq) # define LEON3_IRQ_CROSS_CALL 15 #endif diff --git a/trunk/arch/sparc/include/asm/leon_amba.h b/trunk/arch/sparc/include/asm/leon_amba.h index 24ec48c3ff90..f3034eddf468 100644 --- a/trunk/arch/sparc/include/asm/leon_amba.h +++ b/trunk/arch/sparc/include/asm/leon_amba.h @@ -47,7 +47,6 @@ struct amba_prom_registers { #define LEON3_GPTIMER_LD 4 #define LEON3_GPTIMER_IRQEN 8 #define LEON3_GPTIMER_SEPIRQ 8 -#define LEON3_GPTIMER_TIMERS 0x7 #define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable 
counting */ /* 0 = hold scalar and counter */ diff --git a/trunk/arch/sparc/include/asm/linkage.h b/trunk/arch/sparc/include/asm/linkage.h new file mode 100644 index 000000000000..291c2d01c44f --- /dev/null +++ b/trunk/arch/sparc/include/asm/linkage.h @@ -0,0 +1,6 @@ +#ifndef __ASM_LINKAGE_H +#define __ASM_LINKAGE_H + +/* Nothing to see here... */ + +#endif diff --git a/trunk/arch/sparc/include/asm/pgtable_32.h b/trunk/arch/sparc/include/asm/pgtable_32.h index 502f632f6cc7..6fc13483f702 100644 --- a/trunk/arch/sparc/include/asm/pgtable_32.h +++ b/trunk/arch/sparc/include/asm/pgtable_32.h @@ -443,7 +443,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma, return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot); } -#define io_remap_pfn_range io_remap_pfn_range #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ diff --git a/trunk/arch/sparc/include/asm/pgtable_64.h b/trunk/arch/sparc/include/asm/pgtable_64.h index 79c214efa3fe..7619f2f792af 100644 --- a/trunk/arch/sparc/include/asm/pgtable_64.h +++ b/trunk/arch/sparc/include/asm/pgtable_64.h @@ -914,7 +914,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma, return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot); } -#define io_remap_pfn_range io_remap_pfn_range #include #include diff --git a/trunk/arch/sparc/kernel/ds.c b/trunk/arch/sparc/kernel/ds.c index 5ef48dab5636..75bb608c423e 100644 --- a/trunk/arch/sparc/kernel/ds.c +++ b/trunk/arch/sparc/kernel/ds.c @@ -843,8 +843,7 @@ void ldom_reboot(const char *boot_command) unsigned long len; strcpy(full_boot_str, "boot "); - strlcpy(full_boot_str + strlen("boot "), boot_command, - sizeof(full_boot_str + strlen("boot "))); + strcpy(full_boot_str + strlen("boot "), boot_command); len = strlen(full_boot_str); if (reboot_data_supported) { diff --git a/trunk/arch/sparc/kernel/leon_kernel.c b/trunk/arch/sparc/kernel/leon_kernel.c index b7c68976cbc7..7c0231dabe44 100644 --- a/trunk/arch/sparc/kernel/leon_kernel.c +++ b/trunk/arch/sparc/kernel/leon_kernel.c @@ -38,6 +38,7 @@ static DEFINE_SPINLOCK(leon_irq_lock); unsigned long leon3_gptimer_irq; /* interrupt controller irq number */ unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */ +int leon3_ticker_irq; /* Timer ticker IRQ */ unsigned int sparc_leon_eirq; #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu]) #define LEON_IACK (&leon3_irqctrl_regs->iclear) @@ -277,9 +278,6 @@ irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused) leon_clear_profile_irq(cpu); - if (cpu == boot_cpu_id) - timer_interrupt(irq, NULL); - ce = &per_cpu(sparc32_clockevent, cpu); irq_enter(); @@ -301,7 +299,6 @@ void __init leon_init_timers(void) int icsel; int ampopts; int err; - u32 config; sparc_config.get_cycles_offset = leon_cycles_offset; sparc_config.cs_period = 1000000 / HZ; @@ -380,6 +377,23 @@ void __init leon_init_timers(void) LEON3_BYPASS_STORE_PA( &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0); +#ifdef CONFIG_SMP + leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx; + + if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) & + (1<e[leon3_gptimer_idx+1].val, + 0); + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld, + (((1000000/HZ) - 1))); + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl, + 0); +#endif + /* * The IRQ controller may (if implemented) consist of multiple * IRQ controllers, each mapped on a 4Kb boundary. 
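The leon_init_timers() hunks around this point change how the GPTIMER interrupt is chosen: the per-timer path offsets the base IRQ by the timer index when the core's SEPIRQ capability bit is set, and otherwise warns when several sub-timers have to share one line. The stand-alone C sketch below (not part of the patch) shows that decision in isolation; the constants mirror the LEON3_GPTIMER_SEPIRQ/LEON3_GPTIMER_TIMERS values in this diff, and pick_timer_irq() is a name invented here purely for illustration.

/*
 * Sketch of the SEPIRQ decision in leon_init_timers(): if the GPTIMER
 * core advertises a separate IRQ line per sub-timer, the effective IRQ
 * is base + timer index; otherwise every sub-timer shares the base IRQ.
 */
#include <stdio.h>

#define GPTIMER_CONFIG_TIMERS	0x7		/* number of sub-timers, bits 2:0 */
#define GPTIMER_CONFIG_SEPIRQ	(1 << 8)	/* separate IRQ per sub-timer */

static unsigned int pick_timer_irq(unsigned int config,
				   unsigned int base_irq,
				   unsigned int timer_idx)
{
	if (config & GPTIMER_CONFIG_SEPIRQ)
		return base_irq + timer_idx;	/* one line per timer */

	if ((config & GPTIMER_CONFIG_TIMERS) > 1)
		fprintf(stderr, "warning: %u timers share IRQ %u\n",
			config & GPTIMER_CONFIG_TIMERS, base_irq);

	return base_irq;			/* shared line */
}

int main(void)
{
	/* Example: four sub-timers, SEPIRQ set, base IRQ 8, timer index 1. */
	unsigned int config = 4 | GPTIMER_CONFIG_SEPIRQ;

	printf("timer IRQ = %u\n", pick_timer_irq(config, 8, 1));
	return 0;
}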
@@ -402,6 +416,13 @@ void __init leon_init_timers(void) if (eirq != 0) leon_eirq_setup(eirq); + irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx); + err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); + if (err) { + printk(KERN_ERR "unable to attach timer IRQ%d\n", irq); + prom_halt(); + } + #ifdef CONFIG_SMP { unsigned long flags; @@ -418,31 +439,30 @@ void __init leon_init_timers(void) } #endif - config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config); - if (config & (1 << LEON3_GPTIMER_SEPIRQ)) - leon3_gptimer_irq += leon3_gptimer_idx; - else if ((config & LEON3_GPTIMER_TIMERS) > 1) - pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n"); + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, + LEON3_GPTIMER_EN | + LEON3_GPTIMER_RL | + LEON3_GPTIMER_LD | + LEON3_GPTIMER_IRQEN); #ifdef CONFIG_SMP /* Install per-cpu IRQ handler for broadcasted ticker */ - irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq, + irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq, "per-cpu", 0); err = request_irq(irq, leon_percpu_timer_ce_interrupt, - IRQF_PERCPU | IRQF_TIMER, "timer", NULL); -#else - irq = _leon_build_device_irq(NULL, leon3_gptimer_irq); - err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); -#endif + IRQF_PERCPU | IRQF_TIMER, "ticker", + NULL); if (err) { - pr_err("Unable to attach timer IRQ%d\n", irq); + printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq); prom_halt(); } - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, + + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl, LEON3_GPTIMER_EN | LEON3_GPTIMER_RL | LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN); +#endif return; bad: printk(KERN_ERR "No Timer/irqctrl found\n"); diff --git a/trunk/arch/sparc/kernel/leon_pci_grpci1.c b/trunk/arch/sparc/kernel/leon_pci_grpci1.c index 6df26e37f879..7739a54315e2 100644 --- a/trunk/arch/sparc/kernel/leon_pci_grpci1.c +++ b/trunk/arch/sparc/kernel/leon_pci_grpci1.c @@ -536,9 +536,11 @@ static int grpci1_of_probe(struct platform_device *ofdev) /* find device register base address */ res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(&ofdev->dev, res); - if (IS_ERR(regs)) - return PTR_ERR(regs); + regs = devm_request_and_ioremap(&ofdev->dev, res); + if (!regs) { + dev_err(&ofdev->dev, "io-regs mapping failed\n"); + return -EADDRNOTAVAIL; + } /* * check that we're in Host Slot and that we can act as a Host Bridge diff --git a/trunk/arch/sparc/kernel/leon_pmc.c b/trunk/arch/sparc/kernel/leon_pmc.c index b0b3967a2dd2..bdf53d9a8d46 100644 --- a/trunk/arch/sparc/kernel/leon_pmc.c +++ b/trunk/arch/sparc/kernel/leon_pmc.c @@ -47,10 +47,6 @@ void pmc_leon_idle_fixup(void) * MMU does not get a TLB miss here by using the MMU BYPASS ASI. 
*/ register unsigned int address = (unsigned int)leon3_irqctrl_regs; - - /* Interrupts need to be enabled to not hang the CPU */ - local_irq_enable(); - __asm__ __volatile__ ( "wr %%g0, %%asr19\n" "lda [%0] %1, %%g0\n" @@ -64,9 +60,6 @@ void pmc_leon_idle_fixup(void) */ void pmc_leon_idle(void) { - /* Interrupts need to be enabled to not hang the CPU */ - local_irq_enable(); - /* For systems without power-down, this will be no-op */ __asm__ __volatile__ ("wr %g0, %asr19\n\t"); } diff --git a/trunk/arch/sparc/kernel/pci.c b/trunk/arch/sparc/kernel/pci.c index 2031c65fd4ea..baf4366e2d6a 100644 --- a/trunk/arch/sparc/kernel/pci.c +++ b/trunk/arch/sparc/kernel/pci.c @@ -773,6 +773,15 @@ static int __pci_mmap_make_offset(struct pci_dev *pdev, return 0; } +/* Set vm_flags of VMA, as appropriate for this architecture, for a pci device + * mapping. + */ +static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state) +{ + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; +} + /* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci * device mapping. */ @@ -800,6 +809,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, if (ret < 0) return ret; + __pci_mmap_set_flags(dev, vma, mmap_state); __pci_mmap_set_pgprot(dev, vma, mmap_state); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); diff --git a/trunk/arch/sparc/kernel/prom_common.c b/trunk/arch/sparc/kernel/prom_common.c index 79cc0d1a477d..9f20566b0773 100644 --- a/trunk/arch/sparc/kernel/prom_common.c +++ b/trunk/arch/sparc/kernel/prom_common.c @@ -54,7 +54,6 @@ EXPORT_SYMBOL(of_set_property_mutex); int of_set_property(struct device_node *dp, const char *name, void *val, int len) { struct property **prevp; - unsigned long flags; void *new_val; int err; @@ -65,7 +64,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len err = -ENODEV; mutex_lock(&of_set_property_mutex); - raw_spin_lock_irqsave(&devtree_lock, flags); + raw_spin_lock(&devtree_lock); prevp = &dp->properties; while (*prevp) { struct property *prop = *prevp; @@ -92,7 +91,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len } prevp = &(*prevp)->next; } - raw_spin_unlock_irqrestore(&devtree_lock, flags); + raw_spin_unlock(&devtree_lock); mutex_unlock(&of_set_property_mutex); /* XXX Upate procfs if necessary... */ diff --git a/trunk/arch/sparc/kernel/setup_32.c b/trunk/arch/sparc/kernel/setup_32.c index 1434526970a6..38bf80a22f02 100644 --- a/trunk/arch/sparc/kernel/setup_32.c +++ b/trunk/arch/sparc/kernel/setup_32.c @@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p) /* Initialize PROM console and command line. */ *cmdline_p = prom_getbootargs(); - strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); + strcpy(boot_command_line, *cmdline_p); parse_early_param(); boot_flags_init(*cmdline_p); diff --git a/trunk/arch/sparc/kernel/setup_64.c b/trunk/arch/sparc/kernel/setup_64.c index 13785547e435..88a127b9c69e 100644 --- a/trunk/arch/sparc/kernel/setup_64.c +++ b/trunk/arch/sparc/kernel/setup_64.c @@ -555,7 +555,7 @@ void __init setup_arch(char **cmdline_p) { /* Initialize PROM console and command line. 
*/ *cmdline_p = prom_getbootargs(); - strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); + strcpy(boot_command_line, *cmdline_p); parse_early_param(); boot_flags_init(*cmdline_p); diff --git a/trunk/arch/sparc/mm/init_64.c b/trunk/arch/sparc/mm/init_64.c index 04fd55a6e461..a7171997adfd 100644 --- a/trunk/arch/sparc/mm/init_64.c +++ b/trunk/arch/sparc/mm/init_64.c @@ -1098,14 +1098,7 @@ static int __init grab_mblocks(struct mdesc_handle *md) m->size = *val; val = mdesc_get_property(md, node, "address-congruence-offset", NULL); - - /* The address-congruence-offset property is optional. - * Explicity zero it be identifty this. - */ - if (val) - m->offset = *val; - else - m->offset = 0UL; + m->offset = *val; numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", count - 1, m->base, m->size, m->offset); diff --git a/trunk/arch/sparc/mm/tlb.c b/trunk/arch/sparc/mm/tlb.c index 37e7bc4c95b3..83d89bcb44af 100644 --- a/trunk/arch/sparc/mm/tlb.c +++ b/trunk/arch/sparc/mm/tlb.c @@ -85,8 +85,8 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, } if (!tb->active) { - flush_tsb_user_page(mm, vaddr); global_flush_tlb_page(mm, vaddr); + flush_tsb_user_page(mm, vaddr); goto out; } diff --git a/trunk/arch/sparc/prom/bootstr_32.c b/trunk/arch/sparc/prom/bootstr_32.c index d2b49d2365e7..f5ec32e0d419 100644 --- a/trunk/arch/sparc/prom/bootstr_32.c +++ b/trunk/arch/sparc/prom/bootstr_32.c @@ -23,25 +23,23 @@ prom_getbootargs(void) return barg_buf; } - switch (prom_vers) { + switch(prom_vers) { case PROM_V0: cp = barg_buf; /* Start from 1 and go over fd(0,0,0)kernel */ - for (iter = 1; iter < 8; iter++) { + for(iter = 1; iter < 8; iter++) { arg = (*(romvec->pv_v0bootargs))->argv[iter]; if (arg == NULL) break; - while (*arg != 0) { + while(*arg != 0) { /* Leave place for space and null. */ - if (cp >= barg_buf + BARG_LEN - 2) + if(cp >= barg_buf + BARG_LEN-2){ /* We might issue a warning here. */ break; + } *cp++ = *arg++; } *cp++ = ' '; - if (cp >= barg_buf + BARG_LEN - 1) - /* We might issue a warning here. */ - break; } *cp = 0; break; diff --git a/trunk/arch/sparc/prom/tree_64.c b/trunk/arch/sparc/prom/tree_64.c index bd1b2a3ac34e..92204c3800b5 100644 --- a/trunk/arch/sparc/prom/tree_64.c +++ b/trunk/arch/sparc/prom/tree_64.c @@ -39,7 +39,7 @@ inline phandle __prom_getchild(phandle node) return prom_node_to_node("child", node); } -phandle prom_getchild(phandle node) +inline phandle prom_getchild(phandle node) { phandle cnode; @@ -72,7 +72,7 @@ inline phandle __prom_getsibling(phandle node) return prom_node_to_node(prom_peer_name, node); } -phandle prom_getsibling(phandle node) +inline phandle prom_getsibling(phandle node) { phandle sibnode; @@ -89,7 +89,7 @@ EXPORT_SYMBOL(prom_getsibling); /* Return the length in bytes of property 'prop' at node 'node'. * Return -1 on error. */ -int prom_getproplen(phandle node, const char *prop) +inline int prom_getproplen(phandle node, const char *prop) { unsigned long args[6]; @@ -113,8 +113,8 @@ EXPORT_SYMBOL(prom_getproplen); * 'buffer' which has a size of 'bufsize'. If the acquisition * was successful the length will be returned, else -1 is returned. */ -int prom_getproperty(phandle node, const char *prop, - char *buffer, int bufsize) +inline int prom_getproperty(phandle node, const char *prop, + char *buffer, int bufsize) { unsigned long args[8]; int plen; @@ -141,7 +141,7 @@ EXPORT_SYMBOL(prom_getproperty); /* Acquire an integer property and return its value. Returns -1 * on failure. 
*/ -int prom_getint(phandle node, const char *prop) +inline int prom_getint(phandle node, const char *prop) { int intprop; @@ -235,7 +235,7 @@ static const char *prom_nextprop_name = "nextprop"; /* Return the first property type for node 'node'. * buffer should be at least 32B in length */ -char *prom_firstprop(phandle node, char *buffer) +inline char *prom_firstprop(phandle node, char *buffer) { unsigned long args[7]; @@ -261,7 +261,7 @@ EXPORT_SYMBOL(prom_firstprop); * at node 'node' . Returns NULL string if no more * property types for this node. */ -char *prom_nextprop(phandle node, const char *oprop, char *buffer) +inline char *prom_nextprop(phandle node, const char *oprop, char *buffer) { unsigned long args[7]; char buf[32]; diff --git a/trunk/arch/tile/include/asm/pgtable.h b/trunk/arch/tile/include/asm/pgtable.h index 33587f16c152..73b1a4c9ad03 100644 --- a/trunk/arch/tile/include/asm/pgtable.h +++ b/trunk/arch/tile/include/asm/pgtable.h @@ -362,6 +362,9 @@ do { \ #define kern_addr_valid(addr) (1) #endif /* CONFIG_FLATMEM */ +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + extern void vmalloc_sync_all(void); #endif /* !__ASSEMBLY__ */ diff --git a/trunk/arch/tile/lib/exports.c b/trunk/arch/tile/lib/exports.c index a93b02a25222..4385cb6fa00a 100644 --- a/trunk/arch/tile/lib/exports.c +++ b/trunk/arch/tile/lib/exports.c @@ -84,6 +84,4 @@ uint64_t __ashrdi3(uint64_t, unsigned int); EXPORT_SYMBOL(__ashrdi3); uint64_t __ashldi3(uint64_t, unsigned int); EXPORT_SYMBOL(__ashldi3); -int __ffsdi2(uint64_t); -EXPORT_SYMBOL(__ffsdi2); #endif diff --git a/trunk/arch/um/drivers/mconsole_kern.c b/trunk/arch/um/drivers/mconsole_kern.c index 3df3bd544492..d7d21851e60c 100644 --- a/trunk/arch/um/drivers/mconsole_kern.c +++ b/trunk/arch/um/drivers/mconsole_kern.c @@ -147,7 +147,7 @@ void mconsole_proc(struct mc_request *req) } do { - loff_t pos = file->f_pos; + loff_t pos; mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); len = vfs_read(file, buf, PAGE_SIZE - 1, &pos); diff --git a/trunk/arch/um/include/asm/pgtable.h b/trunk/arch/um/include/asm/pgtable.h index bf974f712af7..ae02909a1875 100644 --- a/trunk/arch/um/include/asm/pgtable.h +++ b/trunk/arch/um/include/asm/pgtable.h @@ -69,6 +69,8 @@ extern unsigned long end_iomem; #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) +#define io_remap_pfn_range remap_pfn_range + /* * The i386 can't do page protection for execute, and considers that the same * are read. 
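The mconsole_proc() hunk above turns on how vfs_read() is driven: the helper advances a caller-supplied loff_t, so the offset variable must be initialised before the first call and carried across iterations. The user-space sketch below (an analogy using pread(2), not the kernel API, and not part of the patch) reads a proc-style file in page-sized chunks while tracking the offset in the same explicit way.

/*
 * Read a file in chunks with an explicitly tracked offset, mirroring the
 * vfs_read(file, buf, size, &pos) pattern used by mconsole_proc().
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/proc/version";
	char buf[4096];
	off_t pos = 0;			/* explicit file offset, initialised up front */
	ssize_t len;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	do {
		len = pread(fd, buf, sizeof(buf) - 1, pos);
		if (len > 0) {
			buf[len] = '\0';	/* pread does not NUL-terminate */
			fputs(buf, stdout);
			pos += len;		/* advance our own position */
		}
	} while (len > 0);

	close(fd);
	return 0;
}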
diff --git a/trunk/arch/unicore32/include/asm/pgtable.h b/trunk/arch/unicore32/include/asm/pgtable.h index 233c25880df4..68b2f297ac97 100644 --- a/trunk/arch/unicore32/include/asm/pgtable.h +++ b/trunk/arch/unicore32/include/asm/pgtable.h @@ -303,6 +303,13 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; #include +/* + * remap a physical page `pfn' of size `size' with page protection `prot' + * into virtual address `from' + */ +#define io_remap_pfn_range(vma, from, pfn, size, prot) \ + remap_pfn_range(vma, from, pfn, size, prot) + #define pgtable_cache_init() do { } while (0) #endif /* !__ASSEMBLY__ */ diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index fe120da25625..6a154a91c7e7 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -108,6 +108,7 @@ config X86 select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) select GENERIC_TIME_VSYSCALL if X86_64 select KTIME_SCALAR if X86_32 + select ALWAYS_USE_PERSISTENT_CLOCK select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select HAVE_CONTEXT_TRACKING if X86_64 @@ -2265,7 +2266,6 @@ source "fs/Kconfig.binfmt" config IA32_EMULATION bool "IA32 Emulation" depends on X86_64 - select BINFMT_ELF select COMPAT_BINFMT_ELF select HAVE_UID16 ---help--- diff --git a/trunk/arch/x86/boot/compressed/eboot.c b/trunk/arch/x86/boot/compressed/eboot.c index c205035a6b96..35ee62fccf98 100644 --- a/trunk/arch/x86/boot/compressed/eboot.c +++ b/trunk/arch/x86/boot/compressed/eboot.c @@ -251,6 +251,51 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size) *size = len; } +static efi_status_t setup_efi_vars(struct boot_params *params) +{ + struct setup_data *data; + struct efi_var_bootdata *efidata; + u64 store_size, remaining_size, var_size; + efi_status_t status; + + if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION) + return EFI_UNSUPPORTED; + + data = (struct setup_data *)(unsigned long)params->hdr.setup_data; + + while (data && data->next) + data = (struct setup_data *)(unsigned long)data->next; + + status = efi_call_phys4((void *)sys_table->runtime->query_variable_info, + EFI_VARIABLE_NON_VOLATILE | + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS, &store_size, + &remaining_size, &var_size); + + if (status != EFI_SUCCESS) + return status; + + status = efi_call_phys3(sys_table->boottime->allocate_pool, + EFI_LOADER_DATA, sizeof(*efidata), &efidata); + + if (status != EFI_SUCCESS) + return status; + + efidata->data.type = SETUP_EFI_VARS; + efidata->data.len = sizeof(struct efi_var_bootdata) - + sizeof(struct setup_data); + efidata->data.next = 0; + efidata->store_size = store_size; + efidata->remaining_size = remaining_size; + efidata->max_var_size = var_size; + + if (data) + data->next = (unsigned long)efidata; + else + params->hdr.setup_data = (unsigned long)efidata; + +} + static efi_status_t setup_efi_pci(struct boot_params *params) { efi_pci_io_protocol *pci; @@ -1157,6 +1202,8 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table, setup_graphics(boot_params); + setup_efi_vars(boot_params); + setup_efi_pci(boot_params); status = efi_call_phys3(sys_table->boottime->allocate_pool, diff --git a/trunk/arch/x86/crypto/aesni-intel_asm.S b/trunk/arch/x86/crypto/aesni-intel_asm.S index 477e9d75149b..62fe22cd4cba 100644 --- a/trunk/arch/x86/crypto/aesni-intel_asm.S +++ b/trunk/arch/x86/crypto/aesni-intel_asm.S @@ -2681,68 +2681,56 @@ ENTRY(aesni_xts_crypt8) addq %rcx, KEYP movdqa IV, STATE1 - movdqu 0x00(INP), INC - pxor INC, STATE1 + pxor 
0x00(INP), STATE1 movdqu IV, 0x00(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE2 - movdqu 0x10(INP), INC - pxor INC, STATE2 + pxor 0x10(INP), STATE2 movdqu IV, 0x10(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE3 - movdqu 0x20(INP), INC - pxor INC, STATE3 + pxor 0x20(INP), STATE3 movdqu IV, 0x20(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE4 - movdqu 0x30(INP), INC - pxor INC, STATE4 + pxor 0x30(INP), STATE4 movdqu IV, 0x30(OUTP) call *%r11 - movdqu 0x00(OUTP), INC - pxor INC, STATE1 + pxor 0x00(OUTP), STATE1 movdqu STATE1, 0x00(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE1 - movdqu 0x40(INP), INC - pxor INC, STATE1 + pxor 0x40(INP), STATE1 movdqu IV, 0x40(OUTP) - movdqu 0x10(OUTP), INC - pxor INC, STATE2 + pxor 0x10(OUTP), STATE2 movdqu STATE2, 0x10(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE2 - movdqu 0x50(INP), INC - pxor INC, STATE2 + pxor 0x50(INP), STATE2 movdqu IV, 0x50(OUTP) - movdqu 0x20(OUTP), INC - pxor INC, STATE3 + pxor 0x20(OUTP), STATE3 movdqu STATE3, 0x20(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE3 - movdqu 0x60(INP), INC - pxor INC, STATE3 + pxor 0x60(INP), STATE3 movdqu IV, 0x60(OUTP) - movdqu 0x30(OUTP), INC - pxor INC, STATE4 + pxor 0x30(OUTP), STATE4 movdqu STATE4, 0x30(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE4 - movdqu 0x70(INP), INC - pxor INC, STATE4 + pxor 0x70(INP), STATE4 movdqu IV, 0x70(OUTP) _aesni_gf128mul_x_ble() @@ -2750,20 +2738,16 @@ ENTRY(aesni_xts_crypt8) call *%r11 - movdqu 0x40(OUTP), INC - pxor INC, STATE1 + pxor 0x40(OUTP), STATE1 movdqu STATE1, 0x40(OUTP) - movdqu 0x50(OUTP), INC - pxor INC, STATE2 + pxor 0x50(OUTP), STATE2 movdqu STATE2, 0x50(OUTP) - movdqu 0x60(OUTP), INC - pxor INC, STATE3 + pxor 0x60(OUTP), STATE3 movdqu STATE3, 0x60(OUTP) - movdqu 0x70(OUTP), INC - pxor INC, STATE4 + pxor 0x70(OUTP), STATE4 movdqu STATE4, 0x70(OUTP) ret diff --git a/trunk/arch/x86/crypto/crc32-pclmul_asm.S b/trunk/arch/x86/crypto/crc32-pclmul_asm.S index f247304299a2..94c27df8a549 100644 --- a/trunk/arch/x86/crypto/crc32-pclmul_asm.S +++ b/trunk/arch/x86/crypto/crc32-pclmul_asm.S @@ -240,7 +240,7 @@ fold_64: pand %xmm3, %xmm1 PCLMULQDQ 0x00, CONSTANT, %xmm1 pxor %xmm2, %xmm1 - PEXTRD 0x01, %xmm1, %eax + pextrd $0x01, %xmm1, %eax ret ENDPROC(crc32_pclmul_le_16) diff --git a/trunk/arch/x86/crypto/sha256-avx-asm.S b/trunk/arch/x86/crypto/sha256-avx-asm.S index 642f15687a0a..56610c4bf31b 100644 --- a/trunk/arch/x86/crypto/sha256-avx-asm.S +++ b/trunk/arch/x86/crypto/sha256-avx-asm.S @@ -118,7 +118,7 @@ y2 = %r15d _INP_END_SIZE = 8 _INP_SIZE = 8 -_XFER_SIZE = 16 +_XFER_SIZE = 8 _XMM_SAVE_SIZE = 0 _INP_END = 0 diff --git a/trunk/arch/x86/crypto/sha256-ssse3-asm.S b/trunk/arch/x86/crypto/sha256-ssse3-asm.S index f833b74d902b..98d3c391da81 100644 --- a/trunk/arch/x86/crypto/sha256-ssse3-asm.S +++ b/trunk/arch/x86/crypto/sha256-ssse3-asm.S @@ -111,7 +111,7 @@ y2 = %r15d _INP_END_SIZE = 8 _INP_SIZE = 8 -_XFER_SIZE = 16 +_XFER_SIZE = 8 _XMM_SAVE_SIZE = 0 _INP_END = 0 diff --git a/trunk/arch/x86/ia32/ia32_aout.c b/trunk/arch/x86/ia32/ia32_aout.c index 52ff81cce008..805078e08013 100644 --- a/trunk/arch/x86/ia32/ia32_aout.c +++ b/trunk/arch/x86/ia32/ia32_aout.c @@ -192,7 +192,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, /* struct user */ DUMP_WRITE(&dump, sizeof(dump)); /* Now dump all of the user data. 
Include malloced stuff as well */ - DUMP_SEEK(PAGE_SIZE - sizeof(dump)); + DUMP_SEEK(PAGE_SIZE); /* now we start writing out the user space info */ set_fs(USER_DS); /* Dump the data area */ diff --git a/trunk/arch/x86/include/asm/efi.h b/trunk/arch/x86/include/asm/efi.h index 60c89f30c727..2fb5d5884e23 100644 --- a/trunk/arch/x86/include/asm/efi.h +++ b/trunk/arch/x86/include/asm/efi.h @@ -102,6 +102,13 @@ extern void efi_call_phys_epilog(void); extern void efi_unmap_memmap(void); extern void efi_memory_uc(u64 addr, unsigned long size); +struct efi_var_bootdata { + struct setup_data data; + u64 store_size; + u64 remaining_size; + u64 max_var_size; +}; + #ifdef CONFIG_EFI static inline bool efi_is_native(void) diff --git a/trunk/arch/x86/include/asm/inst.h b/trunk/arch/x86/include/asm/inst.h index 3e115273ed88..280bf7fb6aba 100644 --- a/trunk/arch/x86/include/asm/inst.h +++ b/trunk/arch/x86/include/asm/inst.h @@ -9,68 +9,12 @@ #define REG_NUM_INVALID 100 -#define REG_TYPE_R32 0 -#define REG_TYPE_R64 1 -#define REG_TYPE_XMM 2 +#define REG_TYPE_R64 0 +#define REG_TYPE_XMM 1 #define REG_TYPE_INVALID 100 - .macro R32_NUM opd r32 - \opd = REG_NUM_INVALID - .ifc \r32,%eax - \opd = 0 - .endif - .ifc \r32,%ecx - \opd = 1 - .endif - .ifc \r32,%edx - \opd = 2 - .endif - .ifc \r32,%ebx - \opd = 3 - .endif - .ifc \r32,%esp - \opd = 4 - .endif - .ifc \r32,%ebp - \opd = 5 - .endif - .ifc \r32,%esi - \opd = 6 - .endif - .ifc \r32,%edi - \opd = 7 - .endif -#ifdef CONFIG_X86_64 - .ifc \r32,%r8d - \opd = 8 - .endif - .ifc \r32,%r9d - \opd = 9 - .endif - .ifc \r32,%r10d - \opd = 10 - .endif - .ifc \r32,%r11d - \opd = 11 - .endif - .ifc \r32,%r12d - \opd = 12 - .endif - .ifc \r32,%r13d - \opd = 13 - .endif - .ifc \r32,%r14d - \opd = 14 - .endif - .ifc \r32,%r15d - \opd = 15 - .endif -#endif - .endm - .macro R64_NUM opd r64 \opd = REG_NUM_INVALID -#ifdef CONFIG_X86_64 .ifc \r64,%rax \opd = 0 .endif @@ -119,7 +63,6 @@ .ifc \r64,%r15 \opd = 15 .endif -#endif .endm .macro XMM_NUM opd xmm @@ -175,13 +118,10 @@ .endm .macro REG_TYPE type reg - R32_NUM reg_type_r32 \reg R64_NUM reg_type_r64 \reg XMM_NUM reg_type_xmm \reg .if reg_type_r64 <> REG_NUM_INVALID \type = REG_TYPE_R64 - .elseif reg_type_r32 <> REG_NUM_INVALID - \type = REG_TYPE_R32 .elseif reg_type_xmm <> REG_NUM_INVALID \type = REG_TYPE_XMM .else @@ -222,16 +162,6 @@ .byte \imm8 .endm - .macro PEXTRD imm8 xmm gpr - R32_NUM extrd_opd1 \gpr - XMM_NUM extrd_opd2 \xmm - PFX_OPD_SIZE - PFX_REX extrd_opd1 extrd_opd2 - .byte 0x0f, 0x3a, 0x16 - MODRM 0xc0 extrd_opd1 extrd_opd2 - .byte \imm8 - .endm - .macro AESKEYGENASSIST rcon xmm1 xmm2 XMM_NUM aeskeygen_opd1 \xmm1 XMM_NUM aeskeygen_opd2 \xmm2 diff --git a/trunk/arch/x86/include/asm/irq.h b/trunk/arch/x86/include/asm/irq.h index 57873beb3292..ba870bb6dd8e 100644 --- a/trunk/arch/x86/include/asm/irq.h +++ b/trunk/arch/x86/include/asm/irq.h @@ -41,9 +41,4 @@ extern int vector_used_by_percpu_irq(unsigned int vector); extern void init_ISA_irqs(void); -#ifdef CONFIG_X86_LOCAL_APIC -void arch_trigger_all_cpu_backtrace(void); -#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace -#endif - #endif /* _ASM_X86_IRQ_H */ diff --git a/trunk/arch/x86/include/asm/microcode.h b/trunk/arch/x86/include/asm/microcode.h index 6bc3985ee473..6825e2efd1b4 100644 --- a/trunk/arch/x86/include/asm/microcode.h +++ b/trunk/arch/x86/include/asm/microcode.h @@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {} #ifdef CONFIG_MICROCODE_EARLY #define MAX_UCODE_COUNT 128 extern void __init load_ucode_bsp(void); 
-extern void __cpuinit load_ucode_ap(void); +extern __init void load_ucode_ap(void); extern int __init save_microcode_in_initrd(void); #else static inline void __init load_ucode_bsp(void) {} -static inline void __cpuinit load_ucode_ap(void) {} +static inline __init void load_ucode_ap(void) {} static inline int __init save_microcode_in_initrd(void) { return 0; diff --git a/trunk/arch/x86/include/asm/nmi.h b/trunk/arch/x86/include/asm/nmi.h index 86f9301903c8..c0fa356e90de 100644 --- a/trunk/arch/x86/include/asm/nmi.h +++ b/trunk/arch/x86/include/asm/nmi.h @@ -18,7 +18,9 @@ extern int proc_nmi_enabled(struct ctl_table *, int , void __user *, size_t *, loff_t *); extern int unknown_nmi_panic; -#endif /* CONFIG_X86_LOCAL_APIC */ +void arch_trigger_all_cpu_backtrace(void); +#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace +#endif #define NMI_FLAG_FIRST 1 diff --git a/trunk/arch/x86/include/asm/pgtable.h b/trunk/arch/x86/include/asm/pgtable.h index 5b0818bc8963..1e672234c4ff 100644 --- a/trunk/arch/x86/include/asm/pgtable.h +++ b/trunk/arch/x86/include/asm/pgtable.h @@ -506,6 +506,9 @@ static inline unsigned long pages_to_mb(unsigned long npg) return npg >> (20 - PAGE_SHIFT); } +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + #if PAGETABLE_LEVELS > 2 static inline int pud_none(pud_t pud) { diff --git a/trunk/arch/x86/include/uapi/asm/bootparam.h b/trunk/arch/x86/include/uapi/asm/bootparam.h index c15ddaf90710..08744242b8d2 100644 --- a/trunk/arch/x86/include/uapi/asm/bootparam.h +++ b/trunk/arch/x86/include/uapi/asm/bootparam.h @@ -6,6 +6,7 @@ #define SETUP_E820_EXT 1 #define SETUP_DTB 2 #define SETUP_PCI 3 +#define SETUP_EFI_VARS 4 /* ram_size flags */ #define RAMDISK_IMAGE_START_MASK 0x07FF diff --git a/trunk/arch/x86/kernel/apic/hw_nmi.c b/trunk/arch/x86/kernel/apic/hw_nmi.c index a698d7165c96..31cb9ae992b7 100644 --- a/trunk/arch/x86/kernel/apic/hw_nmi.c +++ b/trunk/arch/x86/kernel/apic/hw_nmi.c @@ -9,7 +9,6 @@ * */ #include -#include #include #include diff --git a/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c b/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c index 5f90b85ff22e..35ffda5d0727 100644 --- a/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c @@ -714,15 +714,15 @@ int __init mtrr_cleanup(unsigned address_bits) if (mtrr_tom2) x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; + nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size); /* * [0, 1M) should always be covered by var mtrr with WB * and fixed mtrrs should take effect before var mtrr for it: */ - nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0, + nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0, 1ULL<<(20 - PAGE_SHIFT)); - /* add from var mtrr at last */ - nr_range = x86_get_mtrr_mem_range(range, nr_range, - x_remove_base, x_remove_size); + /* Sort the ranges: */ + sort_range(range, nr_range); range_sums = sum_ranges(range, nr_range); printk(KERN_INFO "total RAM covered: %ldM\n", diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c index a9e22073bd56..f60d41ff9a97 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c @@ -165,13 +165,13 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = { INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), 
INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), EVENT_EXTRA_END }; static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), - INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), EVENT_EXTRA_END }; diff --git a/trunk/arch/x86/kernel/head64.c b/trunk/arch/x86/kernel/head64.c index 55b67614ed94..dab95a85f7f8 100644 --- a/trunk/arch/x86/kernel/head64.c +++ b/trunk/arch/x86/kernel/head64.c @@ -34,7 +34,7 @@ extern pgd_t early_level4_pgt[PTRS_PER_PGD]; extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD]; static unsigned int __initdata next_early_pgt = 2; -pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); +pmdval_t __initdata early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); /* Wipe all early page tables except for the kernel symbol map */ static void __init reset_early_page_tables(void) diff --git a/trunk/arch/x86/kernel/head_64.S b/trunk/arch/x86/kernel/head_64.S index 321d65ebaffe..08f7e8039099 100644 --- a/trunk/arch/x86/kernel/head_64.S +++ b/trunk/arch/x86/kernel/head_64.S @@ -115,10 +115,8 @@ startup_64: movq %rdi, %rax shrq $PUD_SHIFT, %rax andl $(PTRS_PER_PUD-1), %eax - movq %rdx, 4096(%rbx,%rax,8) - incl %eax - andl $(PTRS_PER_PUD-1), %eax - movq %rdx, 4096(%rbx,%rax,8) + movq %rdx, (4096+0)(%rbx,%rax,8) + movq %rdx, (4096+8)(%rbx,%rax,8) addq $8192, %rbx movq %rdi, %rax diff --git a/trunk/arch/x86/kernel/i387.c b/trunk/arch/x86/kernel/i387.c index cb339097b9ea..245a71db401a 100644 --- a/trunk/arch/x86/kernel/i387.c +++ b/trunk/arch/x86/kernel/i387.c @@ -22,19 +22,23 @@ /* * Were we in an interrupt that interrupted kernel mode? * + * For now, with eagerfpu we will return interrupted kernel FPU + * state as not-idle. TBD: Ideally we can change the return value + * to something like __thread_has_fpu(current). But we need to + * be careful of doing __thread_clear_has_fpu() before saving + * the FPU etc for supporting nested uses etc. For now, take + * the simple route! + * * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that * pair does nothing at all: the thread must not have fpu (so * that we don't try to save the FPU state), and TS must * be set (so that the clts/stts pair does nothing that is * visible in the interrupted kernel thread). - * - * Except for the eagerfpu case when we return 1 unless we've already - * been eager and saved the state in kernel_fpu_begin(). 
*/ static inline bool interrupted_kernel_fpu_idle(void) { if (use_eager_fpu()) - return __thread_has_fpu(current); + return 0; return !__thread_has_fpu(current) && (read_cr0() & X86_CR0_TS); @@ -74,8 +78,8 @@ void __kernel_fpu_begin(void) struct task_struct *me = current; if (__thread_has_fpu(me)) { - __thread_clear_has_fpu(me); __save_init_fpu(me); + __thread_clear_has_fpu(me); /* We do 'stts()' in __kernel_fpu_end() */ } else if (!use_eager_fpu()) { this_cpu_write(fpu_owner_task, NULL); diff --git a/trunk/arch/x86/kernel/kprobes/core.c b/trunk/arch/x86/kernel/kprobes/core.c index 211bce445522..9895a9a41380 100644 --- a/trunk/arch/x86/kernel/kprobes/core.c +++ b/trunk/arch/x86/kernel/kprobes/core.c @@ -365,14 +365,10 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src) return insn.length; } -static int __kprobes arch_copy_kprobe(struct kprobe *p) +static void __kprobes arch_copy_kprobe(struct kprobe *p) { - int ret; - /* Copy an instruction with recovering if other optprobe modifies it.*/ - ret = __copy_instruction(p->ainsn.insn, p->addr); - if (!ret) - return -EINVAL; + __copy_instruction(p->ainsn.insn, p->addr); /* * __copy_instruction can modify the displacement of the instruction, @@ -388,8 +384,6 @@ static int __kprobes arch_copy_kprobe(struct kprobe *p) /* Also, displacement change doesn't affect the first byte */ p->opcode = p->ainsn.insn[0]; - - return 0; } int __kprobes arch_prepare_kprobe(struct kprobe *p) @@ -403,8 +397,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) p->ainsn.insn = get_insn_slot(); if (!p->ainsn.insn) return -ENOMEM; - - return arch_copy_kprobe(p); + arch_copy_kprobe(p); + return 0; } void __kprobes arch_arm_kprobe(struct kprobe *p) diff --git a/trunk/arch/x86/kernel/kvmclock.c b/trunk/arch/x86/kernel/kvmclock.c index 3dd37ebd591b..d2c381280e3c 100644 --- a/trunk/arch/x86/kernel/kvmclock.c +++ b/trunk/arch/x86/kernel/kvmclock.c @@ -242,7 +242,6 @@ void __init kvmclock_init(void) if (!mem) return; hv_clock = __va(mem); - memset(hv_clock, 0, size); if (kvm_register_clock("boot clock")) { hv_clock = NULL; diff --git a/trunk/arch/x86/kernel/microcode_intel_early.c b/trunk/arch/x86/kernel/microcode_intel_early.c index 2e9e12871c2b..d893e8ed8ac9 100644 --- a/trunk/arch/x86/kernel/microcode_intel_early.c +++ b/trunk/arch/x86/kernel/microcode_intel_early.c @@ -487,7 +487,6 @@ static inline void show_saved_mc(void) #endif #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU) -static DEFINE_MUTEX(x86_cpu_microcode_mutex); /* * Save this mc into mc_saved_data. So it will be loaded early when a CPU is * hot added or resumes. @@ -508,7 +507,7 @@ int save_mc_for_early(u8 *mc) * Hold hotplug lock so mc_saved_data is not accessed by a CPU in * hotplug. */ - mutex_lock(&x86_cpu_microcode_mutex); + cpu_hotplug_driver_lock(); mc_saved_count_init = mc_saved_data.mc_saved_count; mc_saved_count = mc_saved_data.mc_saved_count; @@ -545,7 +544,7 @@ int save_mc_for_early(u8 *mc) } out: - mutex_unlock(&x86_cpu_microcode_mutex); + cpu_hotplug_driver_unlock(); return ret; } diff --git a/trunk/arch/x86/kernel/process.c b/trunk/arch/x86/kernel/process.c index 81a5f5e8f142..607af0d4d5ef 100644 --- a/trunk/arch/x86/kernel/process.c +++ b/trunk/arch/x86/kernel/process.c @@ -277,6 +277,18 @@ void exit_idle(void) } #endif +void arch_cpu_idle_prepare(void) +{ + /* + * If we're the non-boot CPU, nothing set the stack canary up + * for us. CPU0 already has it initialized but no harm in + * doing it again. 
This is a good place for updating it, as + * we wont ever return from this function (so the invalid + * canaries already on the stack wont ever trigger). + */ + boot_init_stack_canary(); +} + void arch_cpu_idle_enter(void) { local_touch_nmi(); @@ -300,8 +312,6 @@ void arch_cpu_idle(void) { if (cpuidle_idle_call()) x86_idle(); - else - local_irq_enable(); } /* @@ -358,6 +368,9 @@ void amd_e400_remove_cpu(int cpu) */ static void amd_e400_idle(void) { + if (need_resched()) + return; + if (!amd_e400_c1e_detected) { u32 lo, hi; diff --git a/trunk/arch/x86/kernel/relocate_kernel_64.S b/trunk/arch/x86/kernel/relocate_kernel_64.S index f2bb9c96720a..7a6f3b3be3cf 100644 --- a/trunk/arch/x86/kernel/relocate_kernel_64.S +++ b/trunk/arch/x86/kernel/relocate_kernel_64.S @@ -160,7 +160,7 @@ identity_mapped: xorq %rbp, %rbp xorq %r8, %r8 xorq %r9, %r9 - xorq %r10, %r10 + xorq %r10, %r9 xorq %r11, %r11 xorq %r12, %r12 xorq %r13, %r13 diff --git a/trunk/arch/x86/kernel/smpboot.c b/trunk/arch/x86/kernel/smpboot.c index bfd348e99369..9c73b51817e4 100644 --- a/trunk/arch/x86/kernel/smpboot.c +++ b/trunk/arch/x86/kernel/smpboot.c @@ -372,15 +372,15 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) void __cpuinit set_cpu_sibling_map(int cpu) { + bool has_mc = boot_cpu_data.x86_max_cores > 1; bool has_smt = smp_num_siblings > 1; - bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; struct cpuinfo_x86 *c = &cpu_data(cpu); struct cpuinfo_x86 *o; int i; cpumask_set_cpu(cpu, cpu_sibling_setup_mask); - if (!has_mp) { + if (!has_smt && !has_mc) { cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); cpumask_set_cpu(cpu, cpu_core_mask(cpu)); @@ -394,7 +394,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) if ((i == cpu) || (has_smt && match_smt(c, o))) link_mask(sibling, cpu, i); - if ((i == cpu) || (has_mp && match_llc(c, o))) + if ((i == cpu) || (has_mc && match_llc(c, o))) link_mask(llc_shared, cpu, i); } @@ -406,7 +406,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) for_each_cpu(i, cpu_sibling_setup_mask) { o = &cpu_data(i); - if ((i == cpu) || (has_mp && match_mc(c, o))) { + if ((i == cpu) || (has_mc && match_mc(c, o))) { link_mask(core, cpu, i); /* diff --git a/trunk/arch/x86/kvm/emulate.c b/trunk/arch/x86/kvm/emulate.c index 5953dcea752d..8db0010ed150 100644 --- a/trunk/arch/x86/kvm/emulate.c +++ b/trunk/arch/x86/kvm/emulate.c @@ -1240,12 +1240,9 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, ctxt->modrm_seg = VCPU_SREG_DS; if (ctxt->modrm_mod == 3) { - int highbyte_regs = ctxt->rex_prefix == 0; - op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 
1 : ctxt->op_bytes; - op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, - highbyte_regs && (ctxt->d & ByteOp)); + op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp); if (ctxt->d & Sse) { op->type = OP_XMM; op->bytes = 16; @@ -4000,8 +3997,7 @@ static const struct opcode twobyte_table[256] = { DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, N, D(ImplicitOps | ModRM), N, N, /* 0x10 - 0x1F */ - N, N, N, N, N, N, N, N, - D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM), + N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N, /* 0x20 - 0x2F */ DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read), DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read), @@ -4840,7 +4836,6 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) case 0x08: /* invd */ case 0x0d: /* GrpP (prefetch) */ case 0x18: /* Grp16 (prefetch/nop) */ - case 0x1f: /* nop */ break; case 0x20: /* mov cr, reg */ ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); diff --git a/trunk/arch/x86/kvm/lapic.c b/trunk/arch/x86/kvm/lapic.c index 0eee2c8b64d1..e1adbb4aca75 100644 --- a/trunk/arch/x86/kvm/lapic.c +++ b/trunk/arch/x86/kvm/lapic.c @@ -1861,14 +1861,11 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; unsigned int sipi_vector; - unsigned long pe; - if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events) + if (!kvm_vcpu_has_lapic(vcpu)) return; - pe = xchg(&apic->pending_events, 0); - - if (test_bit(KVM_APIC_INIT, &pe)) { + if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) { kvm_lapic_reset(vcpu); kvm_vcpu_reset(vcpu); if (kvm_vcpu_is_bsp(apic->vcpu)) @@ -1876,7 +1873,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) else vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; } - if (test_bit(KVM_APIC_SIPI, &pe) && + if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events) && vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { /* evaluate pending_events before reading the vector */ smp_rmb(); diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c index e8ba99c34180..094b5d96ab14 100644 --- a/trunk/arch/x86/kvm/x86.c +++ b/trunk/arch/x86/kvm/x86.c @@ -582,6 +582,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) if (index != XCR_XFEATURE_ENABLED_MASK) return 1; xcr0 = xcr; + if (kvm_x86_ops->get_cpl(vcpu) != 0) + return 1; if (!(xcr0 & XSTATE_FP)) return 1; if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) @@ -595,8 +597,7 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { - if (kvm_x86_ops->get_cpl(vcpu) != 0 || - __kvm_set_xcr(vcpu, index, xcr)) { + if (__kvm_set_xcr(vcpu, index, xcr)) { kvm_inject_gp(vcpu, 0); return 1; } diff --git a/trunk/arch/x86/mm/init.c b/trunk/arch/x86/mm/init.c index 1f34e9219775..fdc5dca14fb3 100644 --- a/trunk/arch/x86/mm/init.c +++ b/trunk/arch/x86/mm/init.c @@ -277,9 +277,6 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range, end_pfn = limit_pfn; nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); - if (!after_bootmem) - adjust_range_page_size_mask(mr, nr_range); - /* try to merge same page size and continuous */ for (i = 0; nr_range > 1 && i < nr_range - 1; i++) { unsigned long old_start; @@ -294,6 +291,9 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range, nr_range--; } + if (!after_bootmem) + adjust_range_page_size_mask(mr, nr_range); + for (i = 0; i < nr_range; i++) printk(KERN_DEBUG " [mem 
%#010lx-%#010lx] page %s\n", mr[i].start, mr[i].end - 1, @@ -359,17 +359,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, } /* - * We need to iterate through the E820 memory map and create direct mappings - * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply - * create direct mappings for all pfns from [0 to max_low_pfn) and - * [4GB to max_pfn) because of possible memory holes in high addresses - * that cannot be marked as UC by fixed/variable range MTRRs. - * Depending on the alignment of E820 ranges, this may possibly result - * in using smaller size (i.e. 4K instead of 2M or 1G) page tables. - * - * init_mem_mapping() calls init_range_memory_mapping() with big range. - * That range would have hole in the middle or ends, and only ram parts - * will be mapped in init_range_memory_mapping(). + * would have hole in the middle or ends, and only ram parts will be mapped. */ static unsigned long __init init_range_memory_mapping( unsigned long r_start, @@ -429,13 +419,6 @@ void __init init_mem_mapping(void) max_pfn_mapped = 0; /* will get exact value next */ min_pfn_mapped = real_end >> PAGE_SHIFT; last_start = start = real_end; - - /* - * We start from the top (end of memory) and go to the bottom. - * The memblock_find_in_range() gets us a block of RAM from the - * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages - * for page table. - */ while (last_start > ISA_END_ADDRESS) { if (last_start > step_size) { start = round_down(last_start - 1, step_size); diff --git a/trunk/arch/x86/pci/common.c b/trunk/arch/x86/pci/common.c index 981c2dbd72cc..305c68b8d538 100644 --- a/trunk/arch/x86/pci/common.c +++ b/trunk/arch/x86/pci/common.c @@ -628,9 +628,7 @@ int pcibios_add_device(struct pci_dev *dev) pa_data = boot_params.hdr.setup_data; while (pa_data) { - data = ioremap(pa_data, sizeof(*rom)); - if (!data) - return -ENOMEM; + data = phys_to_virt(pa_data); if (data->type == SETUP_PCI) { rom = (struct pci_setup_rom *)data; @@ -647,7 +645,6 @@ int pcibios_add_device(struct pci_dev *dev) } } pa_data = data->next; - iounmap(data); } return 0; } diff --git a/trunk/arch/x86/pci/mrst.c b/trunk/arch/x86/pci/mrst.c index 6eb18c42a28a..0e0fabf17342 100644 --- a/trunk/arch/x86/pci/mrst.c +++ b/trunk/arch/x86/pci/mrst.c @@ -141,6 +141,11 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn, */ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg) { + if (bus == 0 && (devfn == PCI_DEVFN(2, 0) + || devfn == PCI_DEVFN(0, 0) + || devfn == PCI_DEVFN(3, 0))) + return 1; + /* This is a workaround for A0 LNC bug where PCI status register does * not have new CAP bit set. can not be written by SW either. 
* @@ -150,10 +155,7 @@ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg) */ if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE) return 0; - if (bus == 0 && (devfn == PCI_DEVFN(2, 0) - || devfn == PCI_DEVFN(0, 0) - || devfn == PCI_DEVFN(3, 0))) - return 1; + return 0; /* langwell on others */ } diff --git a/trunk/arch/x86/platform/efi/efi.c b/trunk/arch/x86/platform/efi/efi.c index d2fbcedcf6ea..55856b2310d3 100644 --- a/trunk/arch/x86/platform/efi/efi.c +++ b/trunk/arch/x86/platform/efi/efi.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -53,12 +54,12 @@ #define EFI_DEBUG 1 -#define EFI_MIN_RESERVE 5120 - -#define EFI_DUMMY_GUID \ - EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9) - -static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 }; +/* + * There's some additional metadata associated with each + * variable. Intel's reference implementation is 60 bytes - bump that + * to account for potential alignment constraints + */ +#define VAR_METADATA_SIZE 64 struct efi __read_mostly efi = { .mps = EFI_INVALID_TABLE_ADDR, @@ -78,6 +79,13 @@ struct efi_memory_map memmap; static struct efi efi_phys __initdata; static efi_system_table_t efi_systab __initdata; +static u64 efi_var_store_size; +static u64 efi_var_remaining_size; +static u64 efi_var_max_var_size; +static u64 boot_used_size; +static u64 boot_var_size; +static u64 active_size; + unsigned long x86_efi_facility; /* @@ -180,8 +188,53 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) { - return efi_call_virt3(get_next_variable, - name_size, name, vendor); + efi_status_t status; + static bool finished = false; + static u64 var_size; + + status = efi_call_virt3(get_next_variable, + name_size, name, vendor); + + if (status == EFI_NOT_FOUND) { + finished = true; + if (var_size < boot_used_size) { + boot_var_size = boot_used_size - var_size; + active_size += boot_var_size; + } else { + printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n"); + } + } + + if (boot_used_size && !finished) { + unsigned long size; + u32 attr; + efi_status_t s; + void *tmp; + + s = virt_efi_get_variable(name, vendor, &attr, &size, NULL); + + if (s != EFI_BUFFER_TOO_SMALL || !size) + return status; + + tmp = kmalloc(size, GFP_ATOMIC); + + if (!tmp) + return status; + + s = virt_efi_get_variable(name, vendor, &attr, &size, tmp); + + if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) { + var_size += size; + var_size += ucs2_strsize(name, 1024); + active_size += size; + active_size += VAR_METADATA_SIZE; + active_size += ucs2_strsize(name, 1024); + } + + kfree(tmp); + } + + return status; } static efi_status_t virt_efi_set_variable(efi_char16_t *name, @@ -190,9 +243,34 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name, unsigned long data_size, void *data) { - return efi_call_virt5(set_variable, - name, vendor, attr, - data_size, data); + efi_status_t status; + u32 orig_attr = 0; + unsigned long orig_size = 0; + + status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size, + NULL); + + if (status != EFI_BUFFER_TOO_SMALL) + orig_size = 0; + + status = efi_call_virt5(set_variable, + name, vendor, attr, + data_size, data); + + if (status == EFI_SUCCESS) { + if (orig_size) { + active_size -= orig_size; + active_size -= ucs2_strsize(name, 1024); + active_size -= VAR_METADATA_SIZE; + } + if (data_size) { + active_size += data_size; + active_size += 
ucs2_strsize(name, 1024); + active_size += VAR_METADATA_SIZE; + } + } + + return status; } static efi_status_t virt_efi_query_variable_info(u32 attr, @@ -708,6 +786,9 @@ void __init efi_init(void) char vendor[100] = "unknown"; int i = 0; void *tmp; + struct setup_data *data; + struct efi_var_bootdata *efi_var_data; + u64 pa_data; #ifdef CONFIG_X86_32 if (boot_params.efi_info.efi_systab_hi || @@ -725,6 +806,22 @@ void __init efi_init(void) if (efi_systab_init(efi_phys.systab)) return; + pa_data = boot_params.hdr.setup_data; + while (pa_data) { + data = early_ioremap(pa_data, sizeof(*efi_var_data)); + if (data->type == SETUP_EFI_VARS) { + efi_var_data = (struct efi_var_bootdata *)data; + + efi_var_store_size = efi_var_data->store_size; + efi_var_remaining_size = efi_var_data->remaining_size; + efi_var_max_var_size = efi_var_data->max_var_size; + } + pa_data = data->next; + early_iounmap(data, sizeof(*efi_var_data)); + } + + boot_used_size = efi_var_store_size - efi_var_remaining_size; + set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); /* @@ -988,13 +1085,6 @@ void __init efi_enter_virtual_mode(void) runtime_code_page_mkexec(); kfree(new_memmap); - - /* clean DUMMY object */ - efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, - EFI_VARIABLE_NON_VOLATILE | - EFI_VARIABLE_BOOTSERVICE_ACCESS | - EFI_VARIABLE_RUNTIME_ACCESS, - 0, NULL); } /* @@ -1046,70 +1136,33 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) efi_status_t status; u64 storage_size, remaining_size, max_size; - if (!(attributes & EFI_VARIABLE_NON_VOLATILE)) - return 0; - status = efi.query_variable_info(attributes, &storage_size, &remaining_size, &max_size); if (status != EFI_SUCCESS) return status; + if (!max_size && remaining_size > size) + printk_once(KERN_ERR FW_BUG "Broken EFI implementation" + " is returning MaxVariableSize=0\n"); /* * Some firmware implementations refuse to boot if there's insufficient * space in the variable store. We account for that by refusing the * write if permitting it would reduce the available space to under - * 5KB. This figure was provided by Samsung, so should be safe. + * 50%. However, some firmware won't reclaim variable space until + * after the used (not merely the actively used) space drops below + * a threshold. We can approximate that case with the value calculated + * above. If both the firmware and our calculations indicate that the + * available space would drop below 50%, refuse the write. */ - if ((remaining_size - size < EFI_MIN_RESERVE) && - !efi_no_storage_paranoia) { - - /* - * Triggering garbage collection may require that the firmware - * generate a real EFI_OUT_OF_RESOURCES error. We can force - * that by attempting to use more space than is available. - */ - unsigned long dummy_size = remaining_size + 1024; - void *dummy = kzalloc(dummy_size, GFP_ATOMIC); - - if (!dummy) - return EFI_OUT_OF_RESOURCES; - - status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, - EFI_VARIABLE_NON_VOLATILE | - EFI_VARIABLE_BOOTSERVICE_ACCESS | - EFI_VARIABLE_RUNTIME_ACCESS, - dummy_size, dummy); - - if (status == EFI_SUCCESS) { - /* - * This should have failed, so if it didn't make sure - * that we delete it... 
- */ - efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, - EFI_VARIABLE_NON_VOLATILE | - EFI_VARIABLE_BOOTSERVICE_ACCESS | - EFI_VARIABLE_RUNTIME_ACCESS, - 0, dummy); - } - - kfree(dummy); - /* - * The runtime code may now have triggered a garbage collection - * run, so check the variable info again - */ - status = efi.query_variable_info(attributes, &storage_size, - &remaining_size, &max_size); + if (!storage_size || size > remaining_size || + (max_size && size > max_size)) + return EFI_OUT_OF_RESOURCES; - if (status != EFI_SUCCESS) - return status; - - /* - * There still isn't enough room, so return an error - */ - if (remaining_size - size < EFI_MIN_RESERVE) - return EFI_OUT_OF_RESOURCES; - } + if (!efi_no_storage_paranoia && + ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) && + (remaining_size - size < storage_size / 2))) + return EFI_OUT_OF_RESOURCES; return EFI_SUCCESS; } diff --git a/trunk/arch/x86/tools/relocs.c b/trunk/arch/x86/tools/relocs.c index f7bab68a4b83..590be1090892 100644 --- a/trunk/arch/x86/tools/relocs.c +++ b/trunk/arch/x86/tools/relocs.c @@ -42,6 +42,9 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = { "^(xen_irq_disable_direct_reloc$|" "xen_save_fl_direct_reloc$|" "VDSO|" +#if ELF_BITS == 64 + "__vvar_page|" +#endif "__crc_)", /* @@ -69,7 +72,6 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = { "__per_cpu_load|" "init_per_cpu__.*|" "__end_rodata_hpage_align|" - "__vvar_page|" #endif "_end)$" }; diff --git a/trunk/arch/x86/xen/smp.c b/trunk/arch/x86/xen/smp.c index d99cae8147d1..8ff37995d54e 100644 --- a/trunk/arch/x86/xen/smp.c +++ b/trunk/arch/x86/xen/smp.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -448,13 +447,6 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */ play_dead_common(); HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); cpu_bringup(); - /* - * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down) - * clears certain data that the cpu_idle loop (which called us - * and that we return from) expects. 
The only way to get that - * data back is to call: - */ - tick_nohz_idle_enter(); } #else /* !CONFIG_HOTPLUG_CPU */ @@ -584,22 +576,24 @@ void xen_send_IPI_mask_allbutself(const struct cpumask *mask, { unsigned cpu; unsigned int this_cpu = smp_processor_id(); - int xen_vector = xen_map_vector(vector); - if (!(num_online_cpus() > 1) || (xen_vector < 0)) + if (!(num_online_cpus() > 1)) return; for_each_cpu_and(cpu, mask, cpu_online_mask) { if (this_cpu == cpu) continue; - xen_send_IPI_one(cpu, xen_vector); + xen_smp_send_call_function_single_ipi(cpu); } } void xen_send_IPI_allbutself(int vector) { - xen_send_IPI_mask_allbutself(cpu_online_mask, vector); + int xen_vector = xen_map_vector(vector); + + if (xen_vector >= 0) + xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector); } static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) diff --git a/trunk/arch/x86/xen/smp.h b/trunk/arch/x86/xen/smp.h index c7c2d89efd76..8981a76d081a 100644 --- a/trunk/arch/x86/xen/smp.h +++ b/trunk/arch/x86/xen/smp.h @@ -5,6 +5,7 @@ extern void xen_send_IPI_mask(const struct cpumask *mask, extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask, int vector); extern void xen_send_IPI_allbutself(int vector); +extern void physflat_send_IPI_allbutself(int vector); extern void xen_send_IPI_all(int vector); extern void xen_send_IPI_self(int vector); diff --git a/trunk/arch/xtensa/include/asm/pgtable.h b/trunk/arch/xtensa/include/asm/pgtable.h index 8f017eb309bd..d7546c94da52 100644 --- a/trunk/arch/xtensa/include/asm/pgtable.h +++ b/trunk/arch/xtensa/include/asm/pgtable.h @@ -393,6 +393,14 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) extern void update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep); +/* + * remap a physical page `pfn' of size `size' with page protection `prot' + * into virtual address `from' + */ + +#define io_remap_pfn_range(vma,from,pfn,size,prot) \ + remap_pfn_range(vma, from, pfn, size, prot) + typedef pte_t *pte_addr_t; #endif /* !defined (__ASSEMBLY__) */ diff --git a/trunk/block/blk-core.c b/trunk/block/blk-core.c index d5745b5833c9..33c33bc99ddd 100644 --- a/trunk/block/blk-core.c +++ b/trunk/block/blk-core.c @@ -3164,7 +3164,7 @@ void blk_post_runtime_resume(struct request_queue *q, int err) q->rpm_status = RPM_ACTIVE; __blk_run_queue(q); pm_runtime_mark_last_busy(q->dev); - pm_request_autosuspend(q->dev); + pm_runtime_autosuspend(q->dev); } else { q->rpm_status = RPM_SUSPENDED; } diff --git a/trunk/crypto/Kconfig b/trunk/crypto/Kconfig index bf8148e74e73..622d8a48cbe9 100644 --- a/trunk/crypto/Kconfig +++ b/trunk/crypto/Kconfig @@ -823,7 +823,6 @@ config CRYPTO_BLOWFISH_X86_64 config CRYPTO_BLOWFISH_AVX2_X86_64 tristate "Blowfish cipher algorithm (x86_64/AVX2)" depends on X86 && 64BIT - depends on BROKEN select CRYPTO_ALGAPI select CRYPTO_CRYPTD select CRYPTO_ABLK_HELPER_X86 @@ -1300,7 +1299,6 @@ config CRYPTO_TWOFISH_AVX_X86_64 config CRYPTO_TWOFISH_AVX2_X86_64 tristate "Twofish cipher algorithm (x86_64/AVX2)" depends on X86 && 64BIT - depends on BROKEN select CRYPTO_ALGAPI select CRYPTO_CRYPTD select CRYPTO_ABLK_HELPER_X86 diff --git a/trunk/crypto/algboss.c b/trunk/crypto/algboss.c index 76fc0b23fc6c..769219b29309 100644 --- a/trunk/crypto/algboss.c +++ b/trunk/crypto/algboss.c @@ -45,9 +45,10 @@ struct cryptomgr_param { } nu32; } attrs[CRYPTO_MAX_ATTRS]; + char larval[CRYPTO_MAX_ALG_NAME]; char template[CRYPTO_MAX_ALG_NAME]; - struct crypto_larval *larval; + struct completion *completion; u32 otype; 
u32 omask; @@ -86,8 +87,7 @@ static int cryptomgr_probe(void *data) crypto_tmpl_put(tmpl); out: - complete_all(¶m->larval->completion); - crypto_alg_put(¶m->larval->alg); + complete_all(param->completion); kfree(param); module_put_and_exit(0); } @@ -187,19 +187,18 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) param->otype = larval->alg.cra_flags; param->omask = larval->mask; - crypto_alg_get(&larval->alg); - param->larval = larval; + memcpy(param->larval, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME); + + param->completion = &larval->completion; thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe"); if (IS_ERR(thread)) - goto err_put_larval; + goto err_free_param; wait_for_completion_interruptible(&larval->completion); return NOTIFY_STOP; -err_put_larval: - crypto_alg_put(&larval->alg); err_free_param: kfree(param); err_put_module: diff --git a/trunk/crypto/api.c b/trunk/crypto/api.c index 3b6180336d3d..033a7147e5eb 100644 --- a/trunk/crypto/api.c +++ b/trunk/crypto/api.c @@ -34,6 +34,12 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem); BLOCKING_NOTIFIER_HEAD(crypto_chain); EXPORT_SYMBOL_GPL(crypto_chain); +static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) +{ + atomic_inc(&alg->cra_refcnt); + return alg; +} + struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) { return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL; diff --git a/trunk/crypto/internal.h b/trunk/crypto/internal.h index bd39bfc92eab..9ebedae3fb54 100644 --- a/trunk/crypto/internal.h +++ b/trunk/crypto/internal.h @@ -103,12 +103,6 @@ int crypto_register_notifier(struct notifier_block *nb); int crypto_unregister_notifier(struct notifier_block *nb); int crypto_probing_notify(unsigned long val, void *v); -static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) -{ - atomic_inc(&alg->cra_refcnt); - return alg; -} - static inline void crypto_alg_put(struct crypto_alg *alg) { if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) diff --git a/trunk/drivers/acpi/Makefile b/trunk/drivers/acpi/Makefile index 536562c626a2..ecb743bf05a5 100644 --- a/trunk/drivers/acpi/Makefile +++ b/trunk/drivers/acpi/Makefile @@ -24,7 +24,7 @@ acpi-y += nvs.o # Power management related files acpi-y += wakeup.o acpi-y += sleep.o -acpi-y += device_pm.o +acpi-$(CONFIG_PM) += device_pm.o acpi-$(CONFIG_ACPI_SLEEP) += proc.o @@ -38,6 +38,7 @@ acpi-y += processor_core.o acpi-y += ec.o acpi-$(CONFIG_ACPI_DOCK) += dock.o acpi-y += pci_root.o pci_link.o pci_irq.o +acpi-y += csrt.o acpi-$(CONFIG_X86_INTEL_LPSS) += acpi_lpss.o acpi-y += acpi_platform.o acpi-y += power.o diff --git a/trunk/drivers/acpi/ac.c b/trunk/drivers/acpi/ac.c index 4f4e741d34b2..00d2efd674df 100644 --- a/trunk/drivers/acpi/ac.c +++ b/trunk/drivers/acpi/ac.c @@ -28,8 +28,6 @@ #include #include #include -#include -#include #ifdef CONFIG_ACPI_PROCFS_POWER #include #include @@ -76,8 +74,6 @@ static int acpi_ac_resume(struct device *dev); #endif static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); -static int ac_sleep_before_get_state_ms; - static struct acpi_driver acpi_ac_driver = { .name = "ac", .class = ACPI_AC_CLASS, @@ -256,16 +252,6 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event) case ACPI_AC_NOTIFY_STATUS: case ACPI_NOTIFY_BUS_CHECK: case ACPI_NOTIFY_DEVICE_CHECK: - /* - * A buggy BIOS may notify AC first and then sleep for - * a specific time before doing actual operations in the - * EC event handler (_Qxx). 
This will cause the AC state - * reported by the ACPI event to be incorrect, so wait for a - * specific time for the EC event handler to make progress. - */ - if (ac_sleep_before_get_state_ms > 0) - msleep(ac_sleep_before_get_state_ms); - acpi_ac_get_state(ac); acpi_bus_generate_proc_event(device, event, (u32) ac->state); acpi_bus_generate_netlink_event(device->pnp.device_class, @@ -278,24 +264,6 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event) return; } -static int thinkpad_e530_quirk(const struct dmi_system_id *d) -{ - ac_sleep_before_get_state_ms = 1000; - return 0; -} - -static struct dmi_system_id ac_dmi_table[] = { - { - .callback = thinkpad_e530_quirk, - .ident = "thinkpad e530", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"), - }, - }, - {}, -}; - static int acpi_ac_add(struct acpi_device *device) { int result = 0; @@ -344,7 +312,6 @@ static int acpi_ac_add(struct acpi_device *device) kfree(ac); } - dmi_check_system(ac_dmi_table); return result; } diff --git a/trunk/drivers/acpi/acpi_lpss.c b/trunk/drivers/acpi/acpi_lpss.c index cab13f2fc28e..b1c95422ce74 100644 --- a/trunk/drivers/acpi/acpi_lpss.c +++ b/trunk/drivers/acpi/acpi_lpss.c @@ -35,16 +35,11 @@ ACPI_MODULE_NAME("acpi_lpss"); struct lpss_device_desc { bool clk_required; - const char *clkdev_name; + const char *clk_parent; bool ltr_required; unsigned int prv_offset; }; -static struct lpss_device_desc lpss_dma_desc = { - .clk_required = true, - .clkdev_name = "hclk", -}; - struct lpss_private_data { void __iomem *mmio_base; resource_size_t mmio_size; @@ -54,6 +49,7 @@ struct lpss_private_data { static struct lpss_device_desc lpt_dev_desc = { .clk_required = true, + .clk_parent = "lpss_clk", .prv_offset = 0x800, .ltr_required = true, }; @@ -64,9 +60,6 @@ static struct lpss_device_desc lpt_sdio_dev_desc = { }; static const struct acpi_device_id acpi_lpss_device_ids[] = { - /* Generic LPSS devices */ - { "INTL9C60", (unsigned long)&lpss_dma_desc }, - /* Lynxpoint LPSS devices */ { "INT33C0", (unsigned long)&lpt_dev_desc }, { "INT33C1", (unsigned long)&lpt_dev_desc }, @@ -98,27 +91,16 @@ static int register_device_clock(struct acpi_device *adev, struct lpss_private_data *pdata) { const struct lpss_device_desc *dev_desc = pdata->dev_desc; - struct lpss_clk_data *clk_data; if (!lpss_clk_dev) lpt_register_clock_device(); - clk_data = platform_get_drvdata(lpss_clk_dev); - if (!clk_data) - return -ENODEV; - - if (dev_desc->clkdev_name) { - clk_register_clkdev(clk_data->clk, dev_desc->clkdev_name, - dev_name(&adev->dev)); - return 0; - } - - if (!pdata->mmio_base + if (!dev_desc->clk_parent || !pdata->mmio_base || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE) return -ENODATA; pdata->clk = clk_register_gate(NULL, dev_name(&adev->dev), - clk_data->name, 0, + dev_desc->clk_parent, 0, pdata->mmio_base + dev_desc->prv_offset, 0, 0, NULL); if (IS_ERR(pdata->clk)) @@ -164,24 +146,15 @@ static int acpi_lpss_create_device(struct acpi_device *adev, if (dev_desc->clk_required) { ret = register_device_clock(adev, pdata); if (ret) { - /* Skip the device, but continue the namespace scan. */ - ret = 0; - goto err_out; + /* + * Skip the device, but don't terminate the namespace + * scan. + */ + kfree(pdata); + return 0; } } - /* - * This works around a known issue in ACPI tables where LPSS devices - * have _PS0 and _PS3 without _PSC (and no power resources), so - * acpi_bus_init_power() will assume that the BIOS has put them into D0. 
- */ - ret = acpi_device_fix_up_power(adev); - if (ret) { - /* Skip the device, but continue the namespace scan. */ - ret = 0; - goto err_out; - } - adev->driver_data = pdata; ret = acpi_create_platform_device(adev, id); if (ret > 0) diff --git a/trunk/drivers/acpi/apei/cper.c b/trunk/drivers/acpi/apei/cper.c index 33dc6a004802..fefc2ca7cc3e 100644 --- a/trunk/drivers/acpi/apei/cper.c +++ b/trunk/drivers/acpi/apei/cper.c @@ -250,6 +250,10 @@ static const char *cper_pcie_port_type_strs[] = { static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, const struct acpi_hest_generic_data *gdata) { +#ifdef CONFIG_ACPI_APEI_PCIEAER + struct pci_dev *dev; +#endif + if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE) printk("%s""port_type: %d, %s\n", pfx, pcie->port_type, pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ? @@ -281,6 +285,20 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, printk( "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n", pfx, pcie->bridge.secondary_status, pcie->bridge.control); +#ifdef CONFIG_ACPI_APEI_PCIEAER + dev = pci_get_domain_bus_and_slot(pcie->device_id.segment, + pcie->device_id.bus, pcie->device_id.function); + if (!dev) { + pr_err("PCI AER Cannot get PCI device %04x:%02x:%02x.%d\n", + pcie->device_id.segment, pcie->device_id.bus, + pcie->device_id.slot, pcie->device_id.function); + return; + } + if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) + cper_print_aer(pfx, dev, gdata->error_severity, + (struct aer_capability_regs *) pcie->aer_info); + pci_dev_put(dev); +#endif } static const char *apei_estatus_section_flag_strs[] = { diff --git a/trunk/drivers/acpi/apei/ghes.c b/trunk/drivers/acpi/apei/ghes.c index fcd7d91cec34..d668a8ae602b 100644 --- a/trunk/drivers/acpi/apei/ghes.c +++ b/trunk/drivers/acpi/apei/ghes.c @@ -454,9 +454,7 @@ static void ghes_do_proc(struct ghes *ghes, aer_severity = cper_severity_to_aer(sev); aer_recover_queue(pcie_err->device_id.segment, pcie_err->device_id.bus, - devfn, aer_severity, - (struct aer_capability_regs *) - pcie_err->aer_info); + devfn, aer_severity); } } @@ -919,14 +917,13 @@ static int ghes_probe(struct platform_device *ghes_dev) break; case ACPI_HEST_NOTIFY_EXTERNAL: /* External interrupt vector is GSI */ - rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq); - if (rc) { + if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) { pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n", generic->header.source_id); goto err_edac_unreg; } - rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes); - if (rc) { + if (request_irq(ghes->irq, ghes_irq_func, + 0, "GHES IRQ", ghes)) { pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n", generic->header.source_id); goto err_edac_unreg; diff --git a/trunk/drivers/acpi/csrt.c b/trunk/drivers/acpi/csrt.c new file mode 100644 index 000000000000..5c15a91faf0b --- /dev/null +++ b/trunk/drivers/acpi/csrt.c @@ -0,0 +1,159 @@ +/* + * Support for Core System Resources Table (CSRT) + * + * Copyright (C) 2013, Intel Corporation + * Authors: Mika Westerberg + * Andy Shevchenko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) "ACPI: CSRT: " fmt + +#include +#include +#include +#include +#include +#include + +ACPI_MODULE_NAME("CSRT"); + +static int __init acpi_csrt_parse_shared_info(struct platform_device *pdev, + const struct acpi_csrt_group *grp) +{ + const struct acpi_csrt_shared_info *si; + struct resource res[3]; + size_t nres; + int ret; + + memset(res, 0, sizeof(res)); + nres = 0; + + si = (const struct acpi_csrt_shared_info *)&grp[1]; + /* + * The peripherals that are listed on CSRT typically support only + * 32-bit addresses so we only use the low part of MMIO base for + * now. + */ + if (!si->mmio_base_high && si->mmio_base_low) { + /* + * There is no size of the memory resource in shared_info + * so we assume that it is 4k here. + */ + res[nres].start = si->mmio_base_low; + res[nres].end = res[0].start + SZ_4K - 1; + res[nres++].flags = IORESOURCE_MEM; + } + + if (si->gsi_interrupt) { + int irq = acpi_register_gsi(NULL, si->gsi_interrupt, + si->interrupt_mode, + si->interrupt_polarity); + res[nres].start = irq; + res[nres].end = irq; + res[nres++].flags = IORESOURCE_IRQ; + } + + if (si->base_request_line || si->num_handshake_signals) { + /* + * We pass the driver a DMA resource describing the range + * of request lines the device supports. + */ + res[nres].start = si->base_request_line; + res[nres].end = res[nres].start + si->num_handshake_signals - 1; + res[nres++].flags = IORESOURCE_DMA; + } + + ret = platform_device_add_resources(pdev, res, nres); + if (ret) { + if (si->gsi_interrupt) + acpi_unregister_gsi(si->gsi_interrupt); + return ret; + } + + return 0; +} + +static int __init +acpi_csrt_parse_resource_group(const struct acpi_csrt_group *grp) +{ + struct platform_device *pdev; + char vendor[5], name[16]; + int ret, i; + + vendor[0] = grp->vendor_id; + vendor[1] = grp->vendor_id >> 8; + vendor[2] = grp->vendor_id >> 16; + vendor[3] = grp->vendor_id >> 24; + vendor[4] = '\0'; + + if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info)) + return -ENODEV; + + snprintf(name, sizeof(name), "%s%04X", vendor, grp->device_id); + pdev = platform_device_alloc(name, PLATFORM_DEVID_AUTO); + if (!pdev) + return -ENOMEM; + + /* Add resources based on the shared info */ + ret = acpi_csrt_parse_shared_info(pdev, grp); + if (ret) + goto fail; + + ret = platform_device_add(pdev); + if (ret) + goto fail; + + for (i = 0; i < pdev->num_resources; i++) + dev_dbg(&pdev->dev, "%pR\n", &pdev->resource[i]); + + return 0; + +fail: + platform_device_put(pdev); + return ret; +} + +/* + * CSRT or Core System Resources Table is a proprietary ACPI table + * introduced by Microsoft. This table can contain devices that are not in + * the system DSDT table. In particular DMA controllers might be described + * here. + * + * We present these devices as normal platform devices that don't have ACPI + * IDs or handle. The platform device name will be something like + * ..auto for example: INTL9C06.0.auto. 
+ */ +void __init acpi_csrt_init(void) +{ + struct acpi_csrt_group *grp, *end; + struct acpi_table_csrt *csrt; + acpi_status status; + int ret; + + status = acpi_get_table(ACPI_SIG_CSRT, 0, + (struct acpi_table_header **)&csrt); + if (ACPI_FAILURE(status)) { + if (status != AE_NOT_FOUND) + pr_warn("failed to get the CSRT table\n"); + return; + } + + pr_debug("parsing CSRT table for devices\n"); + + grp = (struct acpi_csrt_group *)(csrt + 1); + end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length); + + while (grp < end) { + ret = acpi_csrt_parse_resource_group(grp); + if (ret) { + pr_warn("error in parsing resource group: %d\n", ret); + return; + } + + grp = (struct acpi_csrt_group *)((void *)grp + grp->length); + } +} diff --git a/trunk/drivers/acpi/device_pm.c b/trunk/drivers/acpi/device_pm.c index 31c217a42839..96de787e6104 100644 --- a/trunk/drivers/acpi/device_pm.c +++ b/trunk/drivers/acpi/device_pm.c @@ -37,6 +37,68 @@ #define _COMPONENT ACPI_POWER_COMPONENT ACPI_MODULE_NAME("device_pm"); +static DEFINE_MUTEX(acpi_pm_notifier_lock); + +/** + * acpi_add_pm_notifier - Register PM notifier for given ACPI device. + * @adev: ACPI device to add the notifier for. + * @context: Context information to pass to the notifier routine. + * + * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of + * PM wakeup events. For example, wakeup events may be generated for bridges + * if one of the devices below the bridge is signaling wakeup, even if the + * bridge itself doesn't have a wakeup GPE associated with it. + */ +acpi_status acpi_add_pm_notifier(struct acpi_device *adev, + acpi_notify_handler handler, void *context) +{ + acpi_status status = AE_ALREADY_EXISTS; + + mutex_lock(&acpi_pm_notifier_lock); + + if (adev->wakeup.flags.notifier_present) + goto out; + + status = acpi_install_notify_handler(adev->handle, + ACPI_SYSTEM_NOTIFY, + handler, context); + if (ACPI_FAILURE(status)) + goto out; + + adev->wakeup.flags.notifier_present = true; + + out: + mutex_unlock(&acpi_pm_notifier_lock); + return status; +} + +/** + * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device. + * @adev: ACPI device to remove the notifier from. + */ +acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, + acpi_notify_handler handler) +{ + acpi_status status = AE_BAD_PARAMETER; + + mutex_lock(&acpi_pm_notifier_lock); + + if (!adev->wakeup.flags.notifier_present) + goto out; + + status = acpi_remove_notify_handler(adev->handle, + ACPI_SYSTEM_NOTIFY, + handler); + if (ACPI_FAILURE(status)) + goto out; + + adev->wakeup.flags.notifier_present = false; + + out: + mutex_unlock(&acpi_pm_notifier_lock); + return status; +} + /** * acpi_power_state_string - String representation of ACPI device power state. * @state: ACPI device power state to return the string representation of. @@ -278,38 +340,16 @@ int acpi_bus_init_power(struct acpi_device *device) if (result) return result; } else if (state == ACPI_STATE_UNKNOWN) { - /* - * No power resources and missing _PSC? Cross fingers and make - * it D0 in hope that this is what the BIOS put the device into. - * [We tried to force D0 here by executing _PS0, but that broke - * Toshiba P870-303 in a nasty way.] - */ + /* No power resources and missing _PSC? Try to force D0. */ state = ACPI_STATE_D0; + result = acpi_dev_pm_explicit_set(device, state); + if (result) + return result; } device->power.state = state; return 0; } -/** - * acpi_device_fix_up_power - Force device with missing _PSC into D0. 
- * @device: Device object whose power state is to be fixed up. - * - * Devices without power resources and _PSC, but having _PS0 and _PS3 defined, - * are assumed to be put into D0 by the BIOS. However, in some cases that may - * not be the case and this function should be used then. - */ -int acpi_device_fix_up_power(struct acpi_device *device) -{ - int ret = 0; - - if (!device->power.flags.power_resources - && !device->power.flags.explicit_get - && device->power.state == ACPI_STATE_D0) - ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0); - - return ret; -} - int acpi_bus_update_power(acpi_handle handle, int *state_p) { struct acpi_device *device; @@ -345,69 +385,6 @@ bool acpi_bus_power_manageable(acpi_handle handle) } EXPORT_SYMBOL(acpi_bus_power_manageable); -#ifdef CONFIG_PM -static DEFINE_MUTEX(acpi_pm_notifier_lock); - -/** - * acpi_add_pm_notifier - Register PM notifier for given ACPI device. - * @adev: ACPI device to add the notifier for. - * @context: Context information to pass to the notifier routine. - * - * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of - * PM wakeup events. For example, wakeup events may be generated for bridges - * if one of the devices below the bridge is signaling wakeup, even if the - * bridge itself doesn't have a wakeup GPE associated with it. - */ -acpi_status acpi_add_pm_notifier(struct acpi_device *adev, - acpi_notify_handler handler, void *context) -{ - acpi_status status = AE_ALREADY_EXISTS; - - mutex_lock(&acpi_pm_notifier_lock); - - if (adev->wakeup.flags.notifier_present) - goto out; - - status = acpi_install_notify_handler(adev->handle, - ACPI_SYSTEM_NOTIFY, - handler, context); - if (ACPI_FAILURE(status)) - goto out; - - adev->wakeup.flags.notifier_present = true; - - out: - mutex_unlock(&acpi_pm_notifier_lock); - return status; -} - -/** - * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device. - * @adev: ACPI device to remove the notifier from. 
- */ -acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, - acpi_notify_handler handler) -{ - acpi_status status = AE_BAD_PARAMETER; - - mutex_lock(&acpi_pm_notifier_lock); - - if (!adev->wakeup.flags.notifier_present) - goto out; - - status = acpi_remove_notify_handler(adev->handle, - ACPI_SYSTEM_NOTIFY, - handler); - if (ACPI_FAILURE(status)) - goto out; - - adev->wakeup.flags.notifier_present = false; - - out: - mutex_unlock(&acpi_pm_notifier_lock); - return status; -} - bool acpi_bus_can_wakeup(acpi_handle handle) { struct acpi_device *device; @@ -1046,4 +1023,3 @@ void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev) mutex_unlock(&adev->physical_node_lock); } EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent); -#endif /* CONFIG_PM */ diff --git a/trunk/drivers/acpi/dock.c b/trunk/drivers/acpi/dock.c index 14de9f46972e..4fdea381ef21 100644 --- a/trunk/drivers/acpi/dock.c +++ b/trunk/drivers/acpi/dock.c @@ -66,21 +66,20 @@ struct dock_station { spinlock_t dd_lock; struct mutex hp_lock; struct list_head dependent_devices; + struct list_head hotplug_devices; struct list_head sibling; struct platform_device *dock_device; }; static LIST_HEAD(dock_stations); static int dock_station_count; -static DEFINE_MUTEX(hotplug_lock); struct dock_dependent_device { struct list_head list; + struct list_head hotplug_list; acpi_handle handle; - const struct acpi_dock_ops *hp_ops; - void *hp_context; - unsigned int hp_refcount; - void (*hp_release)(void *); + const struct acpi_dock_ops *ops; + void *context; }; #define DOCK_DOCKING 0x00000001 @@ -112,6 +111,7 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle) dd->handle = handle; INIT_LIST_HEAD(&dd->list); + INIT_LIST_HEAD(&dd->hotplug_list); spin_lock(&ds->dd_lock); list_add_tail(&dd->list, &ds->dependent_devices); @@ -121,90 +121,35 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle) } /** - * dock_init_hotplug - Initialize a hotplug device on a docking station. - * @dd: Dock-dependent device. - * @ops: Dock operations to attach to the dependent device. - * @context: Data to pass to the @ops callbacks and @release. - * @init: Optional initialization routine to run after setting up context. - * @release: Optional release routine to run on removal. + * dock_add_hotplug_device - associate a hotplug handler with the dock station + * @ds: The dock station + * @dd: The dependent device struct + * + * Add the dependent device to the dock's hotplug device list */ -static int dock_init_hotplug(struct dock_dependent_device *dd, - const struct acpi_dock_ops *ops, void *context, - void (*init)(void *), void (*release)(void *)) +static void +dock_add_hotplug_device(struct dock_station *ds, + struct dock_dependent_device *dd) { - int ret = 0; - - mutex_lock(&hotplug_lock); - - if (dd->hp_context) { - ret = -EEXIST; - } else { - dd->hp_refcount = 1; - dd->hp_ops = ops; - dd->hp_context = context; - dd->hp_release = release; - } - - if (!WARN_ON(ret) && init) - init(context); - - mutex_unlock(&hotplug_lock); - return ret; + mutex_lock(&ds->hp_lock); + list_add_tail(&dd->hotplug_list, &ds->hotplug_devices); + mutex_unlock(&ds->hp_lock); } /** - * dock_release_hotplug - Decrement hotplug reference counter of dock device. - * @dd: Dock-dependent device. 
+ * dock_del_hotplug_device - remove a hotplug handler from the dock station + * @ds: The dock station + * @dd: the dependent device struct * - * Decrement the reference counter of @dd and if 0, detach its hotplug - * operations from it, reset its context pointer and run the optional release - * routine if present. + * Delete the dependent device from the dock's hotplug device list */ -static void dock_release_hotplug(struct dock_dependent_device *dd) +static void +dock_del_hotplug_device(struct dock_station *ds, + struct dock_dependent_device *dd) { - void (*release)(void *) = NULL; - void *context = NULL; - - mutex_lock(&hotplug_lock); - - if (dd->hp_context && !--dd->hp_refcount) { - dd->hp_ops = NULL; - context = dd->hp_context; - dd->hp_context = NULL; - release = dd->hp_release; - dd->hp_release = NULL; - } - - if (release && context) - release(context); - - mutex_unlock(&hotplug_lock); -} - -static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event, - bool uevent) -{ - acpi_notify_handler cb = NULL; - bool run = false; - - mutex_lock(&hotplug_lock); - - if (dd->hp_context) { - run = true; - dd->hp_refcount++; - if (dd->hp_ops) - cb = uevent ? dd->hp_ops->uevent : dd->hp_ops->handler; - } - - mutex_unlock(&hotplug_lock); - - if (!run) - return; - - if (cb) - cb(dd->handle, event, dd->hp_context); - - dock_release_hotplug(dd); + mutex_lock(&ds->hp_lock); + list_del(&dd->hotplug_list); + mutex_unlock(&ds->hp_lock); } /** @@ -415,8 +360,9 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event) /* * First call driver specific hotplug functions */ - list_for_each_entry(dd, &ds->dependent_devices, list) - dock_hotplug_event(dd, event, false); + list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list) + if (dd->ops && dd->ops->handler) + dd->ops->handler(dd->handle, event, dd->context); /* * Now make sure that an acpi_device is created for each @@ -452,8 +398,9 @@ static void dock_event(struct dock_station *ds, u32 event, int num) if (num == DOCK_EVENT) kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); - list_for_each_entry(dd, &ds->dependent_devices, list) - dock_hotplug_event(dd, event, true); + list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list) + if (dd->ops && dd->ops->uevent) + dd->ops->uevent(dd->handle, event, dd->context); if (num != DOCK_EVENT) kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); @@ -623,24 +570,19 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier); * @handle: the handle of the device * @ops: handlers to call after docking * @context: device specific data - * @init: Optional initialization routine to run after registration - * @release: Optional release routine to run on unregistration * * If a driver would like to perform a hotplug operation after a dock * event, they can register an acpi_notifiy_handler to be called by * the dock driver after _DCK is executed. 
*/ -int register_hotplug_dock_device(acpi_handle handle, - const struct acpi_dock_ops *ops, void *context, - void (*init)(void *), void (*release)(void *)) +int +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops, + void *context) { struct dock_dependent_device *dd; struct dock_station *dock_station; int ret = -EINVAL; - if (WARN_ON(!context)) - return -EINVAL; - if (!dock_station_count) return -ENODEV; @@ -655,8 +597,12 @@ int register_hotplug_dock_device(acpi_handle handle, * ops */ dd = find_dock_dependent_device(dock_station, handle); - if (dd && !dock_init_hotplug(dd, ops, context, init, release)) + if (dd) { + dd->ops = ops; + dd->context = context; + dock_add_hotplug_device(dock_station, dd); ret = 0; + } } return ret; @@ -678,7 +624,7 @@ void unregister_hotplug_dock_device(acpi_handle handle) list_for_each_entry(dock_station, &dock_stations, sibling) { dd = find_dock_dependent_device(dock_station, handle); if (dd) - dock_release_hotplug(dd); + dock_del_hotplug_device(dock_station, dd); } } EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device); @@ -922,10 +868,8 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr, if (!count) return -EINVAL; - acpi_scan_lock_acquire(); begin_undock(dock_station); ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST); - acpi_scan_lock_release(); return ret ? ret: count; } static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock); @@ -1007,6 +951,7 @@ static int __init dock_add(acpi_handle handle) mutex_init(&dock_station->hp_lock); spin_lock_init(&dock_station->dd_lock); INIT_LIST_HEAD(&dock_station->sibling); + INIT_LIST_HEAD(&dock_station->hotplug_devices); ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list); INIT_LIST_HEAD(&dock_station->dependent_devices); @@ -1046,6 +991,30 @@ static int __init dock_add(acpi_handle handle) return ret; } +/** + * dock_remove - free up resources related to the dock station + */ +static int dock_remove(struct dock_station *ds) +{ + struct dock_dependent_device *dd, *tmp; + struct platform_device *dock_device = ds->dock_device; + + if (!dock_station_count) + return 0; + + /* remove dependent devices */ + list_for_each_entry_safe(dd, tmp, &ds->dependent_devices, list) + kfree(dd); + + list_del(&ds->sibling); + + /* cleanup sysfs */ + sysfs_remove_group(&dock_device->dev.kobj, &dock_attribute_group); + platform_device_unregister(dock_device); + + return 0; +} + /** * find_dock_and_bay - look for dock stations and bays * @handle: acpi handle of a device @@ -1064,7 +1033,7 @@ find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; } -int __init acpi_dock_init(void) +static int __init dock_init(void) { if (acpi_disabled) return 0; @@ -1083,3 +1052,19 @@ int __init acpi_dock_init(void) ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count); return 0; } + +static void __exit dock_exit(void) +{ + struct dock_station *tmp, *dock_station; + + unregister_acpi_bus_notifier(&dock_acpi_notifier); + list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling) + dock_remove(dock_station); +} + +/* + * Must be called before drivers of devices in dock, otherwise we can't know + * which devices are in a dock + */ +subsys_initcall(dock_init); +module_exit(dock_exit); diff --git a/trunk/drivers/acpi/ec.c b/trunk/drivers/acpi/ec.c index edc00818c803..d45b2871d33b 100644 --- a/trunk/drivers/acpi/ec.c +++ b/trunk/drivers/acpi/ec.c @@ -223,7 +223,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) static int ec_poll(struct acpi_ec 
*ec) { unsigned long flags; - int repeat = 5; /* number of command restarts */ + int repeat = 2; /* number of command restarts */ while (repeat--) { unsigned long delay = jiffies + msecs_to_jiffies(ec_delay); @@ -241,6 +241,8 @@ static int ec_poll(struct acpi_ec *ec) } advance_transaction(ec, acpi_ec_read_status(ec)); } while (time_before(jiffies, delay)); + if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) + break; pr_debug(PREFIX "controller reset, restart transaction\n"); spin_lock_irqsave(&ec->lock, flags); start_transaction(ec); diff --git a/trunk/drivers/acpi/internal.h b/trunk/drivers/acpi/internal.h index c610a76d92c4..6f1afd9118c8 100644 --- a/trunk/drivers/acpi/internal.h +++ b/trunk/drivers/acpi/internal.h @@ -35,16 +35,12 @@ void acpi_pci_link_init(void); void acpi_pci_root_hp_init(void); void acpi_platform_init(void); int acpi_sysfs_init(void); +void acpi_csrt_init(void); #ifdef CONFIG_ACPI_CONTAINER void acpi_container_init(void); #else static inline void acpi_container_init(void) {} #endif -#ifdef CONFIG_ACPI_DOCK -void acpi_dock_init(void); -#else -static inline void acpi_dock_init(void) {} -#endif #ifdef CONFIG_ACPI_HOTPLUG_MEMORY void acpi_memory_hotplug_init(void); #else diff --git a/trunk/drivers/acpi/pci_root.c b/trunk/drivers/acpi/pci_root.c index e427dc516c76..1dd6f6c85874 100644 --- a/trunk/drivers/acpi/pci_root.c +++ b/trunk/drivers/acpi/pci_root.c @@ -641,9 +641,7 @@ static void _handle_hotplug_event_root(struct work_struct *work) /* bus enumerate */ printk(KERN_DEBUG "%s: Bus check notify on %s\n", __func__, (char *)buffer.pointer); - if (root) - acpiphp_check_host_bridge(handle); - else + if (!root) handle_root_bridge_insertion(handle); break; diff --git a/trunk/drivers/acpi/power.c b/trunk/drivers/acpi/power.c index 288bb270f8ed..f962047c6c85 100644 --- a/trunk/drivers/acpi/power.c +++ b/trunk/drivers/acpi/power.c @@ -885,7 +885,6 @@ int acpi_add_power_resource(acpi_handle handle) ACPI_STA_DEFAULT); mutex_init(&resource->resource_lock); INIT_LIST_HEAD(&resource->dependent); - INIT_LIST_HEAD(&resource->list_node); resource->name = device->pnp.bus_id; strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_POWER_CLASS); diff --git a/trunk/drivers/acpi/processor_driver.c b/trunk/drivers/acpi/processor_driver.c index c266cdc11784..bec717ffd25f 100644 --- a/trunk/drivers/acpi/processor_driver.c +++ b/trunk/drivers/acpi/processor_driver.c @@ -95,6 +95,9 @@ static const struct acpi_device_id processor_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, processor_device_ids); +static SIMPLE_DEV_PM_OPS(acpi_processor_pm, + acpi_processor_suspend, acpi_processor_resume); + static struct acpi_driver acpi_processor_driver = { .name = "processor", .class = ACPI_PROCESSOR_CLASS, @@ -104,6 +107,7 @@ static struct acpi_driver acpi_processor_driver = { .remove = acpi_processor_remove, .notify = acpi_processor_notify, }, + .drv.pm = &acpi_processor_pm, }; #define INSTALL_NOTIFY_HANDLER 1 @@ -930,8 +934,6 @@ static int __init acpi_processor_init(void) if (result < 0) return result; - acpi_processor_syscore_init(); - acpi_processor_install_hotplug_notify(); acpi_thermal_cpufreq_init(); @@ -954,8 +956,6 @@ static void __exit acpi_processor_exit(void) acpi_processor_uninstall_hotplug_notify(); - acpi_processor_syscore_exit(); - acpi_bus_unregister_driver(&acpi_processor_driver); return; diff --git a/trunk/drivers/acpi/processor_idle.c b/trunk/drivers/acpi/processor_idle.c index eb133c77aadb..f0df2c9434d2 100644 --- 
a/trunk/drivers/acpi/processor_idle.c +++ b/trunk/drivers/acpi/processor_idle.c @@ -34,7 +34,6 @@ #include /* need_resched() */ #include #include -#include /* * Include the apic definitions for x86 to have the APIC timer related defines @@ -211,41 +210,33 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr, #endif -#ifdef CONFIG_PM_SLEEP static u32 saved_bm_rld; -int acpi_processor_suspend(void) +static void acpi_idle_bm_rld_save(void) { acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld); - return 0; } - -void acpi_processor_resume(void) +static void acpi_idle_bm_rld_restore(void) { u32 resumed_bm_rld; acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld); - if (resumed_bm_rld == saved_bm_rld) - return; - acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); + if (resumed_bm_rld != saved_bm_rld) + acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); } -static struct syscore_ops acpi_processor_syscore_ops = { - .suspend = acpi_processor_suspend, - .resume = acpi_processor_resume, -}; - -void acpi_processor_syscore_init(void) +int acpi_processor_suspend(struct device *dev) { - register_syscore_ops(&acpi_processor_syscore_ops); + acpi_idle_bm_rld_save(); + return 0; } -void acpi_processor_syscore_exit(void) +int acpi_processor_resume(struct device *dev) { - unregister_syscore_ops(&acpi_processor_syscore_ops); + acpi_idle_bm_rld_restore(); + return 0; } -#endif /* CONFIG_PM_SLEEP */ #if defined(CONFIG_X86) static void tsc_check_state(int state) diff --git a/trunk/drivers/acpi/resource.c b/trunk/drivers/acpi/resource.c index 3322b47ab7ca..a3868f6c222a 100644 --- a/trunk/drivers/acpi/resource.c +++ b/trunk/drivers/acpi/resource.c @@ -304,8 +304,7 @@ static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi) } static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, - u8 triggering, u8 polarity, u8 shareable, - bool legacy) + u8 triggering, u8 polarity, u8 shareable) { int irq, p, t; @@ -318,19 +317,14 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, * In IO-APIC mode, use overrided attribute. Two reasons: * 1. BIOS bug in DSDT * 2. BIOS uses IO-APIC mode Interrupt Source Override - * - * We do this only if we are dealing with IRQ() or IRQNoFlags() - * resource (the legacy ISA resources). With modern ACPI 5 devices - * using extended IRQ descriptors we take the IRQ configuration - * from _CRS directly. */ - if (legacy && !acpi_get_override_irq(gsi, &t, &p)) { + if (!acpi_get_override_irq(gsi, &t, &p)) { u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; if (triggering != trig || polarity != pol) { pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi, - t ? "level" : "edge", p ? "low" : "high"); + t ? "edge" : "level", p ? 
"low" : "high"); triggering = trig; polarity = pol; } @@ -379,7 +373,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, } acpi_dev_get_irqresource(res, irq->interrupts[index], irq->triggering, irq->polarity, - irq->sharable, true); + irq->sharable); break; case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: ext_irq = &ares->data.extended_irq; @@ -389,7 +383,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, } acpi_dev_get_irqresource(res, ext_irq->interrupts[index], ext_irq->triggering, ext_irq->polarity, - ext_irq->sharable, false); + ext_irq->sharable); break; default: return false; diff --git a/trunk/drivers/acpi/scan.c b/trunk/drivers/acpi/scan.c index 27da63061e11..fe158fd4f1df 100644 --- a/trunk/drivers/acpi/scan.c +++ b/trunk/drivers/acpi/scan.c @@ -1017,8 +1017,11 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver) return -ENOSYS; result = driver->ops.add(device); - if (result) + if (result) { + device->driver = NULL; + device->driver_data = NULL; return result; + } device->driver = driver; @@ -1782,7 +1785,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type) acpi_set_pnp_ids(handle, &pnp, type); if (!pnp.type.hardware_id) - goto out; + return; /* * This relies on the fact that acpi_install_notify_handler() will not @@ -1797,7 +1800,6 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type) } } -out: acpi_free_pnp_ids(&pnp); } @@ -2040,9 +2042,9 @@ int __init acpi_scan_init(void) acpi_pci_link_init(); acpi_platform_init(); acpi_lpss_init(); + acpi_csrt_init(); acpi_container_init(); acpi_memory_hotplug_init(); - acpi_dock_init(); mutex_lock(&acpi_scan_lock); /* diff --git a/trunk/drivers/acpi/video.c b/trunk/drivers/acpi/video.c index 440eadf2d32c..c3932d0876e0 100644 --- a/trunk/drivers/acpi/video.c +++ b/trunk/drivers/acpi/video.c @@ -456,30 +456,6 @@ static struct dmi_system_id video_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"), }, }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP Pavilion g6 Notebook PC", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP 1000 Notebook PC", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP Pavilion m4", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"), - }, - }, {} }; @@ -1722,9 +1698,6 @@ static int acpi_video_bus_add(struct acpi_device *device) int error; acpi_status status; - if (device->handler) - return -EINVAL; - status = acpi_walk_namespace(ACPI_TYPE_DEVICE, device->parent->handle, 1, acpi_video_bus_match, NULL, diff --git a/trunk/drivers/acpi/video_detect.c b/trunk/drivers/acpi/video_detect.c index e6bd910bc6ed..66f67626f02e 100644 --- a/trunk/drivers/acpi/video_detect.c +++ b/trunk/drivers/acpi/video_detect.c @@ -161,14 +161,6 @@ static struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"), }, }, - { - .callback = video_detect_force_vendor, - .ident = "Asus UL30A", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), - }, - }, { }, }; diff --git a/trunk/drivers/ata/acard-ahci.c b/trunk/drivers/ata/acard-ahci.c index 
9d0cf019ce59..4e94ba29cb8d 100644 --- a/trunk/drivers/ata/acard-ahci.c +++ b/trunk/drivers/ata/acard-ahci.c @@ -2,7 +2,7 @@ /* * acard-ahci.c - ACard AHCI SATA support * - * Maintained by: Tejun Heo + * Maintained by: Jeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * diff --git a/trunk/drivers/ata/ahci.c b/trunk/drivers/ata/ahci.c index 2b50dfdf1cfc..251e57d38942 100644 --- a/trunk/drivers/ata/ahci.c +++ b/trunk/drivers/ata/ahci.c @@ -1,7 +1,7 @@ /* * ahci.c - AHCI SATA support * - * Maintained by: Tejun Heo + * Maintained by: Jeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * @@ -423,8 +423,6 @@ static const struct pci_device_id ahci_pci_tbl[] = { .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a), .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ - { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172), - .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192), .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3), diff --git a/trunk/drivers/ata/ahci.h b/trunk/drivers/ata/ahci.h index 10b14d45cfd2..b830e6c9fe49 100644 --- a/trunk/drivers/ata/ahci.h +++ b/trunk/drivers/ata/ahci.h @@ -1,7 +1,7 @@ /* * ahci.h - Common AHCI SATA definitions and declarations * - * Maintained by: Tejun Heo + * Maintained by: Jeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * diff --git a/trunk/drivers/ata/ata_piix.c b/trunk/drivers/ata/ata_piix.c index 9a8a674e8fac..2f48123d74c4 100644 --- a/trunk/drivers/ata/ata_piix.c +++ b/trunk/drivers/ata/ata_piix.c @@ -1,7 +1,7 @@ /* * ata_piix.c - Intel PATA/SATA controllers * - * Maintained by: Tejun Heo + * Maintained by: Jeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * @@ -151,7 +151,6 @@ enum piix_controller_ids { piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ ich8_sata_snb, ich8_2port_sata_snb, - ich8_2port_sata_byt, }; struct piix_map_db { @@ -335,9 +334,6 @@ static const struct pci_device_id piix_pci_tbl[] = { { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (Wellsburg) */ { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, - /* SATA Controller IDE (BayTrail) */ - { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, - { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, { } /* terminate list */ }; @@ -445,7 +441,6 @@ static const struct piix_map_db *piix_map_db_table[] = { [tolapai_sata] = &tolapai_map_db, [ich8_sata_snb] = &ich8_map_db, [ich8_2port_sata_snb] = &ich8_2port_map_db, - [ich8_2port_sata_byt] = &ich8_2port_map_db, }; static struct pci_bits piix_enable_bits[] = { @@ -1259,16 +1254,6 @@ static struct ata_port_info piix_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &piix_sata_ops, }, - - [ich8_2port_sata_byt] = - { - .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16, - .pio_mask = ATA_PIO4, - .mwdma_mask = ATA_MWDMA2, - .udma_mask = ATA_UDMA6, - .port_ops = &piix_sata_ops, - }, - }; #define AHCI_PCI_BAR 5 diff --git a/trunk/drivers/ata/libahci.c b/trunk/drivers/ata/libahci.c index a70ff154f586..34c82167b962 100644 --- a/trunk/drivers/ata/libahci.c +++ b/trunk/drivers/ata/libahci.c @@ -1,7 +1,7 @@ /* * libahci.c - Common AHCI SATA low-level routines * - * Maintained by: Tejun Heo + * Maintained by: Jeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. 
* diff --git a/trunk/drivers/ata/libata-acpi.c b/trunk/drivers/ata/libata-acpi.c index cf4e7020adac..87f2f395d79a 100644 --- a/trunk/drivers/ata/libata-acpi.c +++ b/trunk/drivers/ata/libata-acpi.c @@ -156,10 +156,8 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev, spin_unlock_irqrestore(ap->lock, flags); - if (wait) { + if (wait) ata_port_wait_eh(ap); - flush_work(&ap->hotplug_task.work); - } } static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data) @@ -216,39 +214,6 @@ static const struct acpi_dock_ops ata_acpi_ap_dock_ops = { .uevent = ata_acpi_ap_uevent, }; -void ata_acpi_hotplug_init(struct ata_host *host) -{ - int i; - - for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap = host->ports[i]; - acpi_handle handle; - struct ata_device *dev; - - if (!ap) - continue; - - handle = ata_ap_acpi_handle(ap); - if (handle) { - /* we might be on a docking station */ - register_hotplug_dock_device(handle, - &ata_acpi_ap_dock_ops, ap, - NULL, NULL); - } - - ata_for_each_dev(dev, &ap->link, ALL) { - handle = ata_dev_acpi_handle(dev); - if (!handle) - continue; - - /* we might be on a docking station */ - register_hotplug_dock_device(handle, - &ata_acpi_dev_dock_ops, - dev, NULL, NULL); - } - } -} - /** * ata_acpi_dissociate - dissociate ATA host from ACPI objects * @host: target ATA host diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c index adf002a3c584..63c743baf920 100644 --- a/trunk/drivers/ata/libata-core.c +++ b/trunk/drivers/ata/libata-core.c @@ -1,7 +1,7 @@ /* * libata-core.c - helper library for ATA * - * Maintained by: Tejun Heo + * Maintained by: Jeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * @@ -1602,12 +1602,6 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, qc->tf = *tf; if (cdb) memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); - - /* some SATA bridges need us to indicate data xfer direction */ - if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) && - dma_dir == DMA_FROM_DEVICE) - qc->tf.feature |= ATAPI_DMADIR; - qc->flags |= ATA_QCFLAG_RESULT_TF; qc->dma_dir = dma_dir; if (dma_dir != DMA_NONE) { @@ -6148,8 +6142,6 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) if (rc) goto err_tadd; - ata_acpi_hotplug_init(host); - /* set cable, sata_spd_limit and report */ for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; diff --git a/trunk/drivers/ata/libata-eh.c b/trunk/drivers/ata/libata-eh.c index c69fcce505c0..f9476fb3ac43 100644 --- a/trunk/drivers/ata/libata-eh.c +++ b/trunk/drivers/ata/libata-eh.c @@ -1,7 +1,7 @@ /* * libata-eh.c - libata error handling * - * Maintained by: Tejun Heo + * Maintained by: Jeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * diff --git a/trunk/drivers/ata/libata-scsi.c b/trunk/drivers/ata/libata-scsi.c index 0101af541436..dd310b27b24c 100644 --- a/trunk/drivers/ata/libata-scsi.c +++ b/trunk/drivers/ata/libata-scsi.c @@ -1,7 +1,7 @@ /* * libata-scsi.c - helper library for ATA * - * Maintained by: Tejun Heo + * Maintained by: Jeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. 
* diff --git a/trunk/drivers/ata/libata-sff.c b/trunk/drivers/ata/libata-sff.c index b603720b877d..d8af325a6bda 100644 --- a/trunk/drivers/ata/libata-sff.c +++ b/trunk/drivers/ata/libata-sff.c @@ -1,7 +1,7 @@ /* * libata-sff.c - helper library for PCI IDE BMDMA * - * Maintained by: Tejun Heo + * Maintained by: Jeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * diff --git a/trunk/drivers/ata/libata.h b/trunk/drivers/ata/libata.h index 577d902bc4de..c949dd311b2e 100644 --- a/trunk/drivers/ata/libata.h +++ b/trunk/drivers/ata/libata.h @@ -122,7 +122,6 @@ extern int ata_acpi_register(void); extern void ata_acpi_unregister(void); extern void ata_acpi_bind(struct ata_device *dev); extern void ata_acpi_unbind(struct ata_device *dev); -extern void ata_acpi_hotplug_init(struct ata_host *host); #else static inline void ata_acpi_dissociate(struct ata_host *host) { } static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; } @@ -135,7 +134,6 @@ static inline int ata_acpi_register(void) { return 0; } static inline void ata_acpi_unregister(void) { } static inline void ata_acpi_bind(struct ata_device *dev) { } static inline void ata_acpi_unbind(struct ata_device *dev) { } -static inline void ata_acpi_hotplug_init(struct ata_host *host) {} #endif /* libata-scsi.c */ diff --git a/trunk/drivers/ata/pata_ep93xx.c b/trunk/drivers/ata/pata_ep93xx.c index 980b88e109fc..c1bfaf43d109 100644 --- a/trunk/drivers/ata/pata_ep93xx.c +++ b/trunk/drivers/ata/pata_ep93xx.c @@ -933,6 +933,11 @@ static int ep93xx_pata_probe(struct platform_device *pdev) } mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem_res) { + err = -ENXIO; + goto err_rel_gpio; + } + ide_base = devm_ioremap_resource(&pdev->dev, mem_res); if (IS_ERR(ide_base)) { err = PTR_ERR(ide_base); diff --git a/trunk/drivers/ata/pdc_adma.c b/trunk/drivers/ata/pdc_adma.c index 8ea6e6afd041..505333340ad5 100644 --- a/trunk/drivers/ata/pdc_adma.c +++ b/trunk/drivers/ata/pdc_adma.c @@ -1,7 +1,7 @@ /* * pdc_adma.c - Pacific Digital Corporation ADMA * - * Maintained by: Tejun Heo + * Maintained by: Mark Lord * * Copyright 2005 Mark Lord * diff --git a/trunk/drivers/ata/sata_promise.c b/trunk/drivers/ata/sata_promise.c index 958ba2a420c3..fb0dd87f8893 100644 --- a/trunk/drivers/ata/sata_promise.c +++ b/trunk/drivers/ata/sata_promise.c @@ -1,7 +1,7 @@ /* * sata_promise.c - Promise SATA * - * Maintained by: Tejun Heo + * Maintained by: Jeff Garzik * Mikael Pettersson * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. 
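[Editorial sketch] The pata_ep93xx hunk above adds a NULL check on the result of platform_get_resource() before the region is mapped with devm_ioremap_resource(). A minimal sketch of that probe-time pattern is shown here; the probe name, the error value choice and the omitted controller setup are simplified stand-ins, not the driver's actual code.

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>

static int example_pata_probe(struct platform_device *pdev)
{
	struct resource *mem_res;
	void __iomem *base;

	/* Look up the memory window described by the platform/DT data. */
	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_res)
		return -ENXIO;		/* no MEM resource: fail the probe early */

	/*
	 * devm_ioremap_resource() validates and maps the resource; the
	 * mapping is released automatically when the driver detaches.
	 */
	base = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... controller setup using 'base' would follow here ... */
	return 0;
}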
diff --git a/trunk/drivers/ata/sata_rcar.c b/trunk/drivers/ata/sata_rcar.c index 249c8a289bfd..4799868bd733 100644 --- a/trunk/drivers/ata/sata_rcar.c +++ b/trunk/drivers/ata/sata_rcar.c @@ -549,7 +549,6 @@ static void sata_rcar_bmdma_start(struct ata_queued_cmd *qc) /* start host DMA transaction */ dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG); - dmactl &= ~ATAPI_CONTROL1_STOP; dmactl |= ATAPI_CONTROL1_START; iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG); } @@ -619,16 +618,17 @@ static struct ata_port_operations sata_rcar_port_ops = { .bmdma_status = sata_rcar_bmdma_status, }; -static void sata_rcar_serr_interrupt(struct ata_port *ap) +static int sata_rcar_serr_interrupt(struct ata_port *ap) { struct sata_rcar_priv *priv = ap->host->private_data; struct ata_eh_info *ehi = &ap->link.eh_info; int freeze = 0; + int handled = 0; u32 serror; serror = ioread32(priv->base + SCRSERR_REG); if (!serror) - return; + return 0; DPRINTK("SError @host_intr: 0x%x\n", serror); @@ -641,6 +641,7 @@ static void sata_rcar_serr_interrupt(struct ata_port *ap) ata_ehi_push_desc(ehi, "%s", "hotplug"); freeze = serror & SERR_COMM_WAKE ? 0 : 1; + handled = 1; } /* freeze or abort */ @@ -648,9 +649,11 @@ static void sata_rcar_serr_interrupt(struct ata_port *ap) ata_port_freeze(ap); else ata_port_abort(ap); + + return handled; } -static void sata_rcar_ata_interrupt(struct ata_port *ap) +static int sata_rcar_ata_interrupt(struct ata_port *ap) { struct ata_queued_cmd *qc; int handled = 0; @@ -659,9 +662,7 @@ static void sata_rcar_ata_interrupt(struct ata_port *ap) if (qc) handled |= ata_bmdma_port_intr(ap, qc); - /* be sure to clear ATA interrupt */ - if (!handled) - sata_rcar_check_status(ap); + return handled; } static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance) @@ -676,21 +677,20 @@ static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance) spin_lock_irqsave(&host->lock, flags); sataintstat = ioread32(priv->base + SATAINTSTAT_REG); - sataintstat &= SATA_RCAR_INT_MASK; if (!sataintstat) goto done; /* ack */ - iowrite32(~sataintstat & 0x7ff, priv->base + SATAINTSTAT_REG); + iowrite32(sataintstat & ~SATA_RCAR_INT_MASK, + priv->base + SATAINTSTAT_REG); ap = host->ports[0]; if (sataintstat & SATAINTSTAT_ATA) - sata_rcar_ata_interrupt(ap); + handled |= sata_rcar_ata_interrupt(ap); if (sataintstat & SATAINTSTAT_SERR) - sata_rcar_serr_interrupt(ap); + handled |= sata_rcar_serr_interrupt(ap); - handled = 1; done: spin_unlock_irqrestore(&host->lock, flags); diff --git a/trunk/drivers/ata/sata_sil.c b/trunk/drivers/ata/sata_sil.c index 0ae3ca4bf5c0..a7b31672c4b7 100644 --- a/trunk/drivers/ata/sata_sil.c +++ b/trunk/drivers/ata/sata_sil.c @@ -1,7 +1,7 @@ /* * sata_sil.c - Silicon Image SATA * - * Maintained by: Tejun Heo + * Maintained by: Jeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * diff --git a/trunk/drivers/ata/sata_sx4.c b/trunk/drivers/ata/sata_sx4.c index 9947010afc0f..7b7127a58f51 100644 --- a/trunk/drivers/ata/sata_sx4.c +++ b/trunk/drivers/ata/sata_sx4.c @@ -1,7 +1,7 @@ /* * sata_sx4.c - Promise SATA * - * Maintained by: Tejun Heo + * Maintained by: Jeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. 
* diff --git a/trunk/drivers/ata/sata_via.c b/trunk/drivers/ata/sata_via.c index 87f056e54a9d..5913ea9d57b2 100644 --- a/trunk/drivers/ata/sata_via.c +++ b/trunk/drivers/ata/sata_via.c @@ -1,7 +1,7 @@ /* * sata_via.c - VIA Serial ATA controllers * - * Maintained by: Tejun Heo + * Maintained by: Jeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * diff --git a/trunk/drivers/base/bus.c b/trunk/drivers/base/bus.c index d414331b480e..1a68f947ded8 100644 --- a/trunk/drivers/base/bus.c +++ b/trunk/drivers/base/bus.c @@ -1295,7 +1295,6 @@ int subsys_virtual_register(struct bus_type *subsys, return subsys_register(subsys, groups, virtual_dir); } -EXPORT_SYMBOL_GPL(subsys_virtual_register); int __init buses_init(void) { diff --git a/trunk/drivers/base/core.c b/trunk/drivers/base/core.c index 2499cefdcdf2..016312437577 100644 --- a/trunk/drivers/base/core.c +++ b/trunk/drivers/base/core.c @@ -572,11 +572,9 @@ int device_create_file(struct device *dev, if (dev) { WARN(((attr->attr.mode & S_IWUGO) && !attr->store), - "Attribute %s: write permission without 'store'\n", - attr->attr.name); + "Write permission without 'store'\n"); WARN(((attr->attr.mode & S_IRUGO) && !attr->show), - "Attribute %s: read permission without 'show'\n", - attr->attr.name); + "Read permission without 'show'\n"); error = sysfs_create_file(&dev->kobj, &attr->attr); } diff --git a/trunk/drivers/base/firmware_class.c b/trunk/drivers/base/firmware_class.c index 01e21037d8fe..4b1f9265887f 100644 --- a/trunk/drivers/base/firmware_class.c +++ b/trunk/drivers/base/firmware_class.c @@ -450,18 +450,8 @@ static void fw_load_abort(struct firmware_priv *fw_priv) { struct firmware_buf *buf = fw_priv->buf; - /* - * There is a small window in which user can write to 'loading' - * between loading done and disappearance of 'loading' - */ - if (test_bit(FW_STATUS_DONE, &buf->status)) - return; - set_bit(FW_STATUS_ABORT, &buf->status); complete_all(&buf->completion); - - /* avoid user action after loading abort */ - fw_priv->buf = NULL; } #define is_fw_load_aborted(buf) \ @@ -538,12 +528,7 @@ static ssize_t firmware_loading_show(struct device *dev, struct device_attribute *attr, char *buf) { struct firmware_priv *fw_priv = to_firmware_priv(dev); - int loading = 0; - - mutex_lock(&fw_lock); - if (fw_priv->buf) - loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status); - mutex_unlock(&fw_lock); + int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status); return sprintf(buf, "%d\n", loading); } @@ -585,12 +570,12 @@ static ssize_t firmware_loading_store(struct device *dev, const char *buf, size_t count) { struct firmware_priv *fw_priv = to_firmware_priv(dev); - struct firmware_buf *fw_buf; + struct firmware_buf *fw_buf = fw_priv->buf; int loading = simple_strtol(buf, NULL, 10); int i; mutex_lock(&fw_lock); - fw_buf = fw_priv->buf; + if (!fw_buf) goto out; @@ -792,6 +777,10 @@ static void firmware_class_timeout_work(struct work_struct *work) struct firmware_priv, timeout_work.work); mutex_lock(&fw_lock); + if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) { + mutex_unlock(&fw_lock); + return; + } fw_load_abort(fw_priv); mutex_unlock(&fw_lock); } @@ -872,6 +861,8 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent, cancel_delayed_work_sync(&fw_priv->timeout_work); + fw_priv->buf = NULL; + device_remove_file(f_dev, &dev_attr_loading); err_del_bin_attr: device_remove_bin_file(f_dev, &firmware_attr_data); diff --git a/trunk/drivers/base/power/common.c b/trunk/drivers/base/power/common.c 
index 5da914041305..39c32529b833 100644 --- a/trunk/drivers/base/power/common.c +++ b/trunk/drivers/base/power/common.c @@ -61,24 +61,24 @@ EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); int dev_pm_put_subsys_data(struct device *dev) { struct pm_subsys_data *psd; - int ret = 1; + int ret = 0; spin_lock_irq(&dev->power.lock); psd = dev_to_psd(dev); - if (!psd) + if (!psd) { + ret = -EINVAL; goto out; + } if (--psd->refcount == 0) { dev->power.subsys_data = NULL; - } else { - psd = NULL; - ret = 0; + kfree(psd); + ret = 1; } out: spin_unlock_irq(&dev->power.lock); - kfree(psd); return ret; } diff --git a/trunk/drivers/base/regmap/regcache-rbtree.c b/trunk/drivers/base/regmap/regcache-rbtree.c index 02f490bad30f..aa0875f6f1b7 100644 --- a/trunk/drivers/base/regmap/regcache-rbtree.c +++ b/trunk/drivers/base/regmap/regcache-rbtree.c @@ -143,7 +143,7 @@ static int rbtree_show(struct seq_file *s, void *ignored) int registers = 0; int this_registers, average; - map->lock(map->lock_arg); + map->lock(map); mem_size = sizeof(*rbtree_ctx); mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long); @@ -170,7 +170,7 @@ static int rbtree_show(struct seq_file *s, void *ignored) seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n", nodes, registers, average, mem_size); - map->unlock(map->lock_arg); + map->unlock(map); return 0; } @@ -391,6 +391,8 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min, for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { rbnode = rb_entry(node, struct regcache_rbtree_node, node); + if (rbnode->base_reg < min) + continue; if (rbnode->base_reg > max) break; if (rbnode->base_reg + rbnode->blklen < min) diff --git a/trunk/drivers/base/regmap/regcache.c b/trunk/drivers/base/regmap/regcache.c index 507ee2da0f6e..75923f2396bd 100644 --- a/trunk/drivers/base/regmap/regcache.c +++ b/trunk/drivers/base/regmap/regcache.c @@ -270,7 +270,7 @@ int regcache_sync(struct regmap *map) BUG_ON(!map->cache_ops || !map->cache_ops->sync); - map->lock(map->lock_arg); + map->lock(map); /* Remember the initial bypass state */ bypass = map->cache_bypass; dev_dbg(map->dev, "Syncing %s cache\n", @@ -306,7 +306,7 @@ int regcache_sync(struct regmap *map) trace_regcache_sync(map->dev, name, "stop"); /* Restore the bypass state */ map->cache_bypass = bypass; - map->unlock(map->lock_arg); + map->unlock(map); return ret; } @@ -333,7 +333,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min, BUG_ON(!map->cache_ops || !map->cache_ops->sync); - map->lock(map->lock_arg); + map->lock(map); /* Remember the initial bypass state */ bypass = map->cache_bypass; @@ -352,7 +352,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min, trace_regcache_sync(map->dev, name, "stop region"); /* Restore the bypass state */ map->cache_bypass = bypass; - map->unlock(map->lock_arg); + map->unlock(map); return ret; } @@ -372,11 +372,11 @@ EXPORT_SYMBOL_GPL(regcache_sync_region); */ void regcache_cache_only(struct regmap *map, bool enable) { - map->lock(map->lock_arg); + map->lock(map); WARN_ON(map->cache_bypass && enable); map->cache_only = enable; trace_regmap_cache_only(map->dev, enable); - map->unlock(map->lock_arg); + map->unlock(map); } EXPORT_SYMBOL_GPL(regcache_cache_only); @@ -391,9 +391,9 @@ EXPORT_SYMBOL_GPL(regcache_cache_only); */ void regcache_mark_dirty(struct regmap *map) { - map->lock(map->lock_arg); + map->lock(map); map->cache_dirty = true; - map->unlock(map->lock_arg); + map->unlock(map); } 
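[Editorial sketch] The regcache hunks above toggle these helpers between map->lock(map) and map->lock(map->lock_arg). The underlying pattern is locking through callbacks with an opaque argument, so the same core paths work whichever lock backs a given map. The struct and function names below are simplified stand-ins, not the kernel's real regmap definitions.

#include <linux/mutex.h>

/* Simplified stand-in for a map whose core code always locks through
 * callbacks instead of touching a specific lock type directly. */
struct example_map {
	void (*lock)(void *lock_arg);
	void (*unlock)(void *lock_arg);
	void *lock_arg;			/* handed back to the callbacks */
	struct mutex mutex;		/* default backing lock */
	bool cache_dirty;
};

static void example_lock_mutex(void *lock_arg)
{
	struct example_map *map = lock_arg;

	mutex_lock(&map->mutex);
}

static void example_unlock_mutex(void *lock_arg)
{
	struct example_map *map = lock_arg;

	mutex_unlock(&map->mutex);
}

static void example_map_init(struct example_map *map)
{
	mutex_init(&map->mutex);
	map->lock = example_lock_mutex;
	map->unlock = example_unlock_mutex;
	map->lock_arg = map;	/* default: callbacks lock the map's own mutex */
}

/* Core code only ever calls the callbacks with the opaque argument --
 * the difference the hunks above flip between is whether that argument
 * is the map itself or the stored lock_arg. */
static void example_mark_dirty(struct example_map *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	map->unlock(map->lock_arg);
}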
EXPORT_SYMBOL_GPL(regcache_mark_dirty); @@ -410,11 +410,11 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty); */ void regcache_cache_bypass(struct regmap *map, bool enable) { - map->lock(map->lock_arg); + map->lock(map); WARN_ON(map->cache_only && enable); map->cache_bypass = enable; trace_regmap_cache_bypass(map->dev, enable); - map->unlock(map->lock_arg); + map->unlock(map); } EXPORT_SYMBOL_GPL(regcache_cache_bypass); diff --git a/trunk/drivers/base/regmap/regmap-debugfs.c b/trunk/drivers/base/regmap/regmap-debugfs.c index 975719bc3450..23b701f5fd2f 100644 --- a/trunk/drivers/base/regmap/regmap-debugfs.c +++ b/trunk/drivers/base/regmap/regmap-debugfs.c @@ -265,7 +265,6 @@ static ssize_t regmap_map_write_file(struct file *file, char *start = buf; unsigned long reg, value; struct regmap *map = file->private_data; - int ret; buf_size = min(count, (sizeof(buf)-1)); if (copy_from_user(buf, user_buf, buf_size)) @@ -283,9 +282,7 @@ static ssize_t regmap_map_write_file(struct file *file, /* Userspace has been fiddling around behind the kernel's back */ add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE); - ret = regmap_write(map, reg, value); - if (ret < 0) - return ret; + regmap_write(map, reg, value); return buf_size; } #else diff --git a/trunk/drivers/bcma/scan.c b/trunk/drivers/bcma/scan.c index 8bffa5c9818c..bca9c80056fe 100644 --- a/trunk/drivers/bcma/scan.c +++ b/trunk/drivers/bcma/scan.c @@ -84,8 +84,6 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = { { BCMA_CORE_I2S, "I2S" }, { BCMA_CORE_SDR_DDR1_MEM_CTL, "SDR/DDR1 Memory Controller" }, { BCMA_CORE_SHIM, "SHIM" }, - { BCMA_CORE_PCIE2, "PCIe Gen2" }, - { BCMA_CORE_ARM_CR4, "ARM CR4" }, { BCMA_CORE_DEFAULT, "Default" }, }; diff --git a/trunk/drivers/block/brd.c b/trunk/drivers/block/brd.c index 9bf4371755f2..f1a29f8e9d33 100644 --- a/trunk/drivers/block/brd.c +++ b/trunk/drivers/block/brd.c @@ -117,13 +117,13 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) spin_lock(&brd->brd_lock); idx = sector >> PAGE_SECTORS_SHIFT; - page->index = idx; if (radix_tree_insert(&brd->brd_pages, idx, page)) { __free_page(page); page = radix_tree_lookup(&brd->brd_pages, idx); BUG_ON(!page); BUG_ON(page->index != idx); - } + } else + page->index = idx; spin_unlock(&brd->brd_lock); radix_tree_preload_end(); diff --git a/trunk/drivers/block/cciss.c b/trunk/drivers/block/cciss.c index 62b6c2cc80b5..6374dc103521 100644 --- a/trunk/drivers/block/cciss.c +++ b/trunk/drivers/block/cciss.c @@ -168,6 +168,8 @@ static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id); static int cciss_open(struct block_device *bdev, fmode_t mode); static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode); static void cciss_release(struct gendisk *disk, fmode_t mode); +static int do_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg); static int cciss_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); @@ -233,7 +235,7 @@ static const struct block_device_operations cciss_fops = { .owner = THIS_MODULE, .open = cciss_unlocked_open, .release = cciss_release, - .ioctl = cciss_ioctl, + .ioctl = do_ioctl, .getgeo = cciss_getgeo, #ifdef CONFIG_COMPAT .compat_ioctl = cciss_compat_ioctl, @@ -1141,6 +1143,16 @@ static void cciss_release(struct gendisk *disk, fmode_t mode) mutex_unlock(&cciss_mutex); } +static int do_ioctl(struct block_device *bdev, fmode_t mode, + unsigned cmd, unsigned long arg) +{ 
+ int ret; + mutex_lock(&cciss_mutex); + ret = cciss_ioctl(bdev, mode, cmd, arg); + mutex_unlock(&cciss_mutex); + return ret; +} + #ifdef CONFIG_COMPAT static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, @@ -1167,7 +1179,7 @@ static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode, case CCISS_REGNEWD: case CCISS_RESCANDISK: case CCISS_GETLUNINFO: - return cciss_ioctl(bdev, mode, cmd, arg); + return do_ioctl(bdev, mode, cmd, arg); case CCISS_PASSTHRU32: return cciss_ioctl32_passthru(bdev, mode, cmd, arg); @@ -1207,7 +1219,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, if (err) return -EFAULT; - err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p); + err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p); if (err) return err; err |= @@ -1249,7 +1261,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode, if (err) return -EFAULT; - err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p); + err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p); if (err) return err; err |= @@ -1299,14 +1311,11 @@ static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp) static int cciss_getintinfo(ctlr_info_t *h, void __user *argp) { cciss_coalint_struct intinfo; - unsigned long flags; if (!argp) return -EINVAL; - spin_lock_irqsave(&h->lock, flags); intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay); intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount); - spin_unlock_irqrestore(&h->lock, flags); if (copy_to_user (argp, &intinfo, sizeof(cciss_coalint_struct))) return -EFAULT; @@ -1347,15 +1356,12 @@ static int cciss_setintinfo(ctlr_info_t *h, void __user *argp) static int cciss_getnodename(ctlr_info_t *h, void __user *argp) { NodeName_type NodeName; - unsigned long flags; int i; if (!argp) return -EINVAL; - spin_lock_irqsave(&h->lock, flags); for (i = 0; i < 16; i++) NodeName[i] = readb(&h->cfgtable->ServerName[i]); - spin_unlock_irqrestore(&h->lock, flags); if (copy_to_user(argp, NodeName, sizeof(NodeName_type))) return -EFAULT; return 0; @@ -1392,13 +1398,10 @@ static int cciss_setnodename(ctlr_info_t *h, void __user *argp) static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp) { Heartbeat_type heartbeat; - unsigned long flags; if (!argp) return -EINVAL; - spin_lock_irqsave(&h->lock, flags); heartbeat = readl(&h->cfgtable->HeartBeat); - spin_unlock_irqrestore(&h->lock, flags); if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type))) return -EFAULT; return 0; @@ -1407,13 +1410,10 @@ static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp) static int cciss_getbustypes(ctlr_info_t *h, void __user *argp) { BusTypes_type BusTypes; - unsigned long flags; if (!argp) return -EINVAL; - spin_lock_irqsave(&h->lock, flags); BusTypes = readl(&h->cfgtable->BusTypes); - spin_unlock_irqrestore(&h->lock, flags); if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type))) return -EFAULT; return 0; diff --git a/trunk/drivers/block/cryptoloop.c b/trunk/drivers/block/cryptoloop.c index 99e773cb70d0..8b6bb764b0a3 100644 --- a/trunk/drivers/block/cryptoloop.c +++ b/trunk/drivers/block/cryptoloop.c @@ -25,9 +25,9 @@ #include #include #include +#include #include #include -#include "loop.h" MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("loop blockdevice transferfunction adaptor / CryptoAPI"); diff --git a/trunk/drivers/block/loop.c b/trunk/drivers/block/loop.c index 40e715531aa6..d92d50fd84b7 100644 --- a/trunk/drivers/block/loop.c +++ b/trunk/drivers/block/loop.c 
@@ -63,6 +63,7 @@ #include #include #include +#include #include #include #include @@ -75,7 +76,6 @@ #include #include #include -#include "loop.h" #include diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.c b/trunk/drivers/block/mtip32xx/mtip32xx.c index 20dd52a2f92f..847107ef0cce 100644 --- a/trunk/drivers/block/mtip32xx/mtip32xx.c +++ b/trunk/drivers/block/mtip32xx/mtip32xx.c @@ -3002,8 +3002,7 @@ static int mtip_hw_debugfs_init(struct driver_data *dd) static void mtip_hw_debugfs_exit(struct driver_data *dd) { - if (dd->dfs_node) - debugfs_remove_recursive(dd->dfs_node); + debugfs_remove_recursive(dd->dfs_node); } @@ -3864,7 +3863,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) struct driver_data *dd = queue->queuedata; struct scatterlist *sg; struct bio_vec *bvec; - int i, nents = 0; + int nents = 0; int tag = 0, unaligned = 0; if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { @@ -3922,12 +3921,11 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) } /* Create the scatter list for this bio. */ - bio_for_each_segment(bvec, bio, i) { + bio_for_each_segment(bvec, bio, nents) { sg_set_page(&sg[nents], bvec->bv_page, bvec->bv_len, bvec->bv_offset); - nents++; } /* Issue the read/write. */ diff --git a/trunk/drivers/block/nvme-core.c b/trunk/drivers/block/nvme-core.c index ce79a590b45b..8efdfaa44a59 100644 --- a/trunk/drivers/block/nvme-core.c +++ b/trunk/drivers/block/nvme-core.c @@ -629,7 +629,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, struct nvme_command *cmnd; struct nvme_iod *iod; enum dma_data_direction dma_dir; - int cmdid, length, result; + int cmdid, length, result = -ENOMEM; u16 control; u32 dsmgmt; int psegs = bio_phys_segments(ns->queue, bio); @@ -640,7 +640,6 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, return result; } - result = -ENOMEM; iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC); if (!iod) goto nomem; @@ -978,8 +977,6 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout) if (timeout && !time_after(now, info[cmdid].timeout)) continue; - if (info[cmdid].ctx == CMD_CTX_CANCELLED) - continue; dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid); ctx = cancel_cmdid(nvmeq, cmdid, &fn); fn(nvmeq->dev, ctx, &cqe); @@ -1209,7 +1206,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, if (addr & 3) return ERR_PTR(-EINVAL); - if (!length || length > INT_MAX - PAGE_SIZE) + if (!length) return ERR_PTR(-EINVAL); offset = offset_in_page(addr); @@ -1230,8 +1227,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, sg_init_table(sg, count); for (i = 0; i < count; i++) { sg_set_page(&sg[i], pages[i], - min_t(unsigned, length, PAGE_SIZE - offset), - offset); + min_t(int, length, PAGE_SIZE - offset), offset); length -= (PAGE_SIZE - offset); offset = 0; } @@ -1439,7 +1435,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev, nvme_free_iod(dev, iod); } - if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result, + if (!status && copy_to_user(&ucmd->result, &cmd.result, sizeof(cmd.result))) status = -EFAULT; @@ -1637,8 +1633,7 @@ static int set_queue_count(struct nvme_dev *dev, int count) static int nvme_setup_io_queues(struct nvme_dev *dev) { - struct pci_dev *pdev = dev->pci_dev; - int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count; + int result, cpu, i, nr_io_queues, db_bar_size, q_depth; nr_io_queues = num_online_cpus(); result = set_queue_count(dev, nr_io_queues); @@ 
-1647,14 +1642,14 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) if (result < nr_io_queues) nr_io_queues = result; - q_count = nr_io_queues; /* Deregister the admin queue's interrupt */ free_irq(dev->entry[0].vector, dev->queues[0]); db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3)); if (db_bar_size > 8192) { iounmap(dev->bar); - dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size); + dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0), + db_bar_size); dev->dbs = ((void __iomem *)dev->bar) + 4096; dev->queues[0]->q_db = dev->dbs; } @@ -1662,36 +1657,19 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) for (i = 0; i < nr_io_queues; i++) dev->entry[i].entry = i; for (;;) { - result = pci_enable_msix(pdev, dev->entry, nr_io_queues); + result = pci_enable_msix(dev->pci_dev, dev->entry, + nr_io_queues); if (result == 0) { break; } else if (result > 0) { nr_io_queues = result; continue; } else { - nr_io_queues = 0; + nr_io_queues = 1; break; } } - if (nr_io_queues == 0) { - nr_io_queues = q_count; - for (;;) { - result = pci_enable_msi_block(pdev, nr_io_queues); - if (result == 0) { - for (i = 0; i < nr_io_queues; i++) - dev->entry[i].vector = i + pdev->irq; - break; - } else if (result > 0) { - nr_io_queues = result; - continue; - } else { - nr_io_queues = 1; - break; - } - } - } - result = queue_request_irq(dev, dev->queues[0], "nvme admin"); /* XXX: handle failure here */ @@ -1872,10 +1850,7 @@ static void nvme_free_dev(struct kref *kref) { struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); nvme_dev_remove(dev); - if (dev->pci_dev->msi_enabled) - pci_disable_msi(dev->pci_dev); - else if (dev->pci_dev->msix_enabled) - pci_disable_msix(dev->pci_dev); + pci_disable_msix(dev->pci_dev); iounmap(dev->bar); nvme_release_instance(dev); nvme_release_prp_pools(dev); @@ -1948,14 +1923,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) INIT_LIST_HEAD(&dev->namespaces); dev->pci_dev = pdev; pci_set_drvdata(pdev, dev); - - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); - else - goto disable; - + dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); result = nvme_set_instance(dev); if (result) goto disable; @@ -2008,10 +1977,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) unmap: iounmap(dev->bar); disable_msix: - if (dev->pci_dev->msi_enabled) - pci_disable_msi(dev->pci_dev); - else if (dev->pci_dev->msix_enabled) - pci_disable_msix(dev->pci_dev); + pci_disable_msix(pdev); nvme_release_instance(dev); nvme_release_prp_pools(dev); disable: diff --git a/trunk/drivers/block/nvme-scsi.c b/trunk/drivers/block/nvme-scsi.c index 102de2f52b5c..fed54b039893 100644 --- a/trunk/drivers/block/nvme-scsi.c +++ b/trunk/drivers/block/nvme-scsi.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -1653,7 +1654,7 @@ static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list, } } -static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr, +static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 *mode_page, u8 page_code) { int res = SNTI_TRANSLATION_SUCCESS; diff --git a/trunk/drivers/block/pktcdvd.c b/trunk/drivers/block/pktcdvd.c index f5d0ea11d9fd..3c08983e600a 100644 --- a/trunk/drivers/block/pktcdvd.c +++ 
b/trunk/drivers/block/pktcdvd.c @@ -83,8 +83,7 @@ #define MAX_SPEED 0xffff -#define ZONE(sector, pd) (((sector) + (pd)->offset) & \ - ~(sector_t)((pd)->settings.size - 1)) +#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1)) static DEFINE_MUTEX(pktcdvd_mutex); static struct pktcdvd_device *pkt_devs[MAX_WRITERS]; diff --git a/trunk/drivers/block/rbd.c b/trunk/drivers/block/rbd.c index aff789d6fccd..ca63104136e0 100644 --- a/trunk/drivers/block/rbd.c +++ b/trunk/drivers/block/rbd.c @@ -55,39 +55,6 @@ #define SECTOR_SHIFT 9 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT) -/* - * Increment the given counter and return its updated value. - * If the counter is already 0 it will not be incremented. - * If the counter is already at its maximum value returns - * -EINVAL without updating it. - */ -static int atomic_inc_return_safe(atomic_t *v) -{ - unsigned int counter; - - counter = (unsigned int)__atomic_add_unless(v, 1, 0); - if (counter <= (unsigned int)INT_MAX) - return (int)counter; - - atomic_dec(v); - - return -EINVAL; -} - -/* Decrement the counter. Return the resulting value, or -EINVAL */ -static int atomic_dec_return_safe(atomic_t *v) -{ - int counter; - - counter = atomic_dec_return(v); - if (counter >= 0) - return counter; - - atomic_inc(v); - - return -EINVAL; -} - #define RBD_DRV_NAME "rbd" #define RBD_DRV_NAME_LONG "rbd (rados block device)" @@ -133,20 +100,21 @@ static int atomic_dec_return_safe(atomic_t *v) * block device image metadata (in-memory version) */ struct rbd_image_header { - /* These six fields never change for a given rbd image */ + /* These four fields never change for a given rbd image */ char *object_prefix; + u64 features; __u8 obj_order; __u8 crypt_type; __u8 comp_type; - u64 stripe_unit; - u64 stripe_count; - u64 features; /* Might be changeable someday? 
*/ /* The remaining fields need to be updated occasionally */ u64 image_size; struct ceph_snap_context *snapc; - char *snap_names; /* format 1 only */ - u64 *snap_sizes; /* format 1 only */ + char *snap_names; + u64 *snap_sizes; + + u64 stripe_unit; + u64 stripe_count; }; /* @@ -257,7 +225,6 @@ struct rbd_obj_request { }; }; struct page **copyup_pages; - u32 copyup_page_count; struct ceph_osd_request *osd_req; @@ -290,7 +257,6 @@ struct rbd_img_request { struct rbd_obj_request *obj_request; /* obj req initiator */ }; struct page **copyup_pages; - u32 copyup_page_count; spinlock_t completion_lock;/* protects next_completion */ u32 next_completion; rbd_img_callback_t callback; @@ -345,7 +311,6 @@ struct rbd_device { struct rbd_spec *parent_spec; u64 parent_overlap; - atomic_t parent_ref; struct rbd_device *parent; /* protects updating the header */ @@ -394,8 +359,7 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count); static ssize_t rbd_remove(struct bus_type *bus, const char *buf, size_t count); -static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping); -static void rbd_spec_put(struct rbd_spec *spec); +static int rbd_dev_image_probe(struct rbd_device *rbd_dev); static struct bus_attribute rbd_bus_attrs[] = { __ATTR(add, S_IWUSR, NULL, rbd_add), @@ -462,8 +426,7 @@ static void rbd_img_parent_read(struct rbd_obj_request *obj_request); static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); static int rbd_dev_refresh(struct rbd_device *rbd_dev); -static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev); -static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev); +static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev); static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u64 snap_id); static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, @@ -519,8 +482,8 @@ static const struct block_device_operations rbd_bd_ops = { }; /* - * Initialize an rbd client instance. Success or not, this function - * consumes ceph_opts. + * Initialize an rbd client instance. + * We own *ceph_opts. */ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts) { @@ -675,8 +638,7 @@ static int parse_rbd_opts_token(char *c, void *private) /* * Get a ceph client with specific addr and configuration, if one does - * not exist create it. Either way, ceph_opts is consumed by this - * function. + * not exist create it. */ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts) { @@ -764,123 +726,88 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk) } /* - * Fill an rbd image header with information from the given format 1 - * on-disk header. + * Create a new header structure, translate header format from the on-disk + * header. 
*/ -static int rbd_header_from_disk(struct rbd_device *rbd_dev, +static int rbd_header_from_disk(struct rbd_image_header *header, struct rbd_image_header_ondisk *ondisk) { - struct rbd_image_header *header = &rbd_dev->header; - bool first_time = header->object_prefix == NULL; - struct ceph_snap_context *snapc; - char *object_prefix = NULL; - char *snap_names = NULL; - u64 *snap_sizes = NULL; u32 snap_count; + size_t len; size_t size; - int ret = -ENOMEM; u32 i; - /* Allocate this now to avoid having to handle failure below */ - - if (first_time) { - size_t len; + memset(header, 0, sizeof (*header)); - len = strnlen(ondisk->object_prefix, - sizeof (ondisk->object_prefix)); - object_prefix = kmalloc(len + 1, GFP_KERNEL); - if (!object_prefix) - return -ENOMEM; - memcpy(object_prefix, ondisk->object_prefix, len); - object_prefix[len] = '\0'; - } + snap_count = le32_to_cpu(ondisk->snap_count); - /* Allocate the snapshot context and fill it in */ + len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix)); + header->object_prefix = kmalloc(len + 1, GFP_KERNEL); + if (!header->object_prefix) + return -ENOMEM; + memcpy(header->object_prefix, ondisk->object_prefix, len); + header->object_prefix[len] = '\0'; - snap_count = le32_to_cpu(ondisk->snap_count); - snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); - if (!snapc) - goto out_err; - snapc->seq = le64_to_cpu(ondisk->snap_seq); if (snap_count) { - struct rbd_image_snap_ondisk *snaps; u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len); - /* We'll keep a copy of the snapshot names... */ + /* Save a copy of the snapshot names */ - if (snap_names_len > (u64)SIZE_MAX) - goto out_2big; - snap_names = kmalloc(snap_names_len, GFP_KERNEL); - if (!snap_names) - goto out_err; - - /* ...as well as the array of their sizes. */ - - size = snap_count * sizeof (*header->snap_sizes); - snap_sizes = kmalloc(size, GFP_KERNEL); - if (!snap_sizes) + if (snap_names_len > (u64) SIZE_MAX) + return -EIO; + header->snap_names = kmalloc(snap_names_len, GFP_KERNEL); + if (!header->snap_names) goto out_err; - /* - * Copy the names, and fill in each snapshot's id - * and size. - * - * Note that rbd_dev_v1_header_info() guarantees the - * ondisk buffer we're working with has + * Note that rbd_dev_v1_header_read() guarantees + * the ondisk buffer we're working with has * snap_names_len bytes beyond the end of the * snapshot id array, this memcpy() is safe. 
*/ - memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len); - snaps = ondisk->snaps; - for (i = 0; i < snap_count; i++) { - snapc->snaps[i] = le64_to_cpu(snaps[i].id); - snap_sizes[i] = le64_to_cpu(snaps[i].image_size); - } - } + memcpy(header->snap_names, &ondisk->snaps[snap_count], + snap_names_len); - /* We won't fail any more, fill in the header */ + /* Record each snapshot's size */ - down_write(&rbd_dev->header_rwsem); - if (first_time) { - header->object_prefix = object_prefix; - header->obj_order = ondisk->options.order; - header->crypt_type = ondisk->options.crypt_type; - header->comp_type = ondisk->options.comp_type; - /* The rest aren't used for format 1 images */ - header->stripe_unit = 0; - header->stripe_count = 0; - header->features = 0; + size = snap_count * sizeof (*header->snap_sizes); + header->snap_sizes = kmalloc(size, GFP_KERNEL); + if (!header->snap_sizes) + goto out_err; + for (i = 0; i < snap_count; i++) + header->snap_sizes[i] = + le64_to_cpu(ondisk->snaps[i].image_size); } else { - ceph_put_snap_context(header->snapc); - kfree(header->snap_names); - kfree(header->snap_sizes); + header->snap_names = NULL; + header->snap_sizes = NULL; } - /* The remaining fields always get updated (when we refresh) */ + header->features = 0; /* No features support in v1 images */ + header->obj_order = ondisk->options.order; + header->crypt_type = ondisk->options.crypt_type; + header->comp_type = ondisk->options.comp_type; - header->image_size = le64_to_cpu(ondisk->image_size); - header->snapc = snapc; - header->snap_names = snap_names; - header->snap_sizes = snap_sizes; + /* Allocate and fill in the snapshot context */ - /* Make sure mapping size is consistent with header info */ - - if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time) - if (rbd_dev->mapping.size != header->image_size) - rbd_dev->mapping.size = header->image_size; + header->image_size = le64_to_cpu(ondisk->image_size); - up_write(&rbd_dev->header_rwsem); + header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); + if (!header->snapc) + goto out_err; + header->snapc->seq = le64_to_cpu(ondisk->snap_seq); + for (i = 0; i < snap_count; i++) + header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id); return 0; -out_2big: - ret = -EIO; + out_err: - kfree(snap_sizes); - kfree(snap_names); - ceph_put_snap_context(snapc); - kfree(object_prefix); + kfree(header->snap_sizes); + header->snap_sizes = NULL; + kfree(header->snap_names); + header->snap_names = NULL; + kfree(header->object_prefix); + header->object_prefix = NULL; - return ret; + return -ENOMEM; } static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which) @@ -1007,11 +934,20 @@ static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id, static int rbd_dev_mapping_set(struct rbd_device *rbd_dev) { - u64 snap_id = rbd_dev->spec->snap_id; + const char *snap_name = rbd_dev->spec->snap_name; + u64 snap_id; u64 size = 0; u64 features = 0; int ret; + if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) { + snap_id = rbd_snap_id_by_name(rbd_dev, snap_name); + if (snap_id == CEPH_NOSNAP) + return -ENOENT; + } else { + snap_id = CEPH_NOSNAP; + } + ret = rbd_snap_size(rbd_dev, snap_id, &size); if (ret) return ret; @@ -1022,6 +958,11 @@ static int rbd_dev_mapping_set(struct rbd_device *rbd_dev) rbd_dev->mapping.size = size; rbd_dev->mapping.features = features; + /* If we are mapping a snapshot it must be marked read-only */ + + if (snap_id != CEPH_NOSNAP) + rbd_dev->mapping.read_only = true; + return 0; } @@ -1029,6 +970,14 @@ 
static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev) { rbd_dev->mapping.size = 0; rbd_dev->mapping.features = 0; + rbd_dev->mapping.read_only = true; +} + +static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev) +{ + rbd_dev->mapping.size = 0; + rbd_dev->mapping.features = 0; + rbd_dev->mapping.read_only = true; } static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) @@ -1036,16 +985,12 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) char *name; u64 segment; int ret; - char *name_format; name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO); if (!name) return NULL; segment = offset >> rbd_dev->header.obj_order; - name_format = "%s.%012llx"; - if (rbd_dev->image_format == 2) - name_format = "%s.%016llx"; - ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format, + ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx", rbd_dev->header.object_prefix, segment); if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) { pr_err("error formatting segment name for #%llu (%d)\n", @@ -1397,18 +1342,20 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request) kref_put(&obj_request->kref, rbd_obj_request_destroy); } -static bool img_request_child_test(struct rbd_img_request *img_request); -static void rbd_parent_request_destroy(struct kref *kref); +static void rbd_img_request_get(struct rbd_img_request *img_request) +{ + dout("%s: img %p (was %d)\n", __func__, img_request, + atomic_read(&img_request->kref.refcount)); + kref_get(&img_request->kref); +} + static void rbd_img_request_destroy(struct kref *kref); static void rbd_img_request_put(struct rbd_img_request *img_request) { rbd_assert(img_request != NULL); dout("%s: img %p (was %d)\n", __func__, img_request, atomic_read(&img_request->kref.refcount)); - if (img_request_child_test(img_request)) - kref_put(&img_request->kref, rbd_parent_request_destroy); - else - kref_put(&img_request->kref, rbd_img_request_destroy); + kref_put(&img_request->kref, rbd_img_request_destroy); } static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request, @@ -1525,12 +1472,6 @@ static void img_request_child_set(struct rbd_img_request *img_request) smp_mb(); } -static void img_request_child_clear(struct rbd_img_request *img_request) -{ - clear_bit(IMG_REQ_CHILD, &img_request->flags); - smp_mb(); -} - static bool img_request_child_test(struct rbd_img_request *img_request) { smp_mb(); @@ -1543,12 +1484,6 @@ static void img_request_layered_set(struct rbd_img_request *img_request) smp_mb(); } -static void img_request_layered_clear(struct rbd_img_request *img_request) -{ - clear_bit(IMG_REQ_LAYERED, &img_request->flags); - smp_mb(); -} - static bool img_request_layered_test(struct rbd_img_request *img_request) { smp_mb(); @@ -1892,74 +1827,6 @@ static void rbd_obj_request_destroy(struct kref *kref) kmem_cache_free(rbd_obj_request_cache, obj_request); } -/* It's OK to call this for a device with no parent */ - -static void rbd_spec_put(struct rbd_spec *spec); -static void rbd_dev_unparent(struct rbd_device *rbd_dev) -{ - rbd_dev_remove_parent(rbd_dev); - rbd_spec_put(rbd_dev->parent_spec); - rbd_dev->parent_spec = NULL; - rbd_dev->parent_overlap = 0; -} - -/* - * Parent image reference counting is used to determine when an - * image's parent fields can be safely torn down--after there are no - * more in-flight requests to the parent image. When the last - * reference is dropped, cleaning them up is safe. 
- */ -static void rbd_dev_parent_put(struct rbd_device *rbd_dev) -{ - int counter; - - if (!rbd_dev->parent_spec) - return; - - counter = atomic_dec_return_safe(&rbd_dev->parent_ref); - if (counter > 0) - return; - - /* Last reference; clean up parent data structures */ - - if (!counter) - rbd_dev_unparent(rbd_dev); - else - rbd_warn(rbd_dev, "parent reference underflow\n"); -} - -/* - * If an image has a non-zero parent overlap, get a reference to its - * parent. - * - * We must get the reference before checking for the overlap to - * coordinate properly with zeroing the parent overlap in - * rbd_dev_v2_parent_info() when an image gets flattened. We - * drop it again if there is no overlap. - * - * Returns true if the rbd device has a parent with a non-zero - * overlap and a reference for it was successfully taken, or - * false otherwise. - */ -static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) -{ - int counter; - - if (!rbd_dev->parent_spec) - return false; - - counter = atomic_inc_return_safe(&rbd_dev->parent_ref); - if (counter > 0 && rbd_dev->parent_overlap) - return true; - - /* Image was flattened, but parent is not yet torn down */ - - if (counter < 0) - rbd_warn(rbd_dev, "parent reference overflow\n"); - - return false; -} - /* * Caller is responsible for filling in the list of object requests * that comprises the image request, and the Linux request pointer @@ -1968,7 +1835,8 @@ static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) static struct rbd_img_request *rbd_img_request_create( struct rbd_device *rbd_dev, u64 offset, u64 length, - bool write_request) + bool write_request, + bool child_request) { struct rbd_img_request *img_request; @@ -1993,7 +1861,9 @@ static struct rbd_img_request *rbd_img_request_create( } else { img_request->snap_id = rbd_dev->spec->snap_id; } - if (rbd_dev_parent_get(rbd_dev)) + if (child_request) + img_request_child_set(img_request); + if (rbd_dev->parent_spec) img_request_layered_set(img_request); spin_lock_init(&img_request->completion_lock); img_request->next_completion = 0; @@ -2003,6 +1873,9 @@ static struct rbd_img_request *rbd_img_request_create( INIT_LIST_HEAD(&img_request->obj_requests); kref_init(&img_request->kref); + rbd_img_request_get(img_request); /* Avoid a warning */ + rbd_img_request_put(img_request); /* TEMPORARY */ + dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev, write_request ? 
"write" : "read", offset, length, img_request); @@ -2024,52 +1897,13 @@ static void rbd_img_request_destroy(struct kref *kref) rbd_img_obj_request_del(img_request, obj_request); rbd_assert(img_request->obj_request_count == 0); - if (img_request_layered_test(img_request)) { - img_request_layered_clear(img_request); - rbd_dev_parent_put(img_request->rbd_dev); - } - if (img_request_write_test(img_request)) ceph_put_snap_context(img_request->snapc); - kmem_cache_free(rbd_img_request_cache, img_request); -} - -static struct rbd_img_request *rbd_parent_request_create( - struct rbd_obj_request *obj_request, - u64 img_offset, u64 length) -{ - struct rbd_img_request *parent_request; - struct rbd_device *rbd_dev; - - rbd_assert(obj_request->img_request); - rbd_dev = obj_request->img_request->rbd_dev; - - parent_request = rbd_img_request_create(rbd_dev->parent, - img_offset, length, false); - if (!parent_request) - return NULL; - - img_request_child_set(parent_request); - rbd_obj_request_get(obj_request); - parent_request->obj_request = obj_request; - - return parent_request; -} - -static void rbd_parent_request_destroy(struct kref *kref) -{ - struct rbd_img_request *parent_request; - struct rbd_obj_request *orig_request; - - parent_request = container_of(kref, struct rbd_img_request, kref); - orig_request = parent_request->obj_request; - - parent_request->obj_request = NULL; - rbd_obj_request_put(orig_request); - img_request_child_clear(parent_request); + if (img_request_child_test(img_request)) + rbd_obj_request_put(img_request->obj_request); - rbd_img_request_destroy(kref); + kmem_cache_free(rbd_img_request_cache, img_request); } static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) @@ -2252,17 +2086,13 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request, obj_request->pages, length, offset & ~PAGE_MASK, false, false); - /* - * set obj_request->img_request before formatting - * the osd_request so that it gets the right snapc - */ - rbd_img_obj_request_add(img_request, obj_request); if (write_request) rbd_osd_req_format_write(obj_request); else rbd_osd_req_format_read(obj_request); obj_request->img_offset = img_offset; + rbd_img_obj_request_add(img_request, obj_request); img_offset += length; resid -= length; @@ -2284,7 +2114,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request; struct rbd_device *rbd_dev; - struct page **pages; + u64 length; u32 page_count; rbd_assert(obj_request->type == OBJ_REQUEST_BIO); @@ -2294,14 +2124,12 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) rbd_dev = img_request->rbd_dev; rbd_assert(rbd_dev); + length = (u64)1 << rbd_dev->header.obj_order; + page_count = (u32)calc_pages_for(0, length); - pages = obj_request->copyup_pages; - rbd_assert(pages != NULL); + rbd_assert(obj_request->copyup_pages); + ceph_release_page_vector(obj_request->copyup_pages, page_count); obj_request->copyup_pages = NULL; - page_count = obj_request->copyup_page_count; - rbd_assert(page_count); - obj_request->copyup_page_count = 0; - ceph_release_page_vector(pages, page_count); /* * We want the transfer count to reflect the size of the @@ -2325,11 +2153,9 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) struct ceph_osd_client *osdc; struct rbd_device *rbd_dev; struct page **pages; - u32 page_count; - int img_result; - u64 parent_length; - u64 offset; - u64 length; + int result; + u64 obj_size; + u64 xferred; rbd_assert(img_request_child_test(img_request)); @@ 
-2338,74 +2164,46 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) pages = img_request->copyup_pages; rbd_assert(pages != NULL); img_request->copyup_pages = NULL; - page_count = img_request->copyup_page_count; - rbd_assert(page_count); - img_request->copyup_page_count = 0; orig_request = img_request->obj_request; rbd_assert(orig_request != NULL); - rbd_assert(obj_request_type_valid(orig_request->type)); - img_result = img_request->result; - parent_length = img_request->length; - rbd_assert(parent_length == img_request->xferred); - rbd_img_request_put(img_request); + rbd_assert(orig_request->type == OBJ_REQUEST_BIO); + result = img_request->result; + obj_size = img_request->length; + xferred = img_request->xferred; - rbd_assert(orig_request->img_request); - rbd_dev = orig_request->img_request->rbd_dev; + rbd_dev = img_request->rbd_dev; rbd_assert(rbd_dev); + rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order); - /* - * If the overlap has become 0 (most likely because the - * image has been flattened) we need to free the pages - * and re-submit the original write request. - */ - if (!rbd_dev->parent_overlap) { - struct ceph_osd_client *osdc; - - ceph_release_page_vector(pages, page_count); - osdc = &rbd_dev->rbd_client->client->osdc; - img_result = rbd_obj_request_submit(osdc, orig_request); - if (!img_result) - return; - } + rbd_img_request_put(img_request); - if (img_result) + if (result) goto out_err; - /* - * The original osd request is of no use to use any more. - * We need a new one that can hold the two ops in a copyup - * request. Allocate the new copyup osd request for the - * original request, and release the old one. - */ - img_result = -ENOMEM; + /* Allocate the new copyup osd request for the original request */ + + result = -ENOMEM; + rbd_assert(!orig_request->osd_req); osd_req = rbd_osd_req_create_copyup(orig_request); if (!osd_req) goto out_err; - rbd_osd_req_destroy(orig_request->osd_req); orig_request->osd_req = osd_req; orig_request->copyup_pages = pages; - orig_request->copyup_page_count = page_count; /* Initialize the copyup op */ osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup"); - osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0, + osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0, false, false); /* Then the original write request op */ - offset = orig_request->offset; - length = orig_request->length; osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE, - offset, length, 0, 0); - if (orig_request->type == OBJ_REQUEST_BIO) - osd_req_op_extent_osd_data_bio(osd_req, 1, - orig_request->bio_list, length); - else - osd_req_op_extent_osd_data_pages(osd_req, 1, - orig_request->pages, length, - offset & ~PAGE_MASK, false, false); + orig_request->offset, + orig_request->length, 0, 0); + osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list, + orig_request->length); rbd_osd_req_format_write(orig_request); @@ -2413,13 +2211,13 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) orig_request->callback = rbd_img_obj_copyup_callback; osdc = &rbd_dev->rbd_client->client->osdc; - img_result = rbd_obj_request_submit(osdc, orig_request); - if (!img_result) + result = rbd_obj_request_submit(osdc, orig_request); + if (!result) return; out_err: /* Record the error code and complete the request */ - orig_request->result = img_result; + orig_request->result = result; orig_request->xferred = 0; obj_request_done_set(orig_request); 
rbd_obj_request_complete(orig_request); @@ -2451,13 +2249,22 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) int result; rbd_assert(obj_request_img_data_test(obj_request)); - rbd_assert(obj_request_type_valid(obj_request->type)); + rbd_assert(obj_request->type == OBJ_REQUEST_BIO); img_request = obj_request->img_request; rbd_assert(img_request != NULL); rbd_dev = img_request->rbd_dev; rbd_assert(rbd_dev->parent != NULL); + /* + * First things first. The original osd request is of no + * use to use any more, we'll need a new one that can hold + * the two ops in a copyup request. We'll get that later, + * but for now we can release the old one. + */ + rbd_osd_req_destroy(obj_request->osd_req); + obj_request->osd_req = NULL; + /* * Determine the byte range covered by the object in the * child image to which the original request was to be sent. @@ -2488,16 +2295,18 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) } result = -ENOMEM; - parent_request = rbd_parent_request_create(obj_request, - img_offset, length); + parent_request = rbd_img_request_create(rbd_dev->parent, + img_offset, length, + false, true); if (!parent_request) goto out_err; + rbd_obj_request_get(obj_request); + parent_request->obj_request = obj_request; result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages); if (result) goto out_err; parent_request->copyup_pages = pages; - parent_request->copyup_page_count = page_count; parent_request->callback = rbd_img_obj_parent_read_full_callback; result = rbd_img_request_submit(parent_request); @@ -2505,7 +2314,6 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) return 0; parent_request->copyup_pages = NULL; - parent_request->copyup_page_count = 0; parent_request->obj_request = NULL; rbd_obj_request_put(obj_request); out_err: @@ -2523,7 +2331,6 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) { struct rbd_obj_request *orig_request; - struct rbd_device *rbd_dev; int result; rbd_assert(!obj_request_img_data_test(obj_request)); @@ -2546,21 +2353,8 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) obj_request->xferred, obj_request->length); rbd_obj_request_put(obj_request); - /* - * If the overlap has become 0 (most likely because the - * image has been flattened) we need to free the pages - * and re-submit the original write request. 
- */ - rbd_dev = orig_request->img_request->rbd_dev; - if (!rbd_dev->parent_overlap) { - struct ceph_osd_client *osdc; - - rbd_obj_request_put(orig_request); - osdc = &rbd_dev->rbd_client->client->osdc; - result = rbd_obj_request_submit(osdc, orig_request); - if (!result) - return; - } + rbd_assert(orig_request); + rbd_assert(orig_request->img_request); /* * Our only purpose here is to determine whether the object @@ -2718,36 +2512,14 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) struct rbd_obj_request *obj_request; struct rbd_device *rbd_dev; u64 obj_end; - u64 img_xferred; - int img_result; rbd_assert(img_request_child_test(img_request)); - /* First get what we need from the image request and release it */ - obj_request = img_request->obj_request; - img_xferred = img_request->xferred; - img_result = img_request->result; - rbd_img_request_put(img_request); - - /* - * If the overlap has become 0 (most likely because the - * image has been flattened) we need to re-submit the - * original request. - */ rbd_assert(obj_request); rbd_assert(obj_request->img_request); - rbd_dev = obj_request->img_request->rbd_dev; - if (!rbd_dev->parent_overlap) { - struct ceph_osd_client *osdc; - osdc = &rbd_dev->rbd_client->client->osdc; - img_result = rbd_obj_request_submit(osdc, obj_request); - if (!img_result) - return; - } - - obj_request->result = img_result; + obj_request->result = img_request->result; if (obj_request->result) goto out; @@ -2760,6 +2532,7 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) */ rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length); obj_end = obj_request->img_offset + obj_request->length; + rbd_dev = obj_request->img_request->rbd_dev; if (obj_end > rbd_dev->parent_overlap) { u64 xferred = 0; @@ -2767,39 +2540,43 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) xferred = rbd_dev->parent_overlap - obj_request->img_offset; - obj_request->xferred = min(img_xferred, xferred); + obj_request->xferred = min(img_request->xferred, xferred); } else { - obj_request->xferred = img_xferred; + obj_request->xferred = img_request->xferred; } out: + rbd_img_request_put(img_request); rbd_img_obj_request_read_callback(obj_request); rbd_obj_request_complete(obj_request); } static void rbd_img_parent_read(struct rbd_obj_request *obj_request) { + struct rbd_device *rbd_dev; struct rbd_img_request *img_request; int result; rbd_assert(obj_request_img_data_test(obj_request)); rbd_assert(obj_request->img_request != NULL); rbd_assert(obj_request->result == (s32) -ENOENT); - rbd_assert(obj_request_type_valid(obj_request->type)); + rbd_assert(obj_request->type == OBJ_REQUEST_BIO); + rbd_dev = obj_request->img_request->rbd_dev; + rbd_assert(rbd_dev->parent != NULL); /* rbd_read_finish(obj_request, obj_request->length); */ - img_request = rbd_parent_request_create(obj_request, + img_request = rbd_img_request_create(rbd_dev->parent, obj_request->img_offset, - obj_request->length); + obj_request->length, + false, true); result = -ENOMEM; if (!img_request) goto out_err; - if (obj_request->type == OBJ_REQUEST_BIO) - result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, - obj_request->bio_list); - else - result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES, - obj_request->pages); + rbd_obj_request_get(obj_request); + img_request->obj_request = obj_request; + + result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, + obj_request->bio_list); if (result) goto out_err; @@ -2849,7 +2626,6 
@@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id) static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) { struct rbd_device *rbd_dev = (struct rbd_device *)data; - int ret; if (!rbd_dev) return; @@ -2857,9 +2633,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, rbd_dev->header_name, (unsigned long long)notify_id, (unsigned int)opcode); - ret = rbd_dev_refresh(rbd_dev); - if (ret) - rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret); + (void)rbd_dev_refresh(rbd_dev); rbd_obj_notify_ack(rbd_dev, notify_id); } @@ -2868,7 +2642,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) * Request sync osd watch/unwatch. The value of "start" determines * whether a watch request is being initiated or torn down. */ -static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start) +static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; @@ -2902,7 +2676,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start) rbd_dev->watch_request->osd_req); osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, - rbd_dev->watch_event->cookie, 0, start ? 1 : 0); + rbd_dev->watch_event->cookie, 0, start); rbd_osd_req_format_write(obj_request); ret = rbd_obj_request_submit(osdc, obj_request); @@ -3095,16 +2869,9 @@ static void rbd_request_fn(struct request_queue *q) goto end_request; /* Shouldn't happen */ } - result = -EIO; - if (offset + length > rbd_dev->mapping.size) { - rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n", - offset, length, rbd_dev->mapping.size); - goto end_request; - } - result = -ENOMEM; img_request = rbd_img_request_create(rbd_dev, offset, length, - write_request); + write_request, false); if (!img_request) goto end_request; @@ -3255,11 +3022,17 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, } /* - * Read the complete header for the given rbd device. On successful - * return, the rbd_dev->header field will contain up-to-date - * information about the image. + * Read the complete header for the given rbd device. + * + * Returns a pointer to a dynamically-allocated buffer containing + * the complete and validated header. Caller can pass the address + * of a variable that will be filled in with the version of the + * header object at the time it was read. + * + * Returns a pointer-coded errno if a failure occurs. 
*/ -static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) +static struct rbd_image_header_ondisk * +rbd_dev_v1_header_read(struct rbd_device *rbd_dev) { struct rbd_image_header_ondisk *ondisk = NULL; u32 snap_count = 0; @@ -3284,22 +3057,22 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) size += names_size; ondisk = kmalloc(size, GFP_KERNEL); if (!ondisk) - return -ENOMEM; + return ERR_PTR(-ENOMEM); ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name, 0, size, ondisk); if (ret < 0) - goto out; + goto out_err; if ((size_t)ret < size) { ret = -ENXIO; rbd_warn(rbd_dev, "short header read (want %zd got %d)", size, ret); - goto out; + goto out_err; } if (!rbd_dev_ondisk_valid(ondisk)) { ret = -ENXIO; rbd_warn(rbd_dev, "invalid header"); - goto out; + goto out_err; } names_size = le64_to_cpu(ondisk->snap_names_len); @@ -3307,13 +3080,85 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) snap_count = le32_to_cpu(ondisk->snap_count); } while (snap_count != want_count); - ret = rbd_header_from_disk(rbd_dev, ondisk); -out: + return ondisk; + +out_err: + kfree(ondisk); + + return ERR_PTR(ret); +} + +/* + * reload the ondisk the header + */ +static int rbd_read_header(struct rbd_device *rbd_dev, + struct rbd_image_header *header) +{ + struct rbd_image_header_ondisk *ondisk; + int ret; + + ondisk = rbd_dev_v1_header_read(rbd_dev); + if (IS_ERR(ondisk)) + return PTR_ERR(ondisk); + ret = rbd_header_from_disk(header, ondisk); kfree(ondisk); return ret; } +static void rbd_update_mapping_size(struct rbd_device *rbd_dev) +{ + if (rbd_dev->spec->snap_id != CEPH_NOSNAP) + return; + + if (rbd_dev->mapping.size != rbd_dev->header.image_size) { + sector_t size; + + rbd_dev->mapping.size = rbd_dev->header.image_size; + size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; + dout("setting size to %llu sectors", (unsigned long long)size); + set_capacity(rbd_dev->disk, size); + } +} + +/* + * only read the first part of the ondisk header, without the snaps info + */ +static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev) +{ + int ret; + struct rbd_image_header h; + + ret = rbd_read_header(rbd_dev, &h); + if (ret < 0) + return ret; + + down_write(&rbd_dev->header_rwsem); + + /* Update image size, and check for resize of mapped image */ + rbd_dev->header.image_size = h.image_size; + rbd_update_mapping_size(rbd_dev); + + /* rbd_dev->header.object_prefix shouldn't change */ + kfree(rbd_dev->header.snap_sizes); + kfree(rbd_dev->header.snap_names); + /* osd requests may still refer to snapc */ + ceph_put_snap_context(rbd_dev->header.snapc); + + rbd_dev->header.image_size = h.image_size; + rbd_dev->header.snapc = h.snapc; + rbd_dev->header.snap_names = h.snap_names; + rbd_dev->header.snap_sizes = h.snap_sizes; + /* Free the extra copy of the object prefix */ + if (strcmp(rbd_dev->header.object_prefix, h.object_prefix)) + rbd_warn(rbd_dev, "object prefix changed (ignoring)"); + kfree(h.object_prefix); + + up_write(&rbd_dev->header_rwsem); + + return ret; +} + /* * Clear the rbd device's EXISTS flag if the snapshot it's mapped to * has disappeared from the (just updated) snapshot context. 
@@ -3335,29 +3180,26 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev) static int rbd_dev_refresh(struct rbd_device *rbd_dev) { - u64 mapping_size; + u64 image_size; int ret; rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); - mapping_size = rbd_dev->mapping.size; + image_size = rbd_dev->header.image_size; mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); if (rbd_dev->image_format == 1) - ret = rbd_dev_v1_header_info(rbd_dev); + ret = rbd_dev_v1_refresh(rbd_dev); else - ret = rbd_dev_v2_header_info(rbd_dev); + ret = rbd_dev_v2_refresh(rbd_dev); /* If it's a mapped snapshot, validate its EXISTS flag */ rbd_exists_validate(rbd_dev); mutex_unlock(&ctl_mutex); - if (mapping_size != rbd_dev->mapping.size) { - sector_t size; - - size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; - dout("setting size to %llu sectors", (unsigned long long)size); - set_capacity(rbd_dev->disk, size); + if (ret) + rbd_warn(rbd_dev, "got notification but failed to " + " update snaps: %d\n", ret); + if (image_size != rbd_dev->header.image_size) revalidate_disk(rbd_dev->disk); - } return ret; } @@ -3561,8 +3403,6 @@ static ssize_t rbd_image_refresh(struct device *dev, int ret; ret = rbd_dev_refresh(rbd_dev); - if (ret) - rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret); return ret < 0 ? ret : size; } @@ -3661,7 +3501,6 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, spin_lock_init(&rbd_dev->lock); rbd_dev->flags = 0; - atomic_set(&rbd_dev->parent_ref, 0); INIT_LIST_HEAD(&rbd_dev->node); init_rwsem(&rbd_dev->header_rwsem); @@ -3811,7 +3650,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) __le64 snapid; void *p; void *end; - u64 pool_id; char *image_id; u64 overlap; int ret; @@ -3842,37 +3680,18 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) p = reply_buf; end = reply_buf + ret; ret = -ERANGE; - ceph_decode_64_safe(&p, end, pool_id, out_err); - if (pool_id == CEPH_NOPOOL) { - /* - * Either the parent never existed, or we have - * record of it but the image got flattened so it no - * longer has a parent. When the parent of a - * layered image disappears we immediately set the - * overlap to 0. The effect of this is that all new - * requests will be treated as if the image had no - * parent. - */ - if (rbd_dev->parent_overlap) { - rbd_dev->parent_overlap = 0; - smp_mb(); - rbd_dev_parent_put(rbd_dev); - pr_info("%s: clone image has been flattened\n", - rbd_dev->disk->disk_name); - } - + ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err); + if (parent_spec->pool_id == CEPH_NOPOOL) goto out; /* No parent? No problem. 
*/ - } /* The ceph file layout needs to fit pool id in 32 bits */ ret = -EIO; - if (pool_id > (u64)U32_MAX) { + if (parent_spec->pool_id > (u64)U32_MAX) { rbd_warn(NULL, "parent pool id too large (%llu > %u)\n", - (unsigned long long)pool_id, U32_MAX); + (unsigned long long)parent_spec->pool_id, U32_MAX); goto out_err; } - parent_spec->pool_id = pool_id; image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); if (IS_ERR(image_id)) { @@ -3883,14 +3702,9 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err); ceph_decode_64_safe(&p, end, overlap, out_err); - if (overlap) { - rbd_spec_put(rbd_dev->parent_spec); - rbd_dev->parent_spec = parent_spec; - parent_spec = NULL; /* rbd_dev now owns this */ - rbd_dev->parent_overlap = overlap; - } else { - rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n"); - } + rbd_dev->parent_overlap = overlap; + rbd_dev->parent_spec = parent_spec; + parent_spec = NULL; /* rbd_dev now owns this */ out: ret = 0; out_err: @@ -4188,7 +4002,6 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) for (i = 0; i < snap_count; i++) snapc->snaps[i] = ceph_decode_64(&p); - ceph_put_snap_context(rbd_dev->header.snapc); rbd_dev->header.snapc = snapc; dout(" snap context seq = %llu, snap_count = %u\n", @@ -4240,9 +4053,8 @@ static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, return snap_name; } -static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev) +static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev) { - bool first_time = rbd_dev->header.object_prefix == NULL; int ret; down_write(&rbd_dev->header_rwsem); @@ -4250,46 +4062,12 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev) ret = rbd_dev_v2_image_size(rbd_dev); if (ret) goto out; - - if (first_time) { - ret = rbd_dev_v2_header_onetime(rbd_dev); - if (ret) - goto out; - } - - /* - * If the image supports layering, get the parent info. We - * need to probe the first time regardless. Thereafter we - * only need to if there's a parent, to see if it has - * disappeared due to the mapped image getting flattened. - */ - if (rbd_dev->header.features & RBD_FEATURE_LAYERING && - (first_time || rbd_dev->parent_spec)) { - bool warn; - - ret = rbd_dev_v2_parent_info(rbd_dev); - if (ret) - goto out; - - /* - * Print a warning if this is the initial probe and - * the image has a parent. Don't print it if the - * image now being probed is itself a parent. We - * can tell at this point because we won't know its - * pool name yet (just its pool id). - */ - warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name; - if (first_time && warn) - rbd_warn(rbd_dev, "WARNING: kernel layering " - "is EXPERIMENTAL!"); - } - - if (rbd_dev->spec->snap_id == CEPH_NOSNAP) - if (rbd_dev->mapping.size != rbd_dev->header.image_size) - rbd_dev->mapping.size = rbd_dev->header.image_size; + rbd_update_mapping_size(rbd_dev); ret = rbd_dev_v2_snap_context(rbd_dev); dout("rbd_dev_v2_snap_context returned %d\n", ret); + if (ret) + goto out; out: up_write(&rbd_dev->header_rwsem); @@ -4706,18 +4484,16 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev) return ret; } -/* - * Undo whatever state changes are made by v1 or v2 header info - * call. 
- */ +/* Undo whatever state changes are made by v1 or v2 image probe */ + static void rbd_dev_unprobe(struct rbd_device *rbd_dev) { struct rbd_image_header *header; - /* Drop parent reference unless it's already been done (or none) */ - - if (rbd_dev->parent_overlap) - rbd_dev_parent_put(rbd_dev); + rbd_dev_remove_parent(rbd_dev); + rbd_spec_put(rbd_dev->parent_spec); + rbd_dev->parent_spec = NULL; + rbd_dev->parent_overlap = 0; /* Free dynamic fields from the header, then zero it out */ @@ -4729,22 +4505,72 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev) memset(header, 0, sizeof (*header)); } -static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev) +static int rbd_dev_v1_probe(struct rbd_device *rbd_dev) { int ret; + /* Populate rbd image metadata */ + + ret = rbd_read_header(rbd_dev, &rbd_dev->header); + if (ret < 0) + goto out_err; + + /* Version 1 images have no parent (no layering) */ + + rbd_dev->parent_spec = NULL; + rbd_dev->parent_overlap = 0; + + dout("discovered version 1 image, header name is %s\n", + rbd_dev->header_name); + + return 0; + +out_err: + kfree(rbd_dev->header_name); + rbd_dev->header_name = NULL; + kfree(rbd_dev->spec->image_id); + rbd_dev->spec->image_id = NULL; + + return ret; +} + +static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) +{ + int ret; + + ret = rbd_dev_v2_image_size(rbd_dev); + if (ret) + goto out_err; + + /* Get the object prefix (a.k.a. block_name) for the image */ + ret = rbd_dev_v2_object_prefix(rbd_dev); if (ret) goto out_err; - /* - * Get the and check features for the image. Currently the - * features are assumed to never change. - */ + /* Get the and check features for the image */ + ret = rbd_dev_v2_features(rbd_dev); if (ret) goto out_err; + /* If the image supports layering, get the parent info */ + + if (rbd_dev->header.features & RBD_FEATURE_LAYERING) { + ret = rbd_dev_v2_parent_info(rbd_dev); + if (ret) + goto out_err; + + /* + * Don't print a warning for parent images. We can + * tell this point because we won't know its pool + * name yet (just its pool id). 
+ */ + if (rbd_dev->spec->pool_name) + rbd_warn(rbd_dev, "WARNING: kernel layering " + "is EXPERIMENTAL!"); + } + /* If the image supports fancy striping, get its parameters */ if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) { @@ -4752,11 +4578,28 @@ static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev) if (ret < 0) goto out_err; } - /* No support for crypto and compression type format 2 images */ + + /* crypto and compression type aren't (yet) supported for v2 images */ + + rbd_dev->header.crypt_type = 0; + rbd_dev->header.comp_type = 0; + + /* Get the snapshot context, plus the header version */ + + ret = rbd_dev_v2_snap_context(rbd_dev); + if (ret) + goto out_err; + + dout("discovered version 2 image, header name is %s\n", + rbd_dev->header_name); return 0; out_err: - rbd_dev->header.features = 0; + rbd_dev->parent_overlap = 0; + rbd_spec_put(rbd_dev->parent_spec); + rbd_dev->parent_spec = NULL; + kfree(rbd_dev->header_name); + rbd_dev->header_name = NULL; kfree(rbd_dev->header.object_prefix); rbd_dev->header.object_prefix = NULL; @@ -4785,16 +4628,15 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev) if (!parent) goto out_err; - ret = rbd_dev_image_probe(parent, false); + ret = rbd_dev_image_probe(parent); if (ret < 0) goto out_err; rbd_dev->parent = parent; - atomic_set(&rbd_dev->parent_ref, 1); return 0; out_err: if (parent) { - rbd_dev_unparent(rbd_dev); + rbd_spec_put(rbd_dev->parent_spec); kfree(rbd_dev->header_name); rbd_dev_destroy(parent); } else { @@ -4809,6 +4651,10 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) { int ret; + ret = rbd_dev_mapping_set(rbd_dev); + if (ret) + return ret; + /* generate unique id: find highest unique id, add one */ rbd_dev_id_get(rbd_dev); @@ -4830,17 +4676,13 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) if (ret) goto err_out_blkdev; - ret = rbd_dev_mapping_set(rbd_dev); - if (ret) - goto err_out_disk; - set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); - ret = rbd_bus_add_dev(rbd_dev); if (ret) - goto err_out_mapping; + goto err_out_disk; /* Everything's ready. Announce the disk to the world. */ + set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); add_disk(rbd_dev->disk); @@ -4849,8 +4691,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) return ret; -err_out_mapping: - rbd_dev_mapping_clear(rbd_dev); err_out_disk: rbd_free_disk(rbd_dev); err_out_blkdev: @@ -4891,7 +4731,12 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev) static void rbd_dev_image_release(struct rbd_device *rbd_dev) { + int ret; + rbd_dev_unprobe(rbd_dev); + ret = rbd_dev_header_watch_sync(rbd_dev, 0); + if (ret) + rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret); kfree(rbd_dev->header_name); rbd_dev->header_name = NULL; rbd_dev->image_format = 0; @@ -4903,20 +4748,18 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev) /* * Probe for the existence of the header object for the given rbd - * device. If this image is the one being mapped (i.e., not a - * parent), initiate a watch on its header object before using that - * object to get detailed information about the rbd image. + * device. For format 2 images this includes determining the image + * id. */ -static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) +static int rbd_dev_image_probe(struct rbd_device *rbd_dev) { int ret; int tmp; /* - * Get the id from the image id object. 
Unless there's an - * error, rbd_dev->spec->image_id will be filled in with - * a dynamically-allocated string, and rbd_dev->image_format - * will be set to either 1 or 2. + * Get the id from the image id object. If it's not a + * format 2 image, we'll get ENOENT back, and we'll assume + * it's a format 1 image. */ ret = rbd_dev_image_id(rbd_dev); if (ret) @@ -4928,16 +4771,14 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) if (ret) goto err_out_format; - if (mapping) { - ret = rbd_dev_header_watch_sync(rbd_dev, true); - if (ret) - goto out_header_name; - } + ret = rbd_dev_header_watch_sync(rbd_dev, 1); + if (ret) + goto out_header_name; if (rbd_dev->image_format == 1) - ret = rbd_dev_v1_header_info(rbd_dev); + ret = rbd_dev_v1_probe(rbd_dev); else - ret = rbd_dev_v2_header_info(rbd_dev); + ret = rbd_dev_v2_probe(rbd_dev); if (ret) goto err_out_watch; @@ -4946,22 +4787,15 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) goto err_out_probe; ret = rbd_dev_probe_parent(rbd_dev); - if (ret) - goto err_out_probe; - - dout("discovered format %u image, header name is %s\n", - rbd_dev->image_format, rbd_dev->header_name); + if (!ret) + return 0; - return 0; err_out_probe: rbd_dev_unprobe(rbd_dev); err_out_watch: - if (mapping) { - tmp = rbd_dev_header_watch_sync(rbd_dev, false); - if (tmp) - rbd_warn(rbd_dev, "unable to tear down " - "watch request (%d)\n", tmp); - } + tmp = rbd_dev_header_watch_sync(rbd_dev, 0); + if (tmp) + rbd_warn(rbd_dev, "unable to tear down watch request\n"); out_header_name: kfree(rbd_dev->header_name); rbd_dev->header_name = NULL; @@ -4985,7 +4819,6 @@ static ssize_t rbd_add(struct bus_type *bus, struct rbd_spec *spec = NULL; struct rbd_client *rbdc; struct ceph_osd_client *osdc; - bool read_only; int rc = -ENOMEM; if (!try_module_get(THIS_MODULE)) @@ -4995,15 +4828,13 @@ static ssize_t rbd_add(struct bus_type *bus, rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec); if (rc < 0) goto err_out_module; - read_only = rbd_opts->read_only; - kfree(rbd_opts); - rbd_opts = NULL; /* done with this */ rbdc = rbd_get_client(ceph_opts); if (IS_ERR(rbdc)) { rc = PTR_ERR(rbdc); goto err_out_args; } + ceph_opts = NULL; /* rbd_dev client now owns this */ /* pick the pool */ osdc = &rbdc->client->osdc; @@ -5027,29 +4858,27 @@ static ssize_t rbd_add(struct bus_type *bus, rbdc = NULL; /* rbd_dev now owns this */ spec = NULL; /* rbd_dev now owns this */ - rc = rbd_dev_image_probe(rbd_dev, true); + rbd_dev->mapping.read_only = rbd_opts->read_only; + kfree(rbd_opts); + rbd_opts = NULL; /* done with this */ + + rc = rbd_dev_image_probe(rbd_dev); if (rc < 0) goto err_out_rbd_dev; - /* If we are mapping a snapshot it must be marked read-only */ - - if (rbd_dev->spec->snap_id != CEPH_NOSNAP) - read_only = true; - rbd_dev->mapping.read_only = read_only; - rc = rbd_dev_device_setup(rbd_dev); - if (rc) { - rbd_dev_image_release(rbd_dev); - goto err_out_module; - } - - return count; + if (!rc) + return count; + rbd_dev_image_release(rbd_dev); err_out_rbd_dev: rbd_dev_destroy(rbd_dev); err_out_client: rbd_put_client(rbdc); err_out_args: + if (ceph_opts) + ceph_destroy_options(ceph_opts); + kfree(rbd_opts); rbd_spec_put(spec); err_out_module: module_put(THIS_MODULE); @@ -5082,7 +4911,7 @@ static void rbd_dev_device_release(struct device *dev) rbd_free_disk(rbd_dev); clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); - rbd_dev_mapping_clear(rbd_dev); + rbd_dev_clear_mapping(rbd_dev); unregister_blkdev(rbd_dev->major, rbd_dev->name); 
rbd_dev->major = 0; rbd_dev_id_put(rbd_dev); @@ -5149,13 +4978,10 @@ static ssize_t rbd_remove(struct bus_type *bus, spin_unlock_irq(&rbd_dev->lock); if (ret < 0) goto done; + ret = count; rbd_bus_del_dev(rbd_dev); - ret = rbd_dev_header_watch_sync(rbd_dev, false); - if (ret) - rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret); rbd_dev_image_release(rbd_dev); module_put(THIS_MODULE); - ret = count; done: mutex_unlock(&ctl_mutex); diff --git a/trunk/drivers/block/xsysace.c b/trunk/drivers/block/xsysace.c index 3fd130fdfbc1..f8ef15f37c5e 100644 --- a/trunk/drivers/block/xsysace.c +++ b/trunk/drivers/block/xsysace.c @@ -1160,7 +1160,8 @@ static int ace_probe(struct platform_device *dev) dev_dbg(&dev->dev, "ace_probe(%p)\n", dev); /* device id and bus width */ - if (of_property_read_u32(dev->dev.of_node, "port-number", &id)) + of_property_read_u32(dev->dev.of_node, "port-number", &id); + if (id < 0) id = 0; if (of_find_property(dev->dev.of_node, "8-bit", NULL)) bus_width = ACE_BUS_WIDTH_8; diff --git a/trunk/drivers/bluetooth/Kconfig b/trunk/drivers/bluetooth/Kconfig index 11a6104a1e4f..fdfd61a2d523 100644 --- a/trunk/drivers/bluetooth/Kconfig +++ b/trunk/drivers/bluetooth/Kconfig @@ -201,7 +201,7 @@ config BT_MRVL The core driver to support Marvell Bluetooth devices. This driver is required if you want to support - Marvell Bluetooth devices, such as 8688/8787/8797/8897. + Marvell Bluetooth devices, such as 8688/8787/8797. Say Y here to compile Marvell Bluetooth driver into the kernel or say M to compile it as module. @@ -214,7 +214,7 @@ config BT_MRVL_SDIO The driver for Marvell Bluetooth chipsets with SDIO interface. This driver is required if you want to use Marvell Bluetooth - devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8897 + devices with SDIO interface. Currently SD8688/SD8787/SD8797 chipsets are supported. 
Say Y here to compile support for Marvell BT-over-SDIO driver diff --git a/trunk/drivers/bluetooth/btmrvl_main.c b/trunk/drivers/bluetooth/btmrvl_main.c index 9a9f51875df5..3a4343b3bd6d 100644 --- a/trunk/drivers/bluetooth/btmrvl_main.c +++ b/trunk/drivers/bluetooth/btmrvl_main.c @@ -498,10 +498,6 @@ static int btmrvl_service_main_thread(void *data) add_wait_queue(&thread->wait_q, &wait); set_current_state(TASK_INTERRUPTIBLE); - if (kthread_should_stop()) { - BT_DBG("main_thread: break from main thread"); - break; - } if (adapter->wakeup_tries || ((!adapter->int_count) && @@ -517,6 +513,11 @@ static int btmrvl_service_main_thread(void *data) BT_DBG("main_thread woke up"); + if (kthread_should_stop()) { + BT_DBG("main_thread: break from main thread"); + break; + } + spin_lock_irqsave(&priv->driver_lock, flags); if (adapter->int_count) { adapter->int_count = 0; diff --git a/trunk/drivers/bluetooth/btmrvl_sdio.c b/trunk/drivers/bluetooth/btmrvl_sdio.c index 13693b7a0d5c..c63488c54f4a 100644 --- a/trunk/drivers/bluetooth/btmrvl_sdio.c +++ b/trunk/drivers/bluetooth/btmrvl_sdio.c @@ -82,23 +82,6 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = { .io_port_2 = 0x7a, }; -static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = { - .cfg = 0x00, - .host_int_mask = 0x02, - .host_intstatus = 0x03, - .card_status = 0x50, - .sq_read_base_addr_a0 = 0x60, - .sq_read_base_addr_a1 = 0x61, - .card_revision = 0xbc, - .card_fw_status0 = 0xc0, - .card_fw_status1 = 0xc1, - .card_rx_len = 0xc2, - .card_rx_unit = 0xc3, - .io_port_0 = 0xd8, - .io_port_1 = 0xd9, - .io_port_2 = 0xda, -}; - static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = { .helper = "mrvl/sd8688_helper.bin", .firmware = "mrvl/sd8688.bin", @@ -120,13 +103,6 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = { .sd_blksz_fw_dl = 256, }; -static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = { - .helper = NULL, - .firmware = "mrvl/sd8897_uapsta.bin", - .reg = &btmrvl_reg_88xx, - .sd_blksz_fw_dl = 256, -}; - static const struct sdio_device_id btmrvl_sdio_ids[] = { /* Marvell SD8688 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105), @@ -140,9 +116,6 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = { /* Marvell SD8797 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A), .driver_data = (unsigned long) &btmrvl_sdio_sd8797 }, - /* Marvell SD8897 Bluetooth device */ - { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E), - .driver_data = (unsigned long) &btmrvl_sdio_sd8897 }, { } /* Terminating entry */ }; @@ -1221,4 +1194,3 @@ MODULE_FIRMWARE("mrvl/sd8688_helper.bin"); MODULE_FIRMWARE("mrvl/sd8688.bin"); MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin"); -MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin"); diff --git a/trunk/drivers/char/hw_random/mxc-rnga.c b/trunk/drivers/char/hw_random/mxc-rnga.c index 19a12ac64a9e..4ca35e8a5d8c 100644 --- a/trunk/drivers/char/hw_random/mxc-rnga.c +++ b/trunk/drivers/char/hw_random/mxc-rnga.c @@ -167,6 +167,11 @@ static int __init mxc_rnga_probe(struct platform_device *pdev) clk_prepare_enable(mxc_rng->clk); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + err = -ENOENT; + goto err_region; + } + mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(mxc_rng->mem)) { err = PTR_ERR(mxc_rng->mem); @@ -184,6 +189,7 @@ static int __init mxc_rnga_probe(struct platform_device *pdev) return 0; err_ioremap: +err_region: clk_disable_unprepare(mxc_rng->clk); out: diff --git 
a/trunk/drivers/char/hw_random/omap-rng.c b/trunk/drivers/char/hw_random/omap-rng.c index d2903e772270..749dc16ca2cc 100644 --- a/trunk/drivers/char/hw_random/omap-rng.c +++ b/trunk/drivers/char/hw_random/omap-rng.c @@ -119,6 +119,11 @@ static int omap_rng_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, priv); priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!priv->mem_res) { + ret = -ENOENT; + goto err_ioremap; + } + priv->base = devm_ioremap_resource(&pdev->dev, priv->mem_res); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); diff --git a/trunk/drivers/char/ipmi/ipmi_bt_sm.c b/trunk/drivers/char/ipmi/ipmi_bt_sm.c index a22a7a502740..cdd4c09fda96 100644 --- a/trunk/drivers/char/ipmi/ipmi_bt_sm.c +++ b/trunk/drivers/char/ipmi/ipmi_bt_sm.c @@ -95,9 +95,9 @@ struct si_sm_data { enum bt_states state; unsigned char seq; /* BT sequence number */ struct si_sm_io *io; - unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ + unsigned char write_data[IPMI_MAX_MSG_LENGTH]; int write_count; - unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ + unsigned char read_data[IPMI_MAX_MSG_LENGTH]; int read_count; int truncated; long timeout; /* microseconds countdown */ diff --git a/trunk/drivers/char/ipmi/ipmi_devintf.c b/trunk/drivers/char/ipmi/ipmi_devintf.c index d5a5f020810a..9eb360ff8cab 100644 --- a/trunk/drivers/char/ipmi/ipmi_devintf.c +++ b/trunk/drivers/char/ipmi/ipmi_devintf.c @@ -837,25 +837,13 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd, return ipmi_ioctl(filep, cmd, arg); } } - -static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd, - unsigned long arg) -{ - int ret; - - mutex_lock(&ipmi_mutex); - ret = compat_ipmi_ioctl(filep, cmd, arg); - mutex_unlock(&ipmi_mutex); - - return ret; -} #endif static const struct file_operations ipmi_fops = { .owner = THIS_MODULE, .unlocked_ioctl = ipmi_unlocked_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = unlocked_compat_ipmi_ioctl, + .compat_ioctl = compat_ipmi_ioctl, #endif .open = ipmi_open, .release = ipmi_release, diff --git a/trunk/drivers/char/ipmi/ipmi_msghandler.c b/trunk/drivers/char/ipmi/ipmi_msghandler.c index 4445fa164a2d..4d439d2fcfd6 100644 --- a/trunk/drivers/char/ipmi/ipmi_msghandler.c +++ b/trunk/drivers/char/ipmi/ipmi_msghandler.c @@ -2037,11 +2037,12 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; - entry->name = kstrdup(name, GFP_KERNEL); + entry->name = kmalloc(strlen(name)+1, GFP_KERNEL); if (!entry->name) { kfree(entry); return -ENOMEM; } + strcpy(entry->name, name); file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data); if (!file) { diff --git a/trunk/drivers/char/ipmi/ipmi_si_intf.c b/trunk/drivers/char/ipmi/ipmi_si_intf.c index af4b23ffc5a6..313538abe63c 100644 --- a/trunk/drivers/char/ipmi/ipmi_si_intf.c +++ b/trunk/drivers/char/ipmi/ipmi_si_intf.c @@ -663,10 +663,8 @@ static void handle_transaction_done(struct smi_info *smi_info) /* We got the flags from the SMI, now handle them. 
*/ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); if (msg[2] != 0) { - dev_warn(smi_info->dev, - "Couldn't get irq info: %x.\n", msg[2]); - dev_warn(smi_info->dev, - "Maybe ok, but ipmi might run very slowly.\n"); + dev_warn(smi_info->dev, "Could not enable interrupts" + ", failed get, using polled mode.\n"); smi_info->si_state = SI_NORMAL; } else { msg[0] = (IPMI_NETFN_APP_REQUEST << 2); @@ -687,12 +685,10 @@ static void handle_transaction_done(struct smi_info *smi_info) /* We got the flags from the SMI, now handle them. */ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); - if (msg[2] != 0) { - dev_warn(smi_info->dev, - "Couldn't set irq info: %x.\n", msg[2]); - dev_warn(smi_info->dev, - "Maybe ok, but ipmi might run very slowly.\n"); - } else + if (msg[2] != 0) + dev_warn(smi_info->dev, "Could not enable interrupts" + ", failed set, using polled mode.\n"); + else smi_info->interrupt_disabled = 0; smi_info->si_state = SI_NORMAL; break; diff --git a/trunk/drivers/char/lp.c b/trunk/drivers/char/lp.c index 0913d79424d3..dafd9ac6428f 100644 --- a/trunk/drivers/char/lp.c +++ b/trunk/drivers/char/lp.c @@ -622,12 +622,9 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd, return -EFAULT; break; case LPGETSTATUS: - if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) - return -EINTR; lp_claim_parport_or_block (&lp_table[minor]); status = r_str(minor); lp_release_parport (&lp_table[minor]); - mutex_unlock(&lp_table[minor].port_mutex); if (copy_to_user(argp, &status, sizeof(int))) return -EFAULT; diff --git a/trunk/drivers/char/random.c b/trunk/drivers/char/random.c index 35487e8ded59..cd9a6211dcad 100644 --- a/trunk/drivers/char/random.c +++ b/trunk/drivers/char/random.c @@ -865,24 +865,16 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, if (r->entropy_count / 8 < min + reserved) { nbytes = 0; } else { - int entropy_count, orig; -retry: - entropy_count = orig = ACCESS_ONCE(r->entropy_count); /* If limited, never pull more than available */ - if (r->limit && nbytes + reserved >= entropy_count / 8) - nbytes = entropy_count/8 - reserved; - - if (entropy_count / 8 >= nbytes + reserved) { - entropy_count -= nbytes*8; - if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) - goto retry; - } else { - entropy_count = reserved; - if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) - goto retry; - } + if (r->limit && nbytes + reserved >= r->entropy_count / 8) + nbytes = r->entropy_count/8 - reserved; - if (entropy_count < random_write_wakeup_thresh) + if (r->entropy_count / 8 >= nbytes + reserved) + r->entropy_count -= nbytes*8; + else + r->entropy_count = reserved; + + if (r->entropy_count < random_write_wakeup_thresh) wakeup_write = 1; } @@ -965,23 +957,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, { ssize_t ret = 0, i; __u8 tmp[EXTRACT_SIZE]; - unsigned long flags; /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */ - if (fips_enabled) { - spin_lock_irqsave(&r->lock, flags); - if (!r->last_data_init) { - r->last_data_init = true; - spin_unlock_irqrestore(&r->lock, flags); - trace_extract_entropy(r->name, EXTRACT_SIZE, - r->entropy_count, _RET_IP_); - xfer_secondary_pool(r, EXTRACT_SIZE); - extract_buf(r, tmp); - spin_lock_irqsave(&r->lock, flags); - memcpy(r->last_data, tmp, EXTRACT_SIZE); - } - spin_unlock_irqrestore(&r->lock, flags); - } + if (fips_enabled && !r->last_data_init) + nbytes += EXTRACT_SIZE; trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_); 
xfer_secondary_pool(r, nbytes); @@ -991,6 +970,19 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, extract_buf(r, tmp); if (fips_enabled) { + unsigned long flags; + + + /* prime last_data value if need be, per fips 140-2 */ + if (!r->last_data_init) { + spin_lock_irqsave(&r->lock, flags); + memcpy(r->last_data, tmp, EXTRACT_SIZE); + r->last_data_init = true; + nbytes -= EXTRACT_SIZE; + spin_unlock_irqrestore(&r->lock, flags); + extract_buf(r, tmp); + } + spin_lock_irqsave(&r->lock, flags); if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) panic("Hardware RNG duplicated output!\n"); diff --git a/trunk/drivers/char/ttyprintk.c b/trunk/drivers/char/ttyprintk.c index d5d2e4a985aa..4945bd3d18d0 100644 --- a/trunk/drivers/char/ttyprintk.c +++ b/trunk/drivers/char/ttyprintk.c @@ -179,6 +179,7 @@ static int __init ttyprintk_init(void) { int ret = -ENOMEM; + tpk_port.port.ops = &null_ops; mutex_init(&tpk_port.port_write_mutex); ttyprintk_driver = tty_alloc_driver(1, @@ -189,7 +190,6 @@ static int __init ttyprintk_init(void) return PTR_ERR(ttyprintk_driver); tty_port_init(&tpk_port.port); - tpk_port.port.ops = &null_ops; ttyprintk_driver->driver_name = "ttyprintk"; ttyprintk_driver->name = "ttyprintk"; diff --git a/trunk/drivers/clk/clk-si5351.c b/trunk/drivers/clk/clk-si5351.c index 24f553673b72..892728412e9d 100644 --- a/trunk/drivers/clk/clk-si5351.c +++ b/trunk/drivers/clk/clk-si5351.c @@ -932,7 +932,7 @@ static unsigned long si5351_clkout_recalc_rate(struct clk_hw *hw, unsigned char reg; unsigned char rdiv; - if (hwdata->num <= 5) + if (hwdata->num > 5) reg = si5351_msynth_params_address(hwdata->num) + 2; else reg = SI5351_CLK6_7_OUTPUT_DIVIDER; @@ -1477,16 +1477,6 @@ static int si5351_i2c_probe(struct i2c_client *client, return -EINVAL; } drvdata->onecell.clks[n] = clk; - - /* set initial clkout rate */ - if (pdata->clkout[n].rate != 0) { - int ret; - ret = clk_set_rate(clk, pdata->clkout[n].rate); - if (ret != 0) { - dev_err(&client->dev, "Cannot set rate : %d\n", - ret); - } - } } ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get, diff --git a/trunk/drivers/clk/clk-vt8500.c b/trunk/drivers/clk/clk-vt8500.c index 553ac35bcc91..debf688afa8e 100644 --- a/trunk/drivers/clk/clk-vt8500.c +++ b/trunk/drivers/clk/clk-vt8500.c @@ -183,7 +183,7 @@ static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate, writel(divisor, cdev->div_reg); vt8500_pmc_wait_busy(); - spin_unlock_irqrestore(cdev->lock, flags); + spin_lock_irqsave(cdev->lock, flags); return 0; } diff --git a/trunk/drivers/clk/clk.c b/trunk/drivers/clk/clk.c index 1144e8c7579d..934cfd18f72d 100644 --- a/trunk/drivers/clk/clk.c +++ b/trunk/drivers/clk/clk.c @@ -1955,7 +1955,6 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) /* XXX the notifier code should handle this better */ if (!cn->notifier_head.head) { srcu_cleanup_notifier_head(&cn->notifier_head); - list_del(&cn->node); kfree(cn); } diff --git a/trunk/drivers/clk/mxs/clk-imx28.c b/trunk/drivers/clk/mxs/clk-imx28.c index 4faf0afc44cd..d0e5eed146de 100644 --- a/trunk/drivers/clk/mxs/clk-imx28.c +++ b/trunk/drivers/clk/mxs/clk-imx28.c @@ -10,7 +10,6 @@ */ #include -#include #include #include #include diff --git a/trunk/drivers/clk/samsung/clk-exynos4.c b/trunk/drivers/clk/samsung/clk-exynos4.c index 3c1f88868f29..d0940e69d034 100644 --- a/trunk/drivers/clk/samsung/clk-exynos4.c +++ b/trunk/drivers/clk/samsung/clk-exynos4.c @@ -791,8 +791,7 @@ struct samsung_gate_clock exynos4210_gate_clks[] __initdata = { 
GATE(smmu_pcie, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0), GATE(modemif, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0), GATE(chipid, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0), - GATE(sysreg, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0, - CLK_IGNORE_UNUSED, 0), + GATE(sysreg, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0), GATE(hdmi_cec, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, 0), GATE(smmu_rotator, "smmu_rotator", "aclk200", E4210_GATE_IP_IMAGE, 4, 0, 0), @@ -820,8 +819,7 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = { GATE(smmu_mdma, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0, 0), GATE(mipi_hsi, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0), GATE(chipid, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0), - GATE(sysreg, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, - CLK_IGNORE_UNUSED, 0), + GATE(sysreg, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, 0, 0), GATE(hdmi_cec, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, 0), GATE(sclk_mdnie0, "sclk_mdnie0", "div_mdnie0", SRC_MASK_LCD0, 4, CLK_SET_RATE_PARENT, 0), diff --git a/trunk/drivers/clk/samsung/clk-exynos5250.c b/trunk/drivers/clk/samsung/clk-exynos5250.c index 22d7699e7ced..5c97e75924a8 100644 --- a/trunk/drivers/clk/samsung/clk-exynos5250.c +++ b/trunk/drivers/clk/samsung/clk-exynos5250.c @@ -155,7 +155,7 @@ static __initdata unsigned long exynos5250_clk_regs[] = { /* list of all parent clock list */ PNAME(mout_apll_p) = { "fin_pll", "fout_apll", }; -PNAME(mout_cpu_p) = { "mout_apll", "sclk_mpll", }; +PNAME(mout_cpu_p) = { "mout_apll", "mout_mpll", }; PNAME(mout_mpll_fout_p) = { "fout_mplldiv2", "fout_mpll" }; PNAME(mout_mpll_p) = { "fin_pll", "mout_mpll_fout" }; PNAME(mout_bpll_fout_p) = { "fout_bplldiv2", "fout_bpll" }; @@ -208,10 +208,10 @@ struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = { }; struct samsung_mux_clock exynos5250_mux_clks[] __initdata = { - MUX_A(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, "mout_apll"), - MUX_A(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"), + MUX(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1), + MUX(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1), MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1), - MUX_A(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"), + MUX(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1), MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1), MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1), MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1), @@ -378,7 +378,7 @@ struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0), GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0), GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0), - GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0), + GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, 0, 0), GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0), GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0), GATE(tzpc2, "tzpc2", "aclk66", GATE_IP_PERIS, 8, 0, 0), diff --git a/trunk/drivers/clk/samsung/clk-pll.c b/trunk/drivers/clk/samsung/clk-pll.c index 362f12dcd944..89135f6be116 100644 --- a/trunk/drivers/clk/samsung/clk-pll.c +++ b/trunk/drivers/clk/samsung/clk-pll.c @@ -111,8 +111,7 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct samsung_clk_pll36xx *pll = to_clk_pll36xx(hw); - u32 mdiv, pdiv, sdiv, pll_con0, pll_con1; - s16 kdiv; + u32 mdiv, 
pdiv, sdiv, kdiv, pll_con0, pll_con1; u64 fvco = parent_rate; pll_con0 = __raw_readl(pll->con_reg); @@ -120,7 +119,7 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw, mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK; pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK; sdiv = (pll_con0 >> PLL36XX_SDIV_SHIFT) & PLL36XX_SDIV_MASK; - kdiv = (s16)(pll_con1 & PLL36XX_KDIV_MASK); + kdiv = pll_con1 & PLL36XX_KDIV_MASK; fvco *= (mdiv << 16) + kdiv; do_div(fvco, (pdiv << sdiv)); diff --git a/trunk/drivers/clk/spear/spear3xx_clock.c b/trunk/drivers/clk/spear/spear3xx_clock.c index 080c3c5e33f6..f9ec43fd1320 100644 --- a/trunk/drivers/clk/spear/spear3xx_clock.c +++ b/trunk/drivers/clk/spear/spear3xx_clock.c @@ -369,7 +369,7 @@ static void __init spear320_clk_init(void __iomem *soc_config_base) clk_register_clkdev(clk, NULL, "60100000.serial"); } #else -static inline void spear320_clk_init(void __iomem *soc_config_base) { } +static inline void spear320_clk_init(void) { } #endif void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base) diff --git a/trunk/drivers/clk/tegra/clk-tegra20.c b/trunk/drivers/clk/tegra/clk-tegra20.c index 075db0c99edb..8292a00c3de9 100644 --- a/trunk/drivers/clk/tegra/clk-tegra20.c +++ b/trunk/drivers/clk/tegra/clk-tegra20.c @@ -872,14 +872,6 @@ static void __init tegra20_periph_clk_init(void) struct clk *clk; int i; - /* ac97 */ - clk = tegra_clk_register_periph_gate("ac97", "pll_a_out0", - TEGRA_PERIPH_ON_APB, - clk_base, 0, 3, &periph_l_regs, - periph_clk_enb_refcnt); - clk_register_clkdev(clk, NULL, "tegra20-ac97"); - clks[ac97] = clk; - /* apbdma */ clk = tegra_clk_register_periph_gate("apbdma", "pclk", 0, clk_base, 0, 34, &periph_h_regs, @@ -1242,6 +1234,9 @@ static __initdata struct tegra_clk_init_table init_table[] = { {uartc, pll_p, 0, 0}, {uartd, pll_p, 0, 0}, {uarte, pll_p, 0, 0}, + {usbd, clk_max, 12000000, 0}, + {usb2, clk_max, 12000000, 0}, + {usb3, clk_max, 12000000, 0}, {pll_a, clk_max, 56448000, 1}, {pll_a_out0, clk_max, 11289600, 1}, {cdev1, clk_max, 0, 1}, diff --git a/trunk/drivers/clk/tegra/clk-tegra30.c b/trunk/drivers/clk/tegra/clk-tegra30.c index ba99e3844106..c6921f538e28 100644 --- a/trunk/drivers/clk/tegra/clk-tegra30.c +++ b/trunk/drivers/clk/tegra/clk-tegra30.c @@ -1598,12 +1598,6 @@ static void __init tegra30_periph_clk_init(void) clk_register_clkdev(clk, "afi", "tegra-pcie"); clks[afi] = clk; - /* pciex */ - clk = tegra_clk_register_periph_gate("pciex", "pll_e", 0, clk_base, 0, - 74, &periph_u_regs, periph_clk_enb_refcnt); - clk_register_clkdev(clk, "pciex", "tegra-pcie"); - clks[pciex] = clk; - /* kfuse */ clk = tegra_clk_register_periph_gate("kfuse", "clk_m", TEGRA_PERIPH_ON_APB, @@ -1722,6 +1716,11 @@ static void __init tegra30_fixed_clk_init(void) 1, 0, &cml_lock); clk_register_clkdev(clk, "cml1", NULL); clks[cml1] = clk; + + /* pciex */ + clk = clk_register_fixed_rate(NULL, "pciex", "pll_e", 0, 100000000); + clk_register_clkdev(clk, "pciex", NULL); + clks[pciex] = clk; } static void __init tegra30_osc_clk_init(void) diff --git a/trunk/drivers/clk/ux500/clk-sysctrl.c b/trunk/drivers/clk/ux500/clk-sysctrl.c index e364c9d4aa60..bc7e9bde792b 100644 --- a/trunk/drivers/clk/ux500/clk-sysctrl.c +++ b/trunk/drivers/clk/ux500/clk-sysctrl.c @@ -145,13 +145,7 @@ static struct clk *clk_reg_sysctrl(struct device *dev, return ERR_PTR(-ENOMEM); } - /* set main clock registers */ - clk->reg_sel[0] = reg_sel[0]; - clk->reg_bits[0] = reg_bits[0]; - clk->reg_mask[0] = reg_mask[0]; - - /* 
handle clocks with more than one parent */ - for (i = 1; i < num_parents; i++) { + for (i = 0; i < num_parents; i++) { clk->reg_sel[i] = reg_sel[i]; clk->reg_bits[i] = reg_bits[i]; clk->reg_mask[i] = reg_mask[i]; diff --git a/trunk/drivers/clk/ux500/u8500_clk.c b/trunk/drivers/clk/ux500/u8500_clk.c index 80069c370a47..0b4f35a5ffc2 100644 --- a/trunk/drivers/clk/ux500/u8500_clk.c +++ b/trunk/drivers/clk/ux500/u8500_clk.c @@ -325,7 +325,7 @@ void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base, BIT(0), 0); clk_register_clkdev(clk, "fsmc", NULL); - clk_register_clkdev(clk, NULL, "smsc911x.0"); + clk_register_clkdev(clk, NULL, "smsc911x"); clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base, BIT(1), 0); diff --git a/trunk/drivers/clk/x86/clk-lpt.c b/trunk/drivers/clk/x86/clk-lpt.c index 4f45eee9e33b..5cf4f4686406 100644 --- a/trunk/drivers/clk/x86/clk-lpt.c +++ b/trunk/drivers/clk/x86/clk-lpt.c @@ -15,29 +15,22 @@ #include #include #include -#include #include #define PRV_CLOCK_PARAMS 0x800 static int lpt_clk_probe(struct platform_device *pdev) { - struct lpss_clk_data *drvdata; struct clk *clk; - drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL); - if (!drvdata) - return -ENOMEM; - /* LPSS free running clock */ - drvdata->name = "lpss_clk"; - clk = clk_register_fixed_rate(&pdev->dev, drvdata->name, NULL, - CLK_IS_ROOT, 100000000); + clk = clk_register_fixed_rate(&pdev->dev, "lpss_clk", NULL, CLK_IS_ROOT, + 100000000); if (IS_ERR(clk)) return PTR_ERR(clk); - drvdata->clk = clk; - platform_set_drvdata(pdev, drvdata); + /* Shared DMA clock */ + clk_register_clkdev(clk, "hclk", "INTL9C60.0.auto"); return 0; } diff --git a/trunk/drivers/cpufreq/Kconfig b/trunk/drivers/cpufreq/Kconfig index 534fcb825153..a1488f58f6ca 100644 --- a/trunk/drivers/cpufreq/Kconfig +++ b/trunk/drivers/cpufreq/Kconfig @@ -47,7 +47,7 @@ config CPU_FREQ_STAT_DETAILS choice prompt "Default CPUFreq governor" - default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ + default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110 default CPU_FREQ_DEFAULT_GOV_PERFORMANCE help This option sets which CPUFreq governor shall be loaded at diff --git a/trunk/drivers/cpufreq/Kconfig.arm b/trunk/drivers/cpufreq/Kconfig.arm index 6e57543fe0b9..f3af18b9acc5 100644 --- a/trunk/drivers/cpufreq/Kconfig.arm +++ b/trunk/drivers/cpufreq/Kconfig.arm @@ -3,17 +3,16 @@ # config ARM_BIG_LITTLE_CPUFREQ - tristate "Generic ARM big LITTLE CPUfreq driver" - depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK - help - This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. + tristate + depends on ARM_CPU_TOPOLOGY config ARM_DT_BL_CPUFREQ - tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver" - depends on ARM_BIG_LITTLE_CPUFREQ && OF + tristate "Generic ARM big LITTLE CPUfreq driver probed via DT" + select ARM_BIG_LITTLE_CPUFREQ + depends on OF && HAVE_CLK help - This enables probing via DT for Generic CPUfreq driver for ARM - big.LITTLE platform. This gets frequency tables from DT. + This enables the Generic CPUfreq driver for ARM big.LITTLE platform. + This gets frequency tables from DT. 
config ARM_EXYNOS_CPUFREQ bool "SAMSUNG EXYNOS SoCs" diff --git a/trunk/drivers/cpufreq/Kconfig.x86 b/trunk/drivers/cpufreq/Kconfig.x86 index 6bd63d63d356..2b8a8c374548 100644 --- a/trunk/drivers/cpufreq/Kconfig.x86 +++ b/trunk/drivers/cpufreq/Kconfig.x86 @@ -272,7 +272,7 @@ config X86_LONGHAUL config X86_E_POWERSAVER tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)" select CPU_FREQ_TABLE - depends on X86_32 && ACPI_PROCESSOR + depends on X86_32 help This adds the CPUFreq driver for VIA C7 processors. However, this driver does not have any safeguards to prevent operating the CPU out of spec diff --git a/trunk/drivers/cpufreq/acpi-cpufreq.c b/trunk/drivers/cpufreq/acpi-cpufreq.c index edc089e9d0c4..11b8b4b54ceb 100644 --- a/trunk/drivers/cpufreq/acpi-cpufreq.c +++ b/trunk/drivers/cpufreq/acpi-cpufreq.c @@ -347,11 +347,11 @@ static u32 get_cur_val(const struct cpumask *mask) switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) { case SYSTEM_INTEL_MSR_CAPABLE: cmd.type = SYSTEM_INTEL_MSR_CAPABLE; - cmd.addr.msr.reg = MSR_IA32_PERF_CTL; + cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; break; case SYSTEM_AMD_MSR_CAPABLE: cmd.type = SYSTEM_AMD_MSR_CAPABLE; - cmd.addr.msr.reg = MSR_AMD_PERF_CTL; + cmd.addr.msr.reg = MSR_AMD_PERF_STATUS; break; case SYSTEM_IO_CAPABLE: cmd.type = SYSTEM_IO_CAPABLE; diff --git a/trunk/drivers/cpufreq/arm_big_little.c b/trunk/drivers/cpufreq/arm_big_little.c index 5d7f53fcd6f5..dbdf677d2f36 100644 --- a/trunk/drivers/cpufreq/arm_big_little.c +++ b/trunk/drivers/cpufreq/arm_big_little.c @@ -40,6 +40,11 @@ static struct clk *clk[MAX_CLUSTERS]; static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS]; static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)}; +static int cpu_to_cluster(int cpu) +{ + return topology_physical_package_id(cpu); +} + static unsigned int bL_cpufreq_get(unsigned int cpu) { u32 cur_cluster = cpu_to_cluster(cpu); @@ -187,7 +192,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); - dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu); + dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu); return 0; } diff --git a/trunk/drivers/cpufreq/arm_big_little.h b/trunk/drivers/cpufreq/arm_big_little.h index 79b2ce17884d..70f18fc12d4a 100644 --- a/trunk/drivers/cpufreq/arm_big_little.h +++ b/trunk/drivers/cpufreq/arm_big_little.h @@ -34,11 +34,6 @@ struct cpufreq_arm_bL_ops { int (*init_opp_table)(struct device *cpu_dev); }; -static inline int cpu_to_cluster(int cpu) -{ - return topology_physical_package_id(cpu); -} - int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops); void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops); diff --git a/trunk/drivers/cpufreq/arm_big_little_dt.c b/trunk/drivers/cpufreq/arm_big_little_dt.c index fd9e3ea6a480..44be3115375c 100644 --- a/trunk/drivers/cpufreq/arm_big_little_dt.c +++ b/trunk/drivers/cpufreq/arm_big_little_dt.c @@ -19,75 +19,69 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include #include #include #include #include #include #include -#include #include #include #include "arm_big_little.h" -/* get cpu node with valid operating-points */ -static struct device_node *get_cpu_node_with_valid_op(int cpu) +static int dt_init_opp_table(struct device *cpu_dev) { - struct device_node *np = NULL, *parent; - int count = 0; + struct device_node *np, *parent; + int count = 0, ret; parent = of_find_node_by_path("/cpus"); if (!parent) { pr_err("failed to find OF /cpus\n"); - return NULL; + 
return -ENOENT; } for_each_child_of_node(parent, np) { - if (count++ != cpu) + if (count++ != cpu_dev->id) continue; if (!of_get_property(np, "operating-points", NULL)) { - of_node_put(np); - np = NULL; + ret = -ENODATA; + } else { + cpu_dev->of_node = np; + ret = of_init_opp_table(cpu_dev); } + of_node_put(np); + of_node_put(parent); - break; + return ret; } - of_node_put(parent); - return np; -} - -static int dt_init_opp_table(struct device *cpu_dev) -{ - struct device_node *np; - int ret; - - np = get_cpu_node_with_valid_op(cpu_dev->id); - if (!np) - return -ENODATA; - - cpu_dev->of_node = np; - ret = of_init_opp_table(cpu_dev); - of_node_put(np); - - return ret; + return -ENODEV; } static int dt_get_transition_latency(struct device *cpu_dev) { - struct device_node *np; + struct device_node *np, *parent; u32 transition_latency = CPUFREQ_ETERNAL; + int count = 0; - np = get_cpu_node_with_valid_op(cpu_dev->id); - if (!np) - return CPUFREQ_ETERNAL; + parent = of_find_node_by_path("/cpus"); + if (!parent) { + pr_err("failed to find OF /cpus\n"); + return -ENOENT; + } + + for_each_child_of_node(parent, np) { + if (count++ != cpu_dev->id) + continue; - of_property_read_u32(np, "clock-latency", &transition_latency); - of_node_put(np); + of_property_read_u32(np, "clock-latency", &transition_latency); + of_node_put(np); + of_node_put(parent); - pr_debug("%s: clock-latency: %d\n", __func__, transition_latency); - return transition_latency; + return 0; + } + + return -ENODEV; } static struct cpufreq_arm_bL_ops dt_bL_ops = { @@ -96,33 +90,17 @@ static struct cpufreq_arm_bL_ops dt_bL_ops = { .init_opp_table = dt_init_opp_table, }; -static int generic_bL_probe(struct platform_device *pdev) +static int generic_bL_init(void) { - struct device_node *np; - - np = get_cpu_node_with_valid_op(0); - if (!np) - return -ENODEV; - - of_node_put(np); return bL_cpufreq_register(&dt_bL_ops); } +module_init(generic_bL_init); -static int generic_bL_remove(struct platform_device *pdev) +static void generic_bL_exit(void) { - bL_cpufreq_unregister(&dt_bL_ops); - return 0; + return bL_cpufreq_unregister(&dt_bL_ops); } - -static struct platform_driver generic_bL_platdrv = { - .driver = { - .name = "arm-bL-cpufreq-dt", - .owner = THIS_MODULE, - }, - .probe = generic_bL_probe, - .remove = generic_bL_remove, -}; -module_platform_driver(generic_bL_platdrv); +module_exit(generic_bL_exit); MODULE_AUTHOR("Viresh Kumar "); MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT"); diff --git a/trunk/drivers/cpufreq/cpufreq-cpu0.c b/trunk/drivers/cpufreq/cpufreq-cpu0.c index ad1fde277661..3ab8294eab04 100644 --- a/trunk/drivers/cpufreq/cpufreq-cpu0.c +++ b/trunk/drivers/cpufreq/cpufreq-cpu0.c @@ -45,7 +45,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, struct cpufreq_freqs freqs; struct opp *opp; unsigned long volt = 0, volt_old = 0, tol = 0; - long freq_Hz, freq_exact; + long freq_Hz; unsigned int index; int ret; @@ -60,7 +60,6 @@ static int cpu0_set_target(struct cpufreq_policy *policy, freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000); if (freq_Hz < 0) freq_Hz = freq_table[index].frequency * 1000; - freq_exact = freq_Hz; freqs.new = freq_Hz / 1000; freqs.old = clk_get_rate(cpu_clk) / 1000; @@ -99,7 +98,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, } } - ret = clk_set_rate(cpu_clk, freq_exact); + ret = clk_set_rate(cpu_clk, freqs.new * 1000); if (ret) { pr_err("failed to set clock rate: %d\n", ret); if (cpu_reg) @@ -190,29 +189,12 @@ static int 
cpu0_cpufreq_probe(struct platform_device *pdev) if (!np) { pr_err("failed to find cpu0 node\n"); - ret = -ENOENT; - goto out_put_parent; + return -ENOENT; } cpu_dev = &pdev->dev; cpu_dev->of_node = np; - cpu_reg = devm_regulator_get(cpu_dev, "cpu0"); - if (IS_ERR(cpu_reg)) { - /* - * If cpu0 regulator supply node is present, but regulator is - * not yet registered, we should try defering probe. - */ - if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) { - dev_err(cpu_dev, "cpu0 regulator not ready, retry\n"); - ret = -EPROBE_DEFER; - goto out_put_node; - } - pr_warn("failed to get cpu0 regulator: %ld\n", - PTR_ERR(cpu_reg)); - cpu_reg = NULL; - } - cpu_clk = devm_clk_get(cpu_dev, NULL); if (IS_ERR(cpu_clk)) { ret = PTR_ERR(cpu_clk); @@ -220,6 +202,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) goto out_put_node; } + cpu_reg = devm_regulator_get(cpu_dev, "cpu0"); + if (IS_ERR(cpu_reg)) { + pr_warn("failed to get cpu0 regulator\n"); + cpu_reg = NULL; + } + ret = of_init_opp_table(cpu_dev); if (ret) { pr_err("failed to init OPP table: %d\n", ret); @@ -276,8 +264,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) opp_free_cpufreq_table(cpu_dev, &freq_table); out_put_node: of_node_put(np); -out_put_parent: - of_node_put(parent); return ret; } diff --git a/trunk/drivers/cpufreq/cpufreq.c b/trunk/drivers/cpufreq/cpufreq.c index 2d53f47d1747..1b8a48eaf90f 100644 --- a/trunk/drivers/cpufreq/cpufreq.c +++ b/trunk/drivers/cpufreq/cpufreq.c @@ -1075,14 +1075,14 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif __func__, cpu_dev->id, cpu); } - if ((cpus == 1) && (cpufreq_driver->target)) - __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); - pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); cpufreq_cpu_put(data); /* If cpu is last user of policy, free policy */ if (cpus == 1) { + if (cpufreq_driver->target) + __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); + lock_policy_rwsem_read(cpu); kobj = &data->kobj; cmp = &data->kobj_unregister; @@ -1729,23 +1729,18 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, /* end old governor */ if (data->governor) { __cpufreq_governor(data, CPUFREQ_GOV_STOP); - unlock_policy_rwsem_write(policy->cpu); __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); - lock_policy_rwsem_write(policy->cpu); } /* start new governor */ data->governor = policy->governor; if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) { - if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) { + if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) failed = 0; - } else { - unlock_policy_rwsem_write(policy->cpu); + else __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); - lock_policy_rwsem_write(policy->cpu); - } } if (failed) { @@ -1837,13 +1832,15 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, if (dev) { switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: cpufreq_add_dev(dev, NULL); break; case CPU_DOWN_PREPARE: - case CPU_UP_CANCELED_FROZEN: + case CPU_DOWN_PREPARE_FROZEN: __cpufreq_remove_dev(dev, NULL); break; case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: cpufreq_add_dev(dev, NULL); break; } diff --git a/trunk/drivers/cpufreq/cpufreq_governor.c b/trunk/drivers/cpufreq/cpufreq_governor.c index dc9b72e25c1a..443442df113b 100644 --- a/trunk/drivers/cpufreq/cpufreq_governor.c +++ b/trunk/drivers/cpufreq/cpufreq_governor.c @@ -26,7 +26,6 @@ #include #include #include -#include #include "cpufreq_governor.h" @@ -181,10 +180,8 @@ void gov_queue_work(struct dbs_data *dbs_data, struct 
cpufreq_policy *policy, if (!all_cpus) { __gov_queue_work(smp_processor_id(), dbs_data, delay); } else { - get_online_cpus(); for_each_cpu(i, policy->cpus) __gov_queue_work(i, dbs_data, delay); - put_online_cpus(); } } EXPORT_SYMBOL_GPL(gov_queue_work); @@ -258,7 +255,6 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, if (have_governor_per_policy()) { WARN_ON(dbs_data); } else if (dbs_data) { - dbs_data->usage_count++; policy->governor_data = dbs_data; return 0; } @@ -270,7 +266,6 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, } dbs_data->cdata = cdata; - dbs_data->usage_count = 1; rc = cdata->init(dbs_data); if (rc) { pr_err("%s: POLICY_INIT: init() failed\n", __func__); @@ -299,8 +294,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate, latency * LATENCY_MULTIPLIER)); - if ((cdata->governor == GOV_CONSERVATIVE) && - (!policy->governor->initialized)) { + if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; cpufreq_register_notifier(cs_ops->notifier_block, @@ -312,12 +306,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, return 0; case CPUFREQ_GOV_POLICY_EXIT: - if (!--dbs_data->usage_count) { + if ((policy->governor->initialized == 1) || + have_governor_per_policy()) { sysfs_remove_group(get_governor_parent_kobj(policy), get_sysfs_attr(dbs_data)); - if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) && - (policy->governor->initialized == 1)) { + if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; cpufreq_unregister_notifier(cs_ops->notifier_block, diff --git a/trunk/drivers/cpufreq/cpufreq_governor.h b/trunk/drivers/cpufreq/cpufreq_governor.h index e16a96130cb3..8ac33538d0bd 100644 --- a/trunk/drivers/cpufreq/cpufreq_governor.h +++ b/trunk/drivers/cpufreq/cpufreq_governor.h @@ -211,7 +211,6 @@ struct common_dbs_data { struct dbs_data { struct common_dbs_data *cdata; unsigned int min_sampling_rate; - int usage_count; void *tuners; /* dbs_mutex protects dbs_enable in governor start/stop */ diff --git a/trunk/drivers/cpufreq/cpufreq_ondemand.c b/trunk/drivers/cpufreq/cpufreq_ondemand.c index 93eb5cbcc1f6..b0ffef96bf77 100644 --- a/trunk/drivers/cpufreq/cpufreq_ondemand.c +++ b/trunk/drivers/cpufreq/cpufreq_ondemand.c @@ -47,8 +47,6 @@ static struct od_ops od_ops; static struct cpufreq_governor cpufreq_gov_ondemand; #endif -static unsigned int default_powersave_bias; - static void ondemand_powersave_bias_init_cpu(int cpu) { struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); @@ -545,10 +543,11 @@ static int od_init(struct dbs_data *dbs_data) tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; tuners->ignore_nice = 0; - tuners->powersave_bias = default_powersave_bias; + tuners->powersave_bias = 0; tuners->io_is_busy = should_io_be_busy(); dbs_data->tuners = tuners; + pr_info("%s: tuners %p\n", __func__, tuners); mutex_init(&dbs_data->mutex); return 0; } @@ -587,7 +586,6 @@ static void od_set_powersave_bias(unsigned int powersave_bias) unsigned int cpu; cpumask_t done; - default_powersave_bias = powersave_bias; cpumask_clear(&done); get_online_cpus(); @@ -596,17 +594,11 @@ static void od_set_powersave_bias(unsigned int powersave_bias) continue; policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy; - if (!policy) - continue; - - cpumask_or(&done, &done, policy->cpus); - - if (policy->governor != &cpufreq_gov_ondemand) - continue; - dbs_data = policy->governor_data; 
od_tuners = dbs_data->tuners; - od_tuners->powersave_bias = default_powersave_bias; + od_tuners->powersave_bias = powersave_bias; + + cpumask_or(&done, &done, policy->cpus); } put_online_cpus(); } diff --git a/trunk/drivers/cpufreq/cpufreq_stats.c b/trunk/drivers/cpufreq/cpufreq_stats.c index fb65decffa28..bfd6273fd873 100644 --- a/trunk/drivers/cpufreq/cpufreq_stats.c +++ b/trunk/drivers/cpufreq/cpufreq_stats.c @@ -349,16 +349,15 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: cpufreq_update_policy(cpu); break; case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: cpufreq_stats_free_sysfs(cpu); break; case CPU_DEAD: - cpufreq_stats_free_table(cpu); - break; - case CPU_UP_CANCELED_FROZEN: - cpufreq_stats_free_sysfs(cpu); + case CPU_DEAD_FROZEN: cpufreq_stats_free_table(cpu); break; } diff --git a/trunk/drivers/cpufreq/intel_pstate.c b/trunk/drivers/cpufreq/intel_pstate.c index 07f2840ad805..cc3a8e6c92be 100644 --- a/trunk/drivers/cpufreq/intel_pstate.c +++ b/trunk/drivers/cpufreq/intel_pstate.c @@ -48,7 +48,12 @@ static inline int32_t div_fp(int32_t x, int32_t y) } struct sample { + ktime_t start_time; + ktime_t end_time; int core_pct_busy; + int pstate_pct_busy; + u64 duration_us; + u64 idletime_us; u64 aperf; u64 mperf; int freq; @@ -81,9 +86,13 @@ struct cpudata { struct pstate_adjust_policy *pstate_policy; struct pstate_data pstate; struct _pid pid; + struct _pid idle_pid; int min_pstate_count; + int idle_mode; + ktime_t prev_sample; + u64 prev_idle_time_us; u64 prev_aperf; u64 prev_mperf; int sample_ptr; @@ -115,8 +124,6 @@ struct perf_limits { int min_perf_pct; int32_t max_perf; int32_t min_perf; - int max_policy_pct; - int max_sysfs_pct; }; static struct perf_limits limits = { @@ -125,8 +132,6 @@ static struct perf_limits limits = { .max_perf = int_tofp(1), .min_perf_pct = 0, .min_perf = 0, - .max_policy_pct = 100, - .max_sysfs_pct = 100, }; static inline void pid_reset(struct _pid *pid, int setpoint, int busy, @@ -197,6 +202,19 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu) 0); } +static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu) +{ + pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct); + pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct); + pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct); + + pid_reset(&cpu->idle_pid, + 75, + 50, + cpu->pstate_policy->deadband, + 0); +} + static inline void intel_pstate_reset_all_pid(void) { unsigned int cpu; @@ -284,8 +302,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, if (ret != 1) return -EINVAL; - limits.max_sysfs_pct = clamp_t(int, input, 0 , 100); - limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); + limits.max_perf_pct = clamp_t(int, input, 0 , 100); limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); return count; } @@ -391,8 +408,9 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) if (pstate == cpu->pstate.current_pstate) return; +#ifndef MODULE trace_cpu_frequency(pstate * 100000, cpu->cpu); - +#endif cpu->pstate.current_pstate = pstate; wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); @@ -432,26 +450,48 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu, struct sample *sample) { u64 core_pct; + sample->pstate_pct_busy = 100 - div64_u64( + sample->idletime_us * 100, + sample->duration_us); core_pct = div64_u64(sample->aperf * 100, sample->mperf); sample->freq = 
cpu->pstate.max_pstate * core_pct * 1000; - sample->core_pct_busy = core_pct; + sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct), + 100); } static inline void intel_pstate_sample(struct cpudata *cpu) { + ktime_t now; + u64 idle_time_us; u64 aperf, mperf; + now = ktime_get(); + idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL); + rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); - cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; - cpu->samples[cpu->sample_ptr].aperf = aperf; - cpu->samples[cpu->sample_ptr].mperf = mperf; - cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; - cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; - - intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); + /* for the first sample, don't actually record a sample, just + * set the baseline */ + if (cpu->prev_idle_time_us > 0) { + cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; + cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample; + cpu->samples[cpu->sample_ptr].end_time = now; + cpu->samples[cpu->sample_ptr].duration_us = + ktime_us_delta(now, cpu->prev_sample); + cpu->samples[cpu->sample_ptr].idletime_us = + idle_time_us - cpu->prev_idle_time_us; + + cpu->samples[cpu->sample_ptr].aperf = aperf; + cpu->samples[cpu->sample_ptr].mperf = mperf; + cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; + cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; + + intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); + } + cpu->prev_sample = now; + cpu->prev_idle_time_us = idle_time_us; cpu->prev_aperf = aperf; cpu->prev_mperf = mperf; } @@ -465,6 +505,16 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) mod_timer_pinned(&cpu->timer, jiffies + delay); } +static inline void intel_pstate_idle_mode(struct cpudata *cpu) +{ + cpu->idle_mode = 1; +} + +static inline void intel_pstate_normal_mode(struct cpudata *cpu) +{ + cpu->idle_mode = 0; +} + static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) { int32_t busy_scaled; @@ -497,21 +547,50 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) intel_pstate_pstate_decrease(cpu, steps); } +static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu) +{ + int busy_scaled; + struct _pid *pid; + int ctl = 0; + int steps; + + pid = &cpu->idle_pid; + + busy_scaled = intel_pstate_get_scaled_busy(cpu); + + ctl = pid_calc(pid, 100 - busy_scaled); + + steps = abs(ctl); + if (ctl < 0) + intel_pstate_pstate_decrease(cpu, steps); + else + intel_pstate_pstate_increase(cpu, steps); + + if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) + intel_pstate_normal_mode(cpu); +} + static void intel_pstate_timer_func(unsigned long __data) { struct cpudata *cpu = (struct cpudata *) __data; intel_pstate_sample(cpu); - intel_pstate_adjust_busy_pstate(cpu); + if (!cpu->idle_mode) + intel_pstate_adjust_busy_pstate(cpu); + else + intel_pstate_adjust_idle_pstate(cpu); + +#if defined(XPERF_FIX) if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) { cpu->min_pstate_count++; if (!(cpu->min_pstate_count % 5)) { intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); + intel_pstate_idle_mode(cpu); } } else cpu->min_pstate_count = 0; - +#endif intel_pstate_set_sample_time(cpu); } @@ -521,7 +600,6 @@ static void intel_pstate_timer_func(unsigned long __data) static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(0x2a, default_policy), ICPU(0x2d, default_policy), - ICPU(0x3a, default_policy), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); @@ 
-553,6 +631,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum) (unsigned long)cpu; cpu->timer.expires = jiffies + HZ/100; intel_pstate_busy_pid_reset(cpu); + intel_pstate_idle_pid_reset(cpu); intel_pstate_sample(cpu); intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); @@ -596,9 +675,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); - limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq; - limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100); - limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); + limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq; + limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100); limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); return 0; @@ -710,9 +788,10 @@ static int __init intel_pstate_init(void) pr_info("Intel P-state driver initializing.\n"); - all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus()); + all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus()); if (!all_cpu_data) return -ENOMEM; + memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus()); rc = cpufreq_register_driver(&intel_pstate_driver); if (rc) diff --git a/trunk/drivers/cpufreq/kirkwood-cpufreq.c b/trunk/drivers/cpufreq/kirkwood-cpufreq.c index b2644af985ec..d36ea8dc96eb 100644 --- a/trunk/drivers/cpufreq/kirkwood-cpufreq.c +++ b/trunk/drivers/cpufreq/kirkwood-cpufreq.c @@ -171,6 +171,10 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) priv.dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "Cannot get memory resource\n"); + return -ENODEV; + } priv.base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv.base)) return PTR_ERR(priv.base); diff --git a/trunk/drivers/cpufreq/loongson2_cpufreq.c b/trunk/drivers/cpufreq/loongson2_cpufreq.c index d53912768946..84889573b566 100644 --- a/trunk/drivers/cpufreq/loongson2_cpufreq.c +++ b/trunk/drivers/cpufreq/loongson2_cpufreq.c @@ -18,7 +18,6 @@ #include #include -#include #include @@ -201,7 +200,6 @@ static void loongson2_cpu_wait(void) LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ spin_unlock_irqrestore(&loongson2_wait_lock, flags); - local_irq_enable(); } static int __init cpufreq_init(void) diff --git a/trunk/drivers/crypto/caam/caamalg.c b/trunk/drivers/crypto/caam/caamalg.c index bf416a8391a7..765fdf5ce579 100644 --- a/trunk/drivers/crypto/caam/caamalg.c +++ b/trunk/drivers/crypto/caam/caamalg.c @@ -1154,7 +1154,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, - DMA_TO_DEVICE, assoc_chained); + DMA_BIDIRECTIONAL, assoc_chained); if (likely(req->src == req->dst)) { sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_BIDIRECTIONAL, src_chained); @@ -1336,7 +1336,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, - DMA_TO_DEVICE, assoc_chained); + DMA_BIDIRECTIONAL, assoc_chained); if (likely(req->src == req->dst)) { sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? 
: 1, DMA_BIDIRECTIONAL, src_chained); diff --git a/trunk/drivers/crypto/nx/nx-aes-cbc.c b/trunk/drivers/crypto/nx/nx-aes-cbc.c index 35d483f8db66..a76d4c4f29f5 100644 --- a/trunk/drivers/crypto/nx/nx-aes-cbc.c +++ b/trunk/drivers/crypto/nx/nx-aes-cbc.c @@ -126,7 +126,6 @@ struct crypto_alg nx_cbc_aes_alg = { .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_type = &crypto_blkcipher_type, - .cra_alignmask = 0xf, .cra_module = THIS_MODULE, .cra_init = nx_crypto_ctx_aes_cbc_init, .cra_exit = nx_crypto_ctx_exit, diff --git a/trunk/drivers/crypto/nx/nx-aes-ecb.c b/trunk/drivers/crypto/nx/nx-aes-ecb.c index 7bbc9a81da21..ba5f1611336f 100644 --- a/trunk/drivers/crypto/nx/nx-aes-ecb.c +++ b/trunk/drivers/crypto/nx/nx-aes-ecb.c @@ -123,7 +123,6 @@ struct crypto_alg nx_ecb_aes_alg = { .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = AES_BLOCK_SIZE, - .cra_alignmask = 0xf, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, diff --git a/trunk/drivers/crypto/nx/nx-aes-gcm.c b/trunk/drivers/crypto/nx/nx-aes-gcm.c index 6cca6c392b00..c8109edc5cfb 100644 --- a/trunk/drivers/crypto/nx/nx-aes-gcm.c +++ b/trunk/drivers/crypto/nx/nx-aes-gcm.c @@ -219,7 +219,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) if (enc) NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; else - nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req)); + nbytes -= AES_BLOCK_SIZE; csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; diff --git a/trunk/drivers/crypto/nx/nx-sha256.c b/trunk/drivers/crypto/nx/nx-sha256.c index 67024f2f0b78..9767315f8c0b 100644 --- a/trunk/drivers/crypto/nx/nx-sha256.c +++ b/trunk/drivers/crypto/nx/nx-sha256.c @@ -69,7 +69,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, * 1: <= SHA256_BLOCK_SIZE: copy into state, return 0 * 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover */ - if (len + sctx->count < SHA256_BLOCK_SIZE) { + if (len + sctx->count <= SHA256_BLOCK_SIZE) { memcpy(sctx->buf + sctx->count, data, len); sctx->count += len; goto out; @@ -110,8 +110,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, atomic_inc(&(nx_ctx->stats->sha256_ops)); /* copy the leftover back into the state struct */ - if (leftover) - memcpy(sctx->buf, data + len - leftover, leftover); + memcpy(sctx->buf, data + len - leftover, leftover); sctx->count = leftover; csbcpb->cpb.sha256.message_bit_length += (u64) @@ -131,7 +130,6 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) struct nx_sg *in_sg, *out_sg; int rc; - if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { /* we've hit the nx chip previously, now we're finalizing, * so copy over the partial digest */ @@ -164,7 +162,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) atomic_inc(&(nx_ctx->stats->sha256_ops)); - atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8, + atomic64_add(csbcpb->cpb.sha256.message_bit_length, &(nx_ctx->stats->sha256_bytes)); memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); out: diff --git a/trunk/drivers/crypto/nx/nx-sha512.c b/trunk/drivers/crypto/nx/nx-sha512.c index 08eee1122349..3177b8c3d5f1 100644 --- a/trunk/drivers/crypto/nx/nx-sha512.c +++ b/trunk/drivers/crypto/nx/nx-sha512.c @@ -69,7 +69,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, * 1: <= SHA512_BLOCK_SIZE: copy into state, return 0 * 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover */ - if ((u64)len + sctx->count[0] < 
SHA512_BLOCK_SIZE) { + if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) { memcpy(sctx->buf + sctx->count[0], data, len); sctx->count[0] += len; goto out; @@ -110,8 +110,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, atomic_inc(&(nx_ctx->stats->sha512_ops)); /* copy the leftover back into the state struct */ - if (leftover) - memcpy(sctx->buf, data + len - leftover, leftover); + memcpy(sctx->buf, data + len - leftover, leftover); sctx->count[0] = leftover; spbc_bits = csbcpb->cpb.sha512.spbc * 8; @@ -169,7 +168,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) goto out; atomic_inc(&(nx_ctx->stats->sha512_ops)); - atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8, + atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo, &(nx_ctx->stats->sha512_bytes)); memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); diff --git a/trunk/drivers/crypto/nx/nx.c b/trunk/drivers/crypto/nx/nx.c index bbdab6e5ccf0..c767f232e693 100644 --- a/trunk/drivers/crypto/nx/nx.c +++ b/trunk/drivers/crypto/nx/nx.c @@ -211,20 +211,44 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, { struct nx_sg *nx_insg = nx_ctx->in_sg; struct nx_sg *nx_outsg = nx_ctx->out_sg; + struct blkcipher_walk walk; + int rc; + + blkcipher_walk_init(&walk, dst, src, nbytes); + rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); + if (rc) + goto out; if (iv) - memcpy(iv, desc->info, AES_BLOCK_SIZE); + memcpy(iv, walk.iv, AES_BLOCK_SIZE); - nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes); - nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes); + while (walk.nbytes) { + nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr, + walk.nbytes, nx_ctx->ap->sglen); + nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr, + walk.nbytes, nx_ctx->ap->sglen); + + rc = blkcipher_walk_done(desc, &walk, 0); + if (rc) + break; + } + + if (walk.nbytes) { + nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr, + walk.nbytes, nx_ctx->ap->sglen); + nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr, + walk.nbytes, nx_ctx->ap->sglen); + + rc = 0; + } /* these lengths should be negative, which will indicate to phyp that * the input and output parameters are scatterlists, not linear * buffers */ nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg); - - return 0; +out: + return rc; } /** @@ -430,8 +454,6 @@ static int nx_register_algs(void) if (rc) goto out; - nx_driver.of.status = NX_OKAY; - rc = crypto_register_alg(&nx_ecb_aes_alg); if (rc) goto out; @@ -476,6 +498,8 @@ static int nx_register_algs(void) if (rc) goto out_unreg_s512; + nx_driver.of.status = NX_OKAY; + goto out; out_unreg_s512: diff --git a/trunk/drivers/crypto/sahara.c b/trunk/drivers/crypto/sahara.c index c3dc1c04a5df..a97bb6c1596c 100644 --- a/trunk/drivers/crypto/sahara.c +++ b/trunk/drivers/crypto/sahara.c @@ -863,7 +863,7 @@ static struct of_device_id sahara_dt_ids[] = { { .compatible = "fsl,imx27-sahara" }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, sahara_dt_ids); +MODULE_DEVICE_TABLE(platform, sahara_dt_ids); static int sahara_probe(struct platform_device *pdev) { diff --git a/trunk/drivers/dma/acpi-dma.c b/trunk/drivers/dma/acpi-dma.c index 5a18f82f732a..ba6fc62e9651 100644 --- a/trunk/drivers/dma/acpi-dma.c +++ b/trunk/drivers/dma/acpi-dma.c @@ -4,8 +4,7 @@ * Based on of-dma.c * * Copyright (C) 2013, Intel Corporation - * Authors: Andy Shevchenko - * Mika Westerberg + 
* Author: Andy Shevchenko * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -17,124 +16,12 @@ #include #include #include -#include #include #include static LIST_HEAD(acpi_dma_list); static DEFINE_MUTEX(acpi_dma_lock); -/** - * acpi_dma_parse_resource_group - match device and parse resource group - * @grp: CSRT resource group - * @adev: ACPI device to match with - * @adma: struct acpi_dma of the given DMA controller - * - * Returns 1 on success, 0 when no information is available, or appropriate - * errno value on error. - * - * In order to match a device from DSDT table to the corresponding CSRT device - * we use MMIO address and IRQ. - */ -static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, - struct acpi_device *adev, struct acpi_dma *adma) -{ - const struct acpi_csrt_shared_info *si; - struct list_head resource_list; - struct resource_list_entry *rentry; - resource_size_t mem = 0, irq = 0; - u32 vendor_id; - int ret; - - if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info)) - return -ENODEV; - - INIT_LIST_HEAD(&resource_list); - ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); - if (ret <= 0) - return 0; - - list_for_each_entry(rentry, &resource_list, node) { - if (resource_type(&rentry->res) == IORESOURCE_MEM) - mem = rentry->res.start; - else if (resource_type(&rentry->res) == IORESOURCE_IRQ) - irq = rentry->res.start; - } - - acpi_dev_free_resource_list(&resource_list); - - /* Consider initial zero values as resource not found */ - if (mem == 0 && irq == 0) - return 0; - - si = (const struct acpi_csrt_shared_info *)&grp[1]; - - /* Match device by MMIO and IRQ */ - if (si->mmio_base_low != mem || si->gsi_interrupt != irq) - return 0; - - vendor_id = le32_to_cpu(grp->vendor_id); - dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n", - (char *)&vendor_id, grp->device_id, grp->revision); - - /* Check if the request line range is available */ - if (si->base_request_line == 0 && si->num_handshake_signals == 0) - return 0; - - adma->base_request_line = si->base_request_line; - adma->end_request_line = si->base_request_line + - si->num_handshake_signals - 1; - - dev_dbg(&adev->dev, "request line base: 0x%04x end: 0x%04x\n", - adma->base_request_line, adma->end_request_line); - - return 1; -} - -/** - * acpi_dma_parse_csrt - parse CSRT to exctract additional DMA resources - * @adev: ACPI device to match with - * @adma: struct acpi_dma of the given DMA controller - * - * CSRT or Core System Resources Table is a proprietary ACPI table - * introduced by Microsoft. This table can contain devices that are not in - * the system DSDT table. In particular DMA controllers might be described - * here. - * - * We are using this table to get the request line range of the specific DMA - * controller to be used later. 
- * - */ -static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma) -{ - struct acpi_csrt_group *grp, *end; - struct acpi_table_csrt *csrt; - acpi_status status; - int ret; - - status = acpi_get_table(ACPI_SIG_CSRT, 0, - (struct acpi_table_header **)&csrt); - if (ACPI_FAILURE(status)) { - if (status != AE_NOT_FOUND) - dev_warn(&adev->dev, "failed to get the CSRT table\n"); - return; - } - - grp = (struct acpi_csrt_group *)(csrt + 1); - end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length); - - while (grp < end) { - ret = acpi_dma_parse_resource_group(grp, adev, adma); - if (ret < 0) { - dev_warn(&adev->dev, - "error in parsing resource group\n"); - return; - } - - grp = (struct acpi_csrt_group *)((void *)grp + grp->length); - } -} - /** * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers * @dev: struct device of DMA controller @@ -174,8 +61,6 @@ int acpi_dma_controller_register(struct device *dev, adma->acpi_dma_xlate = acpi_dma_xlate; adma->data = data; - acpi_dma_parse_csrt(adev, adma); - /* Now queue acpi_dma controller structure in list */ mutex_lock(&acpi_dma_lock); list_add_tail(&adma->dma_controllers, &acpi_dma_list); @@ -264,45 +149,6 @@ void devm_acpi_dma_controller_free(struct device *dev) } EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); -/** - * acpi_dma_update_dma_spec - prepare dma specifier to pass to translation function - * @adma: struct acpi_dma of DMA controller - * @dma_spec: dma specifier to update - * - * Returns 0, if no information is avaiable, -1 on mismatch, and 1 otherwise. - * - * Accordingly to ACPI 5.0 Specification Table 6-170 "Fixed DMA Resource - * Descriptor": - * DMA Request Line bits is a platform-relative number uniquely - * identifying the request line assigned. Request line-to-Controller - * mapping is done in a controller-specific OS driver. - * That's why we can safely adjust slave_id when the appropriate controller is - * found. - */ -static int acpi_dma_update_dma_spec(struct acpi_dma *adma, - struct acpi_dma_spec *dma_spec) -{ - /* Set link to the DMA controller device */ - dma_spec->dev = adma->dev; - - /* Check if the request line range is available */ - if (adma->base_request_line == 0 && adma->end_request_line == 0) - return 0; - - /* Check if slave_id falls to the range */ - if (dma_spec->slave_id < adma->base_request_line || - dma_spec->slave_id > adma->end_request_line) - return -1; - - /* - * Here we adjust slave_id. It should be a relative number to the base - * request line. - */ - dma_spec->slave_id -= adma->base_request_line; - - return 1; -} - struct acpi_dma_parser_data { struct acpi_dma_spec dma_spec; size_t index; @@ -347,7 +193,6 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, struct acpi_device *adev; struct acpi_dma *adma; struct dma_chan *chan = NULL; - int found; /* Check if the device was enumerated by ACPI */ if (!dev || !ACPI_HANDLE(dev)) @@ -374,20 +219,9 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, mutex_lock(&acpi_dma_lock); list_for_each_entry(adma, &acpi_dma_list, dma_controllers) { - /* - * We are not going to call translation function if slave_id - * doesn't fall to the request range. - */ - found = acpi_dma_update_dma_spec(adma, dma_spec); - if (found < 0) - continue; + dma_spec->dev = adma->dev; chan = adma->acpi_dma_xlate(dma_spec, adma); - /* - * Try to get a channel only from the DMA controller that - * matches the slave_id. 
See acpi_dma_update_dma_spec() - * description for the details. - */ - if (found > 0 || chan) + if (chan) break; } diff --git a/trunk/drivers/dma/dmatest.c b/trunk/drivers/dma/dmatest.c index e88ded2c8d2f..d8ce4ecfef18 100644 --- a/trunk/drivers/dma/dmatest.c +++ b/trunk/drivers/dma/dmatest.c @@ -716,7 +716,8 @@ static int dmatest_func(void *data) } dma_async_issue_pending(chan); - wait_event_freezable_timeout(done_wait, done.done, + wait_event_freezable_timeout(done_wait, + done.done || kthread_should_stop(), msecs_to_jiffies(params->timeout)); status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); @@ -996,6 +997,7 @@ static void stop_threaded_test(struct dmatest_info *info) static int __restart_threaded_test(struct dmatest_info *info, bool run) { struct dmatest_params *params = &info->params; + int ret; /* Stop any running test first */ __stop_threaded_test(info); @@ -1010,23 +1012,13 @@ static int __restart_threaded_test(struct dmatest_info *info, bool run) memcpy(params, &info->dbgfs_params, sizeof(*params)); /* Run test with new parameters */ - return __run_threaded_test(info); -} - -static bool __is_threaded_test_run(struct dmatest_info *info) -{ - struct dmatest_chan *dtc; - - list_for_each_entry(dtc, &info->channels, node) { - struct dmatest_thread *thread; - - list_for_each_entry(thread, &dtc->threads, node) { - if (!thread->done) - return true; - } + ret = __run_threaded_test(info); + if (ret) { + __stop_threaded_test(info); + pr_err("dmatest: Can't run test\n"); } - return false; + return ret; } static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos, @@ -1099,10 +1091,22 @@ static ssize_t dtf_read_run(struct file *file, char __user *user_buf, { struct dmatest_info *info = file->private_data; char buf[3]; + struct dmatest_chan *dtc; + bool alive = false; mutex_lock(&info->lock); + list_for_each_entry(dtc, &info->channels, node) { + struct dmatest_thread *thread; + + list_for_each_entry(thread, &dtc->threads, node) { + if (!thread->done) { + alive = true; + break; + } + } + } - if (__is_threaded_test_run(info)) { + if (alive) { buf[0] = 'Y'; } else { __stop_threaded_test(info); @@ -1128,12 +1132,7 @@ static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, if (strtobool(buf, &bv) == 0) { mutex_lock(&info->lock); - - if (__is_threaded_test_run(info)) - ret = -EBUSY; - else - ret = __restart_threaded_test(info, bv); - + ret = __restart_threaded_test(info, bv); mutex_unlock(&info->lock); } diff --git a/trunk/drivers/dma/ste_dma40.c b/trunk/drivers/dma/ste_dma40.c index 71bf4ec300ea..1734feec47b1 100644 --- a/trunk/drivers/dma/ste_dma40.c +++ b/trunk/drivers/dma/ste_dma40.c @@ -1566,12 +1566,10 @@ static void dma_tc_handle(struct d40_chan *d40c) return; } - if (d40_queue_start(d40c) == NULL) { + if (d40_queue_start(d40c) == NULL) d40c->busy = false; - - pm_runtime_mark_last_busy(d40c->base->dev); - pm_runtime_put_autosuspend(d40c->base->dev); - } + pm_runtime_mark_last_busy(d40c->base->dev); + pm_runtime_put_autosuspend(d40c->base->dev); d40_desc_remove(d40d); d40_desc_done(d40c, d40d); diff --git a/trunk/drivers/dma/tegra20-apb-dma.c b/trunk/drivers/dma/tegra20-apb-dma.c index 33f59ecd256e..ce193409ebd3 100644 --- a/trunk/drivers/dma/tegra20-apb-dma.c +++ b/trunk/drivers/dma/tegra20-apb-dma.c @@ -1273,6 +1273,11 @@ static int tegra_dma_probe(struct platform_device *pdev) platform_set_drvdata(pdev, tdma); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "No mem resource for DMA\n"); + return -EINVAL; 
+ } + tdma->base_addr = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(tdma->base_addr)) return PTR_ERR(tdma->base_addr); diff --git a/trunk/drivers/edac/amd64_edac_inj.c b/trunk/drivers/edac/amd64_edac_inj.c index 845f04786c2d..8c171fa1cb9b 100644 --- a/trunk/drivers/edac/amd64_edac_inj.c +++ b/trunk/drivers/edac/amd64_edac_inj.c @@ -202,9 +202,9 @@ static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR, amd64_inject_word_show, amd64_inject_word_store); static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR, amd64_inject_ecc_vector_show, amd64_inject_ecc_vector_store); -static DEVICE_ATTR(inject_write, S_IWUSR, +static DEVICE_ATTR(inject_write, S_IRUGO | S_IWUSR, NULL, amd64_inject_write_store); -static DEVICE_ATTR(inject_read, S_IWUSR, +static DEVICE_ATTR(inject_read, S_IRUGO | S_IWUSR, NULL, amd64_inject_read_store); diff --git a/trunk/drivers/firmware/efi/efivars.c b/trunk/drivers/firmware/efi/efivars.c index 8bd1bb6dbe47..b623c599e572 100644 --- a/trunk/drivers/firmware/efi/efivars.c +++ b/trunk/drivers/firmware/efi/efivars.c @@ -523,11 +523,13 @@ static void efivar_update_sysfs_entries(struct work_struct *work) struct efivar_entry *entry; int err; + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + /* Add new sysfs entries */ while (1) { - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return; + memset(entry, 0, sizeof(*entry)); err = efivar_init(efivar_update_sysfs_entry, entry, true, false, &efivar_sysfs_list); diff --git a/trunk/drivers/gpio/Kconfig b/trunk/drivers/gpio/Kconfig index 573c449c49b9..87d567089f13 100644 --- a/trunk/drivers/gpio/Kconfig +++ b/trunk/drivers/gpio/Kconfig @@ -636,7 +636,7 @@ config GPIO_MAX7301 config GPIO_MCP23S08 tristate "Microchip MCP23xxx I/O expander" - depends on (SPI_MASTER && !I2C) || I2C + depends on SPI_MASTER || I2C help SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017 I/O expanders. 
diff --git a/trunk/drivers/gpio/gpio-langwell.c b/trunk/drivers/gpio/gpio-langwell.c index 62ef10a641c4..634c3d37f7b5 100644 --- a/trunk/drivers/gpio/gpio-langwell.c +++ b/trunk/drivers/gpio/gpio-langwell.c @@ -324,7 +324,6 @@ static int lnw_gpio_probe(struct pci_dev *pdev, resource_size_t start, len; struct lnw_gpio *lnw; u32 gpio_base; - u32 irq_base; int retval; int ngpio = id->driver_data; @@ -346,7 +345,6 @@ static int lnw_gpio_probe(struct pci_dev *pdev, retval = -EFAULT; goto err_ioremap; } - irq_base = *(u32 *)base; gpio_base = *((u32 *)base + 1); /* release the IO mapping, since we already get the info from bar1 */ iounmap(base); @@ -367,6 +365,13 @@ static int lnw_gpio_probe(struct pci_dev *pdev, goto err_ioremap; } + lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio, + &lnw_gpio_irq_ops, lnw); + if (!lnw->domain) { + retval = -ENOMEM; + goto err_ioremap; + } + lnw->reg_base = base; lnw->chip.label = dev_name(&pdev->dev); lnw->chip.request = lnw_gpio_request; @@ -379,14 +384,6 @@ static int lnw_gpio_probe(struct pci_dev *pdev, lnw->chip.ngpio = ngpio; lnw->chip.can_sleep = 0; lnw->pdev = pdev; - - lnw->domain = irq_domain_add_simple(pdev->dev.of_node, ngpio, irq_base, - &lnw_gpio_irq_ops, lnw); - if (!lnw->domain) { - retval = -ENOMEM; - goto err_ioremap; - } - pci_set_drvdata(pdev, lnw); retval = gpiochip_add(&lnw->chip); if (retval) { diff --git a/trunk/drivers/gpio/gpio-ml-ioh.c b/trunk/drivers/gpio/gpio-ml-ioh.c index 0966f2637ad2..b73366523fae 100644 --- a/trunk/drivers/gpio/gpio-ml-ioh.c +++ b/trunk/drivers/gpio/gpio-ml-ioh.c @@ -496,7 +496,8 @@ static int ioh_gpio_probe(struct pci_dev *pdev, err_gpiochip_add: while (--i >= 0) { chip--; - if (gpiochip_remove(&chip->gpio)) + ret = gpiochip_remove(&chip->gpio); + if (ret) dev_err(&pdev->dev, "Failed gpiochip_remove(%d)\n", i); } kfree(chip_save); diff --git a/trunk/drivers/gpio/gpio-mvebu.c b/trunk/drivers/gpio/gpio-mvebu.c index 3a4816adc137..bf69a7eff370 100644 --- a/trunk/drivers/gpio/gpio-mvebu.c +++ b/trunk/drivers/gpio/gpio-mvebu.c @@ -619,6 +619,11 @@ static int mvebu_gpio_probe(struct platform_device *pdev) * per-CPU registers */ if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) { res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + dev_err(&pdev->dev, "Cannot get memory resource\n"); + return -ENODEV; + } + mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(mvchip->percpu_membase)) diff --git a/trunk/drivers/gpio/gpio-mxs.c b/trunk/drivers/gpio/gpio-mxs.c index f8e6af20dfbf..25000b0f8453 100644 --- a/trunk/drivers/gpio/gpio-mxs.c +++ b/trunk/drivers/gpio/gpio-mxs.c @@ -326,8 +326,7 @@ static int mxs_gpio_probe(struct platform_device *pdev) err = bgpio_init(&port->bgc, &pdev->dev, 4, port->base + PINCTRL_DIN(port), - port->base + PINCTRL_DOUT(port) + MXS_SET, - port->base + PINCTRL_DOUT(port) + MXS_CLR, + port->base + PINCTRL_DOUT(port), NULL, port->base + PINCTRL_DOE(port), NULL, 0); if (err) goto out_irqdesc_free; diff --git a/trunk/drivers/gpio/gpio-omap.c b/trunk/drivers/gpio/gpio-omap.c index 4a430360af5a..2050891d9c65 100644 --- a/trunk/drivers/gpio/gpio-omap.c +++ b/trunk/drivers/gpio/gpio-omap.c @@ -69,7 +69,6 @@ struct gpio_bank { bool is_mpuio; bool dbck_flag; bool loses_context; - bool context_valid; int stride; u32 width; int context_loss_count; @@ -1094,9 +1093,6 @@ static int omap_gpio_probe(struct platform_device *pdev) const struct omap_gpio_platform_data *pdata; struct resource *res; struct gpio_bank *bank; -#ifdef CONFIG_ARCH_OMAP1 - int 
irq_base; -#endif match = of_match_device(of_match_ptr(omap_gpio_match), dev); @@ -1132,34 +1128,13 @@ static int omap_gpio_probe(struct platform_device *pdev) bank->loses_context = true; } else { bank->loses_context = pdata->loses_context; - - if (bank->loses_context) - bank->get_context_loss_count = - pdata->get_context_loss_count; } -#ifdef CONFIG_ARCH_OMAP1 - /* - * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop - * irq_alloc_descs() and irq_domain_add_legacy() and just use a - * linear IRQ domain mapping for all OMAP platforms. - */ - irq_base = irq_alloc_descs(-1, 0, bank->width, 0); - if (irq_base < 0) { - dev_err(dev, "Couldn't allocate IRQ numbers\n"); - return -ENODEV; - } - bank->domain = irq_domain_add_legacy(node, bank->width, irq_base, - 0, &irq_domain_simple_ops, NULL); -#else bank->domain = irq_domain_add_linear(node, bank->width, &irq_domain_simple_ops, NULL); -#endif - if (!bank->domain) { - dev_err(dev, "Couldn't register an IRQ domain\n"); + if (!bank->domain) return -ENODEV; - } if (bank->regs->set_dataout && bank->regs->clr_dataout) bank->set_dataout = _set_gpio_dataout_reg; @@ -1203,6 +1178,9 @@ static int omap_gpio_probe(struct platform_device *pdev) omap_gpio_chip_init(bank); omap_gpio_show_rev(bank); + if (bank->loses_context) + bank->get_context_loss_count = pdata->get_context_loss_count; + pm_runtime_put(bank->dev); list_add_tail(&bank->node, &omap_gpio_list); @@ -1281,8 +1259,6 @@ static int omap_gpio_runtime_suspend(struct device *dev) return 0; } -static void omap_gpio_init_context(struct gpio_bank *p); - static int omap_gpio_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -1292,20 +1268,6 @@ static int omap_gpio_runtime_resume(struct device *dev) int c; spin_lock_irqsave(&bank->lock, flags); - - /* - * On the first resume during the probe, the context has not - * been initialised and so initialise it now. Also initialise - * the context loss count. 
- */ - if (bank->loses_context && !bank->context_valid) { - omap_gpio_init_context(bank); - - if (bank->get_context_loss_count) - bank->context_loss_count = - bank->get_context_loss_count(bank->dev); - } - _gpio_dbck_enable(bank); /* @@ -1422,29 +1384,6 @@ void omap2_gpio_resume_after_idle(void) } #if defined(CONFIG_PM_RUNTIME) -static void omap_gpio_init_context(struct gpio_bank *p) -{ - struct omap_gpio_reg_offs *regs = p->regs; - void __iomem *base = p->base; - - p->context.ctrl = __raw_readl(base + regs->ctrl); - p->context.oe = __raw_readl(base + regs->direction); - p->context.wake_en = __raw_readl(base + regs->wkup_en); - p->context.leveldetect0 = __raw_readl(base + regs->leveldetect0); - p->context.leveldetect1 = __raw_readl(base + regs->leveldetect1); - p->context.risingdetect = __raw_readl(base + regs->risingdetect); - p->context.fallingdetect = __raw_readl(base + regs->fallingdetect); - p->context.irqenable1 = __raw_readl(base + regs->irqenable); - p->context.irqenable2 = __raw_readl(base + regs->irqenable2); - - if (regs->set_dataout && p->regs->clr_dataout) - p->context.dataout = __raw_readl(base + regs->set_dataout); - else - p->context.dataout = __raw_readl(base + regs->dataout); - - p->context_valid = true; -} - static void omap_gpio_restore_context(struct gpio_bank *bank) { __raw_writel(bank->context.wake_en, @@ -1482,7 +1421,6 @@ static void omap_gpio_restore_context(struct gpio_bank *bank) #else #define omap_gpio_runtime_suspend NULL #define omap_gpio_runtime_resume NULL -static void omap_gpio_init_context(struct gpio_bank *p) {} #endif static const struct dev_pm_ops gpio_pm_ops = { diff --git a/trunk/drivers/gpio/gpio-pch.c b/trunk/drivers/gpio/gpio-pch.c index 0fec097e838d..cdf599687cf7 100644 --- a/trunk/drivers/gpio/gpio-pch.c +++ b/trunk/drivers/gpio/gpio-pch.c @@ -424,7 +424,8 @@ static int pch_gpio_probe(struct pci_dev *pdev, err_request_irq: irq_free_descs(irq_base, gpio_pins[chip->ioh]); - if (gpiochip_remove(&chip->gpio)) + ret = gpiochip_remove(&chip->gpio); + if (ret) dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__); err_gpiochip_add: diff --git a/trunk/drivers/gpio/gpio-sch.c b/trunk/drivers/gpio/gpio-sch.c index 5af65719b95d..1e4de16ceb41 100644 --- a/trunk/drivers/gpio/gpio-sch.c +++ b/trunk/drivers/gpio/gpio-sch.c @@ -272,8 +272,10 @@ static int sch_gpio_probe(struct platform_device *pdev) return 0; err_sch_gpio_resume: - if (gpiochip_remove(&sch_gpio_core)) - dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__); + err = gpiochip_remove(&sch_gpio_core); + if (err) + dev_err(&pdev->dev, "%s failed, %d\n", + "gpiochip_remove()", err); err_sch_gpio_core: release_region(res->start, resource_size(res)); diff --git a/trunk/drivers/gpio/gpio-tegra.c b/trunk/drivers/gpio/gpio-tegra.c index 9a62672f1bed..da4cb5b0cb87 100644 --- a/trunk/drivers/gpio/gpio-tegra.c +++ b/trunk/drivers/gpio/gpio-tegra.c @@ -463,6 +463,11 @@ static int tegra_gpio_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "Missing MEM resource\n"); + return -ENODEV; + } + regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(regs)) return PTR_ERR(regs); diff --git a/trunk/drivers/gpio/gpio-viperboard.c b/trunk/drivers/gpio/gpio-viperboard.c index 5ac2919197fe..095ab14cea4d 100644 --- a/trunk/drivers/gpio/gpio-viperboard.c +++ b/trunk/drivers/gpio/gpio-viperboard.c @@ -446,8 +446,7 @@ static int vprbrd_gpio_probe(struct platform_device *pdev) return ret; err_gpiob: - if 
(gpiochip_remove(&vb_gpio->gpioa)) - dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__); + ret = gpiochip_remove(&vb_gpio->gpioa); err_gpioa: return ret; diff --git a/trunk/drivers/gpu/drm/drm_crtc.c b/trunk/drivers/gpu/drm/drm_crtc.c index e7e92429d10f..3a8f7e6db295 100644 --- a/trunk/drivers/gpu/drm/drm_crtc.c +++ b/trunk/drivers/gpu/drm/drm_crtc.c @@ -78,10 +78,6 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev) { struct drm_crtc *crtc; - /* Locking is currently fubar in the panic handler. */ - if (oops_in_progress) - return; - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) WARN_ON(!mutex_is_locked(&crtc->mutex)); @@ -250,7 +246,6 @@ char *drm_get_connector_status_name(enum drm_connector_status status) else return "unknown"; } -EXPORT_SYMBOL(drm_get_connector_status_name); /** * drm_mode_object_get - allocate a new modeset identifier diff --git a/trunk/drivers/gpu/drm/drm_crtc_helper.c b/trunk/drivers/gpu/drm/drm_crtc_helper.c index ed1334e27c33..e974f9309b72 100644 --- a/trunk/drivers/gpu/drm/drm_crtc_helper.c +++ b/trunk/drivers/gpu/drm/drm_crtc_helper.c @@ -121,7 +121,6 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, connector->helper_private; int count = 0; int mode_flags = 0; - bool verbose_prune = true; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, drm_get_connector_name(connector)); @@ -150,7 +149,6 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", connector->base.id, drm_get_connector_name(connector)); drm_mode_connector_update_edid_property(connector, NULL); - verbose_prune = false; goto prune; } @@ -184,7 +182,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, } prune: - drm_mode_prune_invalid(dev, &connector->modes, verbose_prune); + drm_mode_prune_invalid(dev, &connector->modes, true); if (list_empty(&connector->modes)) return 0; @@ -1007,20 +1005,12 @@ static void output_poll_execute(struct work_struct *work) continue; connector->status = connector->funcs->detect(connector, false); - if (old_status != connector->status) { - const char *old, *new; - - old = drm_get_connector_status_name(old_status); - new = drm_get_connector_status_name(connector->status); - - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] " - "status updated from %s to %s\n", - connector->base.id, - drm_get_connector_name(connector), - old, new); - + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", + connector->base.id, + drm_get_connector_name(connector), + old_status, connector->status); + if (old_status != connector->status) changed = true; - } } mutex_unlock(&dev->mode_config.mutex); @@ -1093,11 +1083,10 @@ void drm_helper_hpd_irq_event(struct drm_device *dev) old_status = connector->status; connector->status = connector->funcs->detect(connector, false); - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", connector->base.id, drm_get_connector_name(connector), - drm_get_connector_status_name(old_status), - drm_get_connector_status_name(connector->status)); + old_status, connector->status); if (old_status != connector->status) changed = true; } diff --git a/trunk/drivers/gpu/drm/drm_drv.c b/trunk/drivers/gpu/drm/drm_drv.c index 9cc247f55502..8d4f29075af5 100644 --- a/trunk/drivers/gpu/drm/drm_drv.c +++ b/trunk/drivers/gpu/drm/drm_drv.c @@ -57,7 +57,7 @@ static int drm_version(struct drm_device *dev, void *data, struct drm_file 
*file_priv); #define DRM_IOCTL_DEF(ioctl, _func, _flags) \ - [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl} + [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0} /** Ioctl table */ static const struct drm_ioctl_desc drm_ioctls[] = { @@ -375,7 +375,7 @@ long drm_ioctl(struct file *filp, { struct drm_file *file_priv = filp->private_data; struct drm_device *dev; - const struct drm_ioctl_desc *ioctl = NULL; + const struct drm_ioctl_desc *ioctl; drm_ioctl_t *func; unsigned int nr = DRM_IOCTL_NR(cmd); int retcode = -EINVAL; @@ -392,6 +392,11 @@ long drm_ioctl(struct file *filp, atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); ++file_priv->ioctl_count; + DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", + task_pid_nr(current), cmd, nr, + (long)old_encode_dev(file_priv->minor->device), + file_priv->authenticated); + if ((nr >= DRM_CORE_IOCTL_COUNT) && ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) goto err_i1; @@ -412,11 +417,6 @@ long drm_ioctl(struct file *filp, } else goto err_i1; - DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n", - task_pid_nr(current), - (long)old_encode_dev(file_priv->minor->device), - file_priv->authenticated, ioctl->name); - /* Do not trust userspace, use our own definition */ func = ioctl->func; /* is there a local override? */ @@ -471,12 +471,6 @@ long drm_ioctl(struct file *filp, } err_i1: - if (!ioctl) - DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n", - task_pid_nr(current), - (long)old_encode_dev(file_priv->minor->device), - file_priv->authenticated, cmd, nr); - if (kdata != stack_kdata) kfree(kdata); atomic_dec(&dev->ioctl_count); diff --git a/trunk/drivers/gpu/drm/drm_encoder_slave.c b/trunk/drivers/gpu/drm/drm_encoder_slave.c index 0cfb60f54766..48c52f7df4e6 100644 --- a/trunk/drivers/gpu/drm/drm_encoder_slave.c +++ b/trunk/drivers/gpu/drm/drm_encoder_slave.c @@ -54,12 +54,16 @@ int drm_i2c_encoder_init(struct drm_device *dev, struct i2c_adapter *adap, const struct i2c_board_info *info) { + char modalias[sizeof(I2C_MODULE_PREFIX) + + I2C_NAME_SIZE]; struct module *module = NULL; struct i2c_client *client; struct drm_i2c_encoder_driver *encoder_drv; int err = 0; - request_module("%s%s", I2C_MODULE_PREFIX, info->type); + snprintf(modalias, sizeof(modalias), + "%s%s", I2C_MODULE_PREFIX, info->type); + request_module(modalias); client = i2c_new_device(adap, info); if (!client) { diff --git a/trunk/drivers/gpu/drm/drm_irq.c b/trunk/drivers/gpu/drm/drm_irq.c index 8bcce7866d36..a6a8643a6a77 100644 --- a/trunk/drivers/gpu/drm/drm_irq.c +++ b/trunk/drivers/gpu/drm/drm_irq.c @@ -1054,7 +1054,7 @@ EXPORT_SYMBOL(drm_vblank_off); */ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) { - /* vblank is not initialized (IRQ not installed ?), or has been freed */ + /* vblank is not initialized (IRQ not installed ?) 
*/ if (!dev->num_crtcs) return; /* @@ -1076,10 +1076,6 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc) { unsigned long irqflags; - /* vblank is not initialized (IRQ not installed ?), or has been freed */ - if (!dev->num_crtcs) - return; - if (dev->vblank_inmodeset[crtc]) { spin_lock_irqsave(&dev->vbl_lock, irqflags); dev->vblank_disable_allowed = 1; diff --git a/trunk/drivers/gpu/drm/drm_mm.c b/trunk/drivers/gpu/drm/drm_mm.c index 07cf99cc8862..db1e2d6f90d7 100644 --- a/trunk/drivers/gpu/drm/drm_mm.c +++ b/trunk/drivers/gpu/drm/drm_mm.c @@ -755,35 +755,33 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) EXPORT_SYMBOL(drm_mm_debug_table); #if defined(CONFIG_DEBUG_FS) -static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) -{ - unsigned long hole_start, hole_end, hole_size; - - if (entry->hole_follows) { - hole_start = drm_mm_hole_node_start(entry); - hole_end = drm_mm_hole_node_end(entry); - hole_size = hole_end - hole_start; - seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", - hole_start, hole_end, hole_size); - return hole_size; - } - - return 0; -} - int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) { struct drm_mm_node *entry; unsigned long total_used = 0, total_free = 0, total = 0; + unsigned long hole_start, hole_end, hole_size; - total_free += drm_mm_dump_hole(m, &mm->head_node); + hole_start = drm_mm_hole_node_start(&mm->head_node); + hole_end = drm_mm_hole_node_end(&mm->head_node); + hole_size = hole_end - hole_start; + if (hole_size) + seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", + hole_start, hole_end, hole_size); + total_free += hole_size; drm_mm_for_each_node(entry, mm) { seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", entry->start, entry->start + entry->size, entry->size); total_used += entry->size; - total_free += drm_mm_dump_hole(m, entry); + if (entry->hole_follows) { + hole_start = drm_mm_hole_node_start(entry); + hole_end = drm_mm_hole_node_end(entry); + hole_size = hole_end - hole_start; + seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", + hole_start, hole_end, hole_size); + total_free += hole_size; + } } total = total_free + total_used; diff --git a/trunk/drivers/gpu/drm/drm_modes.c b/trunk/drivers/gpu/drm/drm_modes.c index a371ff865a88..faa79df02648 100644 --- a/trunk/drivers/gpu/drm/drm_modes.c +++ b/trunk/drivers/gpu/drm/drm_modes.c @@ -1143,7 +1143,6 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, was_digit = false; } else goto done; - break; case '0' ... 
'9': was_digit = true; break; diff --git a/trunk/drivers/gpu/drm/drm_prime.c b/trunk/drivers/gpu/drm/drm_prime.c index 5b7b9110254b..dcde35231e25 100644 --- a/trunk/drivers/gpu/drm/drm_prime.c +++ b/trunk/drivers/gpu/drm/drm_prime.c @@ -190,7 +190,8 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev, if (ret) return ERR_PTR(ret); } - return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags); + return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, + 0600); } EXPORT_SYMBOL(drm_gem_prime_export); diff --git a/trunk/drivers/gpu/drm/drm_vm.c b/trunk/drivers/gpu/drm/drm_vm.c index 67969e25d60f..1d4f7c9fe661 100644 --- a/trunk/drivers/gpu/drm/drm_vm.c +++ b/trunk/drivers/gpu/drm/drm_vm.c @@ -617,6 +617,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) case _DRM_FRAME_BUFFER: case _DRM_REGISTERS: offset = drm_core_get_reg_ofs(dev); + vma->vm_flags |= VM_IO; /* not in core dump */ vma->vm_page_prot = drm_io_prot(map->type, vma); if (io_remap_pfn_range(vma, vma->vm_start, (map->offset + offset) >> PAGE_SHIFT, diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c index c200e4d71e3d..e8894bc9e6d5 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -48,8 +48,6 @@ struct exynos_drm_crtc { unsigned int pipe; unsigned int dpms; enum exynos_crtc_mode mode; - wait_queue_head_t pending_flip_queue; - atomic_t pending_flip; }; static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -63,13 +61,6 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) return; } - if (mode > DRM_MODE_DPMS_ON) { - /* wait for the completion of page flip. */ - wait_event(exynos_crtc->pending_flip_queue, - atomic_read(&exynos_crtc->pending_flip) == 0); - drm_vblank_off(crtc->dev, exynos_crtc->pipe); - } - exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms); exynos_crtc->dpms = mode; } @@ -226,6 +217,7 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, ret = drm_vblank_get(dev, exynos_crtc->pipe); if (ret) { DRM_DEBUG("failed to acquire vblank counter\n"); + list_del(&event->base.link); goto out; } @@ -233,7 +225,6 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, spin_lock_irq(&dev->event_lock); list_add_tail(&event->base.link, &dev_priv->pageflip_event_list); - atomic_set(&exynos_crtc->pending_flip, 1); spin_unlock_irq(&dev->event_lock); crtc->fb = fb; @@ -353,8 +344,6 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr) exynos_crtc->pipe = nr; exynos_crtc->dpms = DRM_MODE_DPMS_OFF; - init_waitqueue_head(&exynos_crtc->pending_flip_queue); - atomic_set(&exynos_crtc->pending_flip, 0); exynos_crtc->plane = exynos_plane_init(dev, 1 << nr, true); if (!exynos_crtc->plane) { kfree(exynos_crtc); @@ -409,8 +398,7 @@ void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc) { struct exynos_drm_private *dev_priv = dev->dev_private; struct drm_pending_vblank_event *e, *t; - struct drm_crtc *drm_crtc = dev_priv->crtc[crtc]; - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc); + struct timeval now; unsigned long flags; DRM_DEBUG_KMS("%s\n", __FILE__); @@ -423,11 +411,14 @@ void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc) if (crtc != e->pipe) continue; - list_del(&e->base.link); - drm_send_vblank_event(dev, -1, e); + do_gettimeofday(&now); + e->event.sequence = 0; + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; + + 
list_move_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); drm_vblank_put(dev, crtc); - atomic_set(&exynos_crtc->pending_flip, 0); - wake_up(&exynos_crtc->pending_flip_queue); } spin_unlock_irqrestore(&dev->event_lock, flags); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 8f007aaeffc3..68f0045f86b8 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -182,7 +182,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem_obj->base); - if (IS_ERR(helper->fb)) { + if (IS_ERR_OR_NULL(helper->fb)) { DRM_ERROR("failed to create drm framebuffer.\n"); ret = PTR_ERR(helper->fb); goto err_destroy_gem; diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 4a1616a18ab7..773f583fa964 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -12,9 +12,9 @@ * */ #include +#include #include #include -#include #include #include #include @@ -1845,7 +1845,7 @@ static int fimc_probe(struct platform_device *pdev) } ctx->irq = res->start; - ret = devm_request_threaded_irq(dev, ctx->irq, NULL, fimc_irq_handler, + ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler, IRQF_ONESHOT, "drm_fimc", ctx); if (ret < 0) { dev_err(dev, "failed to request irq.\n"); @@ -1854,7 +1854,7 @@ static int fimc_probe(struct platform_device *pdev) ret = fimc_setup_clocks(ctx); if (ret < 0) - return ret; + goto err_free_irq; ippdrv = &ctx->ippdrv; ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops; @@ -1884,7 +1884,7 @@ static int fimc_probe(struct platform_device *pdev) goto err_pm_dis; } - dev_info(dev, "drm fimc registered successfully.\n"); + dev_info(&pdev->dev, "drm fimc registered successfully.\n"); return 0; @@ -1892,6 +1892,8 @@ static int fimc_probe(struct platform_device *pdev) pm_runtime_disable(dev); err_put_clk: fimc_put_clocks(ctx); +err_free_irq: + free_irq(ctx->irq, ctx); return ret; } @@ -1909,6 +1911,8 @@ static int fimc_remove(struct platform_device *pdev) pm_runtime_set_suspended(dev); pm_runtime_disable(dev); + free_irq(ctx->irq, ctx); + return 0; } diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 97c61dbffd82..746b282b343a 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -885,7 +885,7 @@ static int fimd_probe(struct platform_device *pdev) DRM_DEBUG_KMS("%s\n", __FILE__); - if (dev->of_node) { + if (pdev->dev.of_node) { pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { DRM_ERROR("memory allocation for pdata failed\n"); @@ -899,7 +899,7 @@ static int fimd_probe(struct platform_device *pdev) return ret; } } else { - pdata = dev->platform_data; + pdata = pdev->dev.platform_data; if (!pdata) { DRM_ERROR("no platform data specified\n"); return -EINVAL; @@ -912,7 +912,7 @@ static int fimd_probe(struct platform_device *pdev) return -EINVAL; } - ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; @@ -930,7 +930,7 @@ static int fimd_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctx->regs = devm_ioremap_resource(dev, res); + ctx->regs = 
devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(ctx->regs)) return PTR_ERR(ctx->regs); @@ -942,7 +942,7 @@ static int fimd_probe(struct platform_device *pdev) ctx->irq = res->start; - ret = devm_request_irq(dev, ctx->irq, fimd_irq_handler, + ret = devm_request_irq(&pdev->dev, ctx->irq, fimd_irq_handler, 0, "drm_fimd", ctx); if (ret) { dev_err(dev, "irq request failed.\n"); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c index af75434ee4d7..47a493c8a71f 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1379,7 +1379,7 @@ static int g2d_probe(struct platform_device *pdev) struct exynos_drm_subdrv *subdrv; int ret; - g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL); + g2d = devm_kzalloc(&pdev->dev, sizeof(*g2d), GFP_KERNEL); if (!g2d) { dev_err(dev, "failed to allocate driver data\n"); return -ENOMEM; @@ -1417,7 +1417,7 @@ static int g2d_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - g2d->regs = devm_ioremap_resource(dev, res); + g2d->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(g2d->regs)) { ret = PTR_ERR(g2d->regs); goto err_put_clk; @@ -1430,7 +1430,7 @@ static int g2d_probe(struct platform_device *pdev) goto err_put_clk; } - ret = devm_request_irq(dev, g2d->irq, g2d_irq_handler, 0, + ret = devm_request_irq(&pdev->dev, g2d->irq, g2d_irq_handler, 0, "drm_g2d", g2d); if (ret < 0) { dev_err(dev, "irq request failed\n"); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 762f40d548b7..7841c3b8a20e 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1704,7 +1704,7 @@ static int gsc_probe(struct platform_device *pdev) } ctx->irq = res->start; - ret = devm_request_threaded_irq(dev, ctx->irq, NULL, gsc_irq_handler, + ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler, IRQF_ONESHOT, "drm_gsc", ctx); if (ret < 0) { dev_err(dev, "failed to request irq.\n"); @@ -1725,7 +1725,7 @@ static int gsc_probe(struct platform_device *pdev) ret = gsc_init_prop_list(ippdrv); if (ret < 0) { dev_err(dev, "failed to init property list.\n"); - return ret; + goto err_get_irq; } DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id, @@ -1743,12 +1743,15 @@ static int gsc_probe(struct platform_device *pdev) goto err_ippdrv_register; } - dev_info(dev, "drm gsc registered successfully.\n"); + dev_info(&pdev->dev, "drm gsc registered successfully.\n"); return 0; err_ippdrv_register: + devm_kfree(dev, ippdrv->prop_list); pm_runtime_disable(dev); +err_get_irq: + free_irq(ctx->irq, ctx); return ret; } @@ -1758,12 +1761,15 @@ static int gsc_remove(struct platform_device *pdev) struct gsc_context *ctx = get_gsc_context(dev); struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; + devm_kfree(dev, ippdrv->prop_list); exynos_drm_ippdrv_unregister(ippdrv); mutex_destroy(&ctx->lock); pm_runtime_set_suspended(dev); pm_runtime_disable(dev); + free_irq(ctx->irq, ctx); + return 0; } diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c index 437fb947e46d..ba2f0f1aa05f 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c @@ -442,7 +442,7 @@ static int exynos_drm_hdmi_probe(struct platform_device *pdev) DRM_DEBUG_KMS("%s\n", __FILE__); - ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + ctx = 
devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) { DRM_LOG_KMS("failed to alloc common hdmi context.\n"); return -ENOMEM; diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_ipp.c index be1e88463466..29d2ad314490 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -222,7 +222,7 @@ static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx, /* find ipp driver using idr */ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id); - if (IS_ERR(ippdrv)) { + if (IS_ERR_OR_NULL(ippdrv)) { DRM_ERROR("not found ipp%d driver.\n", ipp_id); return ippdrv; } @@ -388,7 +388,7 @@ static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property) DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id); ippdrv = ipp_find_drv_by_handle(prop_id); - if (IS_ERR(ippdrv)) { + if (IS_ERR_OR_NULL(ippdrv)) { DRM_ERROR("failed to get ipp driver.\n"); return -EINVAL; } @@ -492,7 +492,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, /* find ipp driver using ipp id */ ippdrv = ipp_find_driver(ctx, property); - if (IS_ERR(ippdrv)) { + if (IS_ERR_OR_NULL(ippdrv)) { DRM_ERROR("failed to get ipp driver.\n"); return -EINVAL; } @@ -521,19 +521,19 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, c_node->state = IPP_STATE_IDLE; c_node->start_work = ipp_create_cmd_work(); - if (IS_ERR(c_node->start_work)) { + if (IS_ERR_OR_NULL(c_node->start_work)) { DRM_ERROR("failed to create start work.\n"); goto err_clear; } c_node->stop_work = ipp_create_cmd_work(); - if (IS_ERR(c_node->stop_work)) { + if (IS_ERR_OR_NULL(c_node->stop_work)) { DRM_ERROR("failed to create stop work.\n"); goto err_free_start; } c_node->event_work = ipp_create_event_work(); - if (IS_ERR(c_node->event_work)) { + if (IS_ERR_OR_NULL(c_node->event_work)) { DRM_ERROR("failed to create event work.\n"); goto err_free_stop; } @@ -915,7 +915,7 @@ static int ipp_queue_buf_with_run(struct device *dev, DRM_DEBUG_KMS("%s\n", __func__); ippdrv = ipp_find_drv_by_handle(qbuf->prop_id); - if (IS_ERR(ippdrv)) { + if (IS_ERR_OR_NULL(ippdrv)) { DRM_ERROR("failed to get ipp driver.\n"); return -EFAULT; } @@ -1909,7 +1909,7 @@ static int ipp_probe(struct platform_device *pdev) struct exynos_drm_subdrv *subdrv; int ret; - ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; @@ -1963,7 +1963,7 @@ static int ipp_probe(struct platform_device *pdev) goto err_cmd_workq; } - dev_info(dev, "drm ipp registered successfully.\n"); + dev_info(&pdev->dev, "drm ipp registered successfully.\n"); return 0; diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 9b6c70964d71..947f09f15ad1 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -666,8 +666,8 @@ static int rotator_probe(struct platform_device *pdev) return rot->irq; } - ret = devm_request_threaded_irq(dev, rot->irq, NULL, - rotator_irq_handler, IRQF_ONESHOT, "drm_rotator", rot); + ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler, + IRQF_ONESHOT, "drm_rotator", rot); if (ret < 0) { dev_err(dev, "failed to request irq\n"); return ret; @@ -676,7 +676,8 @@ static int rotator_probe(struct platform_device *pdev) rot->clock = devm_clk_get(dev, "rotator"); if (IS_ERR(rot->clock)) { dev_err(dev, "failed to get 
clock\n"); - return PTR_ERR(rot->clock); + ret = PTR_ERR(rot->clock); + goto err_clk_get; } pm_runtime_enable(dev); @@ -708,7 +709,10 @@ static int rotator_probe(struct platform_device *pdev) return 0; err_ippdrv_register: + devm_kfree(dev, ippdrv->prop_list); pm_runtime_disable(dev); +err_clk_get: + free_irq(rot->irq, rot); return ret; } @@ -718,10 +722,13 @@ static int rotator_remove(struct platform_device *pdev) struct rot_context *rot = dev_get_drvdata(dev); struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv; + devm_kfree(dev, ippdrv->prop_list); exynos_drm_ippdrv_unregister(ippdrv); pm_runtime_disable(dev); + free_irq(rot->irq, rot); + return 0; } diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 24376c194a5e..9504b0cd825a 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -594,7 +594,7 @@ static int vidi_probe(struct platform_device *pdev) DRM_DEBUG_KMS("%s\n", __FILE__); - ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; @@ -612,7 +612,7 @@ static int vidi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ctx); - ret = device_create_file(dev, &dev_attr_connection); + ret = device_create_file(&pdev->dev, &dev_attr_connection); if (ret < 0) DRM_INFO("failed to create connection sysfs.\n"); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c b/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c index fd1426dca882..bbfc3840080c 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1946,14 +1946,14 @@ static int hdmi_probe(struct platform_device *pdev) DRM_DEBUG_KMS("[%d]\n", __LINE__); - if (dev->of_node) { + if (pdev->dev.of_node) { pdata = drm_hdmi_dt_parse_pdata(dev); if (IS_ERR(pdata)) { DRM_ERROR("failed to parse dt\n"); return PTR_ERR(pdata); } } else { - pdata = dev->platform_data; + pdata = pdev->dev.platform_data; } if (!pdata) { @@ -1961,14 +1961,14 @@ static int hdmi_probe(struct platform_device *pdev) return -EINVAL; } - drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), + drm_hdmi_ctx = devm_kzalloc(&pdev->dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL); if (!drm_hdmi_ctx) { DRM_ERROR("failed to allocate common hdmi context.\n"); return -ENOMEM; } - hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), + hdata = devm_kzalloc(&pdev->dev, sizeof(struct hdmi_context), GFP_KERNEL); if (!hdata) { DRM_ERROR("out of memory\n"); @@ -1985,7 +1985,7 @@ static int hdmi_probe(struct platform_device *pdev) if (dev->of_node) { const struct of_device_id *match; match = of_match_node(of_match_ptr(hdmi_match_types), - dev->of_node); + pdev->dev.of_node); if (match == NULL) return -ENODEV; hdata->type = (enum hdmi_type)match->data; @@ -2005,11 +2005,16 @@ static int hdmi_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hdata->regs = devm_ioremap_resource(dev, res); + if (!res) { + DRM_ERROR("failed to find registers\n"); + return -ENOENT; + } + + hdata->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(hdata->regs)) return PTR_ERR(hdata->regs); - ret = devm_gpio_request(dev, hdata->hpd_gpio, "HPD"); + ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD"); if (ret) { DRM_ERROR("failed to request HPD gpio\n"); return ret; @@ -2041,7 +2046,7 @@ static int hdmi_probe(struct platform_device *pdev) hdata->hpd = gpio_get_value(hdata->hpd_gpio); - ret = 
devm_request_threaded_irq(dev, hdata->irq, NULL, + ret = request_threaded_irq(hdata->irq, NULL, hdmi_irq_thread, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "hdmi", drm_hdmi_ctx); @@ -2070,11 +2075,16 @@ static int hdmi_probe(struct platform_device *pdev) static int hdmi_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev); + struct hdmi_context *hdata = ctx->ctx; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); pm_runtime_disable(dev); + free_irq(hdata->irq, hdata); + + /* hdmiphy i2c driver */ i2c_del_driver(&hdmiphy_driver); /* DDC i2c driver */ diff --git a/trunk/drivers/gpu/drm/exynos/exynos_mixer.c b/trunk/drivers/gpu/drm/exynos/exynos_mixer.c index 7c197d3820c5..ec3e376b7e01 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1061,7 +1061,7 @@ static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx, return -ENXIO; } - mixer_res->mixer_regs = devm_ioremap(dev, res->start, + mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (mixer_res->mixer_regs == NULL) { dev_err(dev, "register mapping failed.\n"); @@ -1074,7 +1074,7 @@ static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx, return -ENXIO; } - ret = devm_request_irq(dev, res->start, mixer_irq_handler, + ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler, 0, "drm_mixer", ctx); if (ret) { dev_err(dev, "request interrupt failed.\n"); @@ -1118,7 +1118,7 @@ static int vp_resources_init(struct exynos_drm_hdmi_context *ctx, return -ENXIO; } - mixer_res->vp_regs = devm_ioremap(dev, res->start, + mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (mixer_res->vp_regs == NULL) { dev_err(dev, "register mapping failed.\n"); @@ -1169,14 +1169,14 @@ static int mixer_probe(struct platform_device *pdev) dev_info(dev, "probe start\n"); - drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), + drm_hdmi_ctx = devm_kzalloc(&pdev->dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL); if (!drm_hdmi_ctx) { DRM_ERROR("failed to allocate common hdmi context.\n"); return -ENOMEM; } - ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) { DRM_ERROR("failed to alloc mixer context.\n"); return -ENOMEM; @@ -1187,14 +1187,14 @@ static int mixer_probe(struct platform_device *pdev) if (dev->of_node) { const struct of_device_id *match; match = of_match_node(of_match_ptr(mixer_match_types), - dev->of_node); + pdev->dev.of_node); drv = (struct mixer_drv_data *)match->data; } else { drv = (struct mixer_drv_data *) platform_get_device_id(pdev)->driver_data; } - ctx->dev = dev; + ctx->dev = &pdev->dev; ctx->parent_ctx = (void *)drm_hdmi_ctx; drm_hdmi_ctx->ctx = (void *)ctx; ctx->vp_enabled = drv->is_vp_enabled; diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_display.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_display.c index 82430ad8ba62..3cfd0931fbfb 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -1462,7 +1462,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc, size_t addr = 0; struct gtt_range *gt; struct drm_gem_object *obj; - int ret = 0; + int ret; /* if we want to turn of the cursor ignore width and height */ if (!handle) { @@ -1499,8 +1499,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc, if (obj->size < width * height * 4) { dev_dbg(dev->dev, 
"buffer is to small\n"); - ret = -ENOMEM; - goto unref_cursor; + return -ENOMEM; } gt = container_of(obj, struct gtt_range, gem); @@ -1509,7 +1508,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc, ret = psb_gtt_pin(gt); if (ret) { dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); - goto unref_cursor; + return ret; } addr = gt->offset; /* Or resource.start ??? */ @@ -1533,14 +1532,9 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc, struct gtt_range, gem); psb_gtt_unpin(gt); drm_gem_object_unreference(psb_intel_crtc->cursor_obj); + psb_intel_crtc->cursor_obj = obj; } - - psb_intel_crtc->cursor_obj = obj; - return ret; - -unref_cursor: - drm_gem_object_unreference(obj); - return ret; + return 0; } static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) @@ -1756,19 +1750,6 @@ static void cdv_intel_crtc_destroy(struct drm_crtc *crtc) kfree(psb_intel_crtc); } -static void cdv_intel_crtc_disable(struct drm_crtc *crtc) -{ - struct gtt_range *gt; - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); - - if (crtc->fb) { - gt = to_psb_fb(crtc->fb)->gtt; - psb_gtt_unpin(gt); - } -} - const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { .dpms = cdv_intel_crtc_dpms, .mode_fixup = cdv_intel_crtc_mode_fixup, @@ -1776,7 +1757,6 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { .mode_set_base = cdv_intel_pipe_set_base, .prepare = cdv_intel_crtc_prepare, .commit = cdv_intel_crtc_commit, - .disable = cdv_intel_crtc_disable, }; const struct drm_crtc_funcs cdv_intel_crtc_funcs = { diff --git a/trunk/drivers/gpu/drm/gma500/framebuffer.c b/trunk/drivers/gpu/drm/gma500/framebuffer.c index 8b1b6d923abe..1534e220097a 100644 --- a/trunk/drivers/gpu/drm/gma500/framebuffer.c +++ b/trunk/drivers/gpu/drm/gma500/framebuffer.c @@ -121,8 +121,8 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) unsigned long address; int ret; unsigned long pfn; - unsigned long phys_addr = (unsigned long)dev_priv->stolen_base + - psbfb->gtt->offset; + /* FIXME: assumes fb at stolen base which may not be true */ + unsigned long phys_addr = (unsigned long)dev_priv->stolen_base; page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT); diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_display.c b/trunk/drivers/gpu/drm/gma500/psb_intel_display.c index 6666493789d1..6e8f42b61ff6 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/trunk/drivers/gpu/drm/gma500/psb_intel_display.c @@ -843,7 +843,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc, struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt; struct drm_gem_object *obj; void *tmp_dst, *tmp_src; - int ret = 0, i, cursor_pages; + int ret, i, cursor_pages; /* if we want to turn of the cursor ignore width and height */ if (!handle) { @@ -880,8 +880,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc, if (obj->size < width * height * 4) { dev_dbg(dev->dev, "buffer is to small\n"); - ret = -ENOMEM; - goto unref_cursor; + return -ENOMEM; } gt = container_of(obj, struct gtt_range, gem); @@ -890,14 +889,13 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc, ret = psb_gtt_pin(gt); if (ret) { dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); - goto unref_cursor; + return ret; } if (dev_priv->ops->cursor_needs_phys) { if (cursor_gt == NULL) { dev_err(dev->dev, "No hardware cursor mem available"); - 
ret = -ENOMEM; - goto unref_cursor; + return -ENOMEM; } /* Prevent overflow */ @@ -938,14 +936,9 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc, struct gtt_range, gem); psb_gtt_unpin(gt); drm_gem_object_unreference(psb_intel_crtc->cursor_obj); + psb_intel_crtc->cursor_obj = obj; } - - psb_intel_crtc->cursor_obj = obj; - return ret; - -unref_cursor: - drm_gem_object_unreference(obj); - return ret; + return 0; } static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) @@ -1157,19 +1150,6 @@ static void psb_intel_crtc_destroy(struct drm_crtc *crtc) kfree(psb_intel_crtc); } -static void psb_intel_crtc_disable(struct drm_crtc *crtc) -{ - struct gtt_range *gt; - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); - - if (crtc->fb) { - gt = to_psb_fb(crtc->fb)->gtt; - psb_gtt_unpin(gt); - } -} - const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { .dpms = psb_intel_crtc_dpms, .mode_fixup = psb_intel_crtc_mode_fixup, @@ -1177,7 +1157,6 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { .mode_set_base = psb_intel_pipe_set_base, .prepare = psb_intel_crtc_prepare, .commit = psb_intel_crtc_commit, - .disable = psb_intel_crtc_disable, }; const struct drm_crtc_funcs psb_intel_crtc_funcs = { diff --git a/trunk/drivers/gpu/drm/i810/i810_dma.c b/trunk/drivers/gpu/drm/i810/i810_dma.c index ada49eda489f..004ecdfe1b55 100644 --- a/trunk/drivers/gpu/drm/i810/i810_dma.c +++ b/trunk/drivers/gpu/drm/i810/i810_dma.c @@ -97,7 +97,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) buf = dev_priv->mmap_buffer; buf_priv = buf->dev_private; - vma->vm_flags |= VM_DONTCOPY; + vma->vm_flags |= (VM_IO | VM_DONTCOPY); buf_priv->currently_mapped = I810_BUF_MAPPED; diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.c b/trunk/drivers/gpu/drm/i915/i915_drv.c index a2e4953b8e8d..9ebe895c17d6 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.c +++ b/trunk/drivers/gpu/drm/i915/i915_drv.c @@ -364,64 +364,40 @@ static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ - INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */ + INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */ INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */ INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */ - INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */ + INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */ INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */ - INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */ - INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */ - INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */ - INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */ - INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */ - INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */ INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */ INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */ - INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */ + 
INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */ INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */ INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */ - INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */ + INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */ INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */ INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */ - INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */ - INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */ - INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */ - INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */ - INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */ - INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */ - INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */ + INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */ INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */ INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */ - INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */ + INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */ INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */ INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */ - INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */ + INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */ INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ - INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */ - INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */ - INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */ - INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */ - INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */ - INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */ - INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */ + INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */ INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */ INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */ - INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */ + INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */ INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */ INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */ - INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */ + INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */ INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */ INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */ - INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */ - INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */ - INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */ - INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */ - INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */ - INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */ - 
INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */ + INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */ INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info), diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.h b/trunk/drivers/gpu/drm/i915/i915_drv.h index 9669a0b8b440..d5dcf7fe1ee9 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.h +++ b/trunk/drivers/gpu/drm/i915/i915_drv.h @@ -1697,8 +1697,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, struct dma_buf *i915_gem_prime_export(struct drm_device *dev, struct drm_gem_object *gem_obj, int flags); -void i915_gem_restore_fences(struct drm_device *dev); - /* i915_gem_context.c */ void i915_gem_context_init(struct drm_device *dev); void i915_gem_context_fini(struct drm_device *dev); @@ -1945,19 +1943,4 @@ static inline void __user *to_user_ptr(u64 address) return (void __user *)(uintptr_t)address; } -static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) -{ - unsigned long j = msecs_to_jiffies(m); - - return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); -} - -static inline unsigned long -timespec_to_jiffies_timeout(const struct timespec *value) -{ - unsigned long j = timespec_to_jiffies(value); - - return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); -} - #endif diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c index 9e35dafc5807..6be940effefd 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem.c @@ -91,11 +91,14 @@ i915_gem_wait_for_error(struct i915_gpu_error *error) { int ret; -#define EXIT_COND (!i915_reset_in_progress(error) || \ - i915_terminally_wedged(error)) +#define EXIT_COND (!i915_reset_in_progress(error)) if (EXIT_COND) return 0; + /* GPU is already declared terminally dead, give up. */ + if (i915_terminally_wedged(error)) + return -EIO; + /* * Only wait 10 seconds for the gpu reset to complete to avoid hanging * userspace. If it takes that long something really bad is going on and @@ -1000,7 +1003,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, wait_forever = false; } - timeout_jiffies = timespec_to_jiffies_timeout(&wait_time); + timeout_jiffies = timespec_to_jiffies(&wait_time); if (WARN_ON(!ring->irq_get(ring))) return -ENODEV; @@ -1042,8 +1045,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, if (timeout) { struct timespec sleep_time = timespec_sub(now, before); *timeout = timespec_sub(*timeout, sleep_time); - if (!timespec_valid(timeout)) /* i.e. 
negative time remains */ - set_normalized_timespec(timeout, 0, 0); } switch (end) { @@ -1052,6 +1053,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, case -ERESTARTSYS: /* Signal */ return (int)end; case 0: /* Timeout */ + if (timeout) + set_normalized_timespec(timeout, 0, 0); return -ETIME; default: /* Completed */ WARN_ON(end < 0); /* We're not aware of other errors */ @@ -1801,14 +1804,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; gfp &= ~(__GFP_IO | __GFP_WAIT); } -#ifdef CONFIG_SWIOTLB - if (swiotlb_nr_tbl()) { - st->nents++; - sg_set_page(sg, page, PAGE_SIZE, 0); - sg = sg_next(sg); - continue; - } -#endif + if (!i || page_to_pfn(page) != last_pfn + 1) { if (i) sg = sg_next(sg); @@ -1819,10 +1815,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) } last_pfn = page_to_pfn(page); } -#ifdef CONFIG_SWIOTLB - if (!swiotlb_nr_tbl()) -#endif - sg_mark_end(sg); + + sg_mark_end(sg); obj->pages = st; if (i915_gem_object_needs_bit17_swizzle(obj)) @@ -2126,15 +2120,25 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, } } -void i915_gem_restore_fences(struct drm_device *dev) +static void i915_gem_reset_fences(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; for (i = 0; i < dev_priv->num_fence_regs; i++) { struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; - i915_gem_write_fence(dev, i, reg->obj); + + if (reg->obj) + i915_gem_object_fence_lost(reg->obj); + + i915_gem_write_fence(dev, i, NULL); + + reg->pin_count = 0; + reg->obj = NULL; + INIT_LIST_HEAD(®->lru_list); } + + INIT_LIST_HEAD(&dev_priv->mm.fence_list); } void i915_gem_reset(struct drm_device *dev) @@ -2157,7 +2161,8 @@ void i915_gem_reset(struct drm_device *dev) obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; } - i915_gem_restore_fences(dev); + /* The fence registers are invalidated so clear them out */ + i915_gem_reset_fences(dev); } /** @@ -2372,8 +2377,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) mutex_unlock(&dev->struct_mutex); ret = __wait_seqno(ring, seqno, reset_counter, true, timeout); - if (timeout) + if (timeout) { + WARN_ON(!timespec_valid(timeout)); args->timeout_ns = timespec_to_ns(timeout); + } return ret; out: @@ -3863,6 +3870,8 @@ i915_gem_idle(struct drm_device *dev) if (!drm_core_check_feature(dev, DRIVER_MODESET)) i915_gem_evict_everything(dev); + i915_gem_reset_fences(dev); + /* Hack! Don't let anybody do execbuf while we don't control the chip. * We need to replace this with a semaphore, or something. * And not confound mm.suspended! 
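The i915_gem.c hunks above and below swap the newer i915_gem_restore_fences() calls back to the older i915_gem_reset_fences(): on reset/idle the fence registers are cleared and their backing objects forgotten, rather than re-programmed from the tracked state. A minimal, self-contained sketch of the two strategies follows; the struct layout, NUM_FENCE_REGS, and write_fence() are simplified stand-ins for illustration only, not the driver's real definitions.

/* Hypothetical, simplified model of the two fence-handling strategies:
 * "restore" re-programs each register from the tracked object, while
 * "reset" clears the registers and drops the tracking entirely. */
struct fence_reg {
	void *obj;		/* object currently backed by this fence, if any */
	int pin_count;
};

#define NUM_FENCE_REGS 16
static struct fence_reg fence_regs[NUM_FENCE_REGS];

static void write_fence(int i, void *obj)
{
	(void)i;
	(void)obj;
	/* stand-in for the MMIO write that programs fence register i */
}

static void restore_fences(void)
{
	int i;

	for (i = 0; i < NUM_FENCE_REGS; i++)
		write_fence(i, fence_regs[i].obj);	/* re-program saved state */
}

static void reset_fences(void)
{
	int i;

	for (i = 0; i < NUM_FENCE_REGS; i++) {
		write_fence(i, NULL);		/* clear the hardware register */
		fence_regs[i].obj = NULL;	/* drop tracking of the object */
		fence_regs[i].pin_count = 0;
	}
}
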
@@ -4189,8 +4198,7 @@ i915_gem_load(struct drm_device *dev) dev_priv->num_fence_regs = 8; /* Initialize fence registers to zero */ - INIT_LIST_HEAD(&dev_priv->mm.fence_list); - i915_gem_restore_fences(dev); + i915_gem_reset_fences(dev); i915_gem_detect_bit_6_swizzle(dev); init_waitqueue_head(&dev_priv->pending_flip_queue); diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c index bdb0d7717bc7..dca614de71b6 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -709,6 +709,15 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) return snb_gmch_ctl << 25; /* 32 MB units */ } +static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl) +{ + static const int stolen_decoder[] = { + 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352}; + snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT; + snb_gmch_ctl &= IVB_GMCH_GMS_MASK; + return stolen_decoder[snb_gmch_ctl] << 20; +} + static int gen6_gmch_probe(struct drm_device *dev, size_t *gtt_total, size_t *stolen, @@ -738,7 +747,11 @@ static int gen6_gmch_probe(struct drm_device *dev, pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); - *stolen = gen6_get_stolen_size(snb_gmch_ctl); + if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) + *stolen = gen7_get_stolen_size(snb_gmch_ctl); + else + *stolen = gen6_get_stolen_size(snb_gmch_ctl); + *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; /* For Modern GENs the PTEs and register space are split in the BAR */ diff --git a/trunk/drivers/gpu/drm/i915/i915_reg.h b/trunk/drivers/gpu/drm/i915/i915_reg.h index 2d6b62e42daf..83f9c26e1adb 100644 --- a/trunk/drivers/gpu/drm/i915/i915_reg.h +++ b/trunk/drivers/gpu/drm/i915/i915_reg.h @@ -46,6 +46,8 @@ #define SNB_GMCH_GGMS_MASK 0x3 #define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ #define SNB_GMCH_GMS_MASK 0x1f +#define IVB_GMCH_GMS_SHIFT 4 +#define IVB_GMCH_GMS_MASK 0xf /* PCI config space */ diff --git a/trunk/drivers/gpu/drm/i915/i915_suspend.c b/trunk/drivers/gpu/drm/i915/i915_suspend.c index 369b3d8776ab..41f0fdecfbdc 100644 --- a/trunk/drivers/gpu/drm/i915/i915_suspend.c +++ b/trunk/drivers/gpu/drm/i915/i915_suspend.c @@ -384,7 +384,6 @@ int i915_restore_state(struct drm_device *dev) mutex_lock(&dev->struct_mutex); - i915_gem_restore_fences(dev); i915_restore_display(dev); if (!drm_core_check_feature(dev, DRIVER_MODESET)) { diff --git a/trunk/drivers/gpu/drm/i915/intel_ddi.c b/trunk/drivers/gpu/drm/i915/intel_ddi.c index fb961bb81903..26a0a570f92e 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ddi.c +++ b/trunk/drivers/gpu/drm/i915/intel_ddi.c @@ -1265,8 +1265,6 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); - if (port != PORT_A) - intel_dp_stop_link_train(intel_dp); } } @@ -1328,9 +1326,6 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) } else if (type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (port == PORT_A) - intel_dp_stop_link_train(intel_dp); - ironlake_edp_backlight_on(intel_dp); } diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index 56746dcac40f..efe829919755 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -7937,11 +7937,6 @@ intel_modeset_check_state(struct drm_device 
*dev) memset(&pipe_config, 0, sizeof(pipe_config)); active = dev_priv->display.get_pipe_config(crtc, &pipe_config); - - /* hw state is inconsistent with the pipe A quirk */ - if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) - active = crtc->active; - WARN(crtc->active != active, "crtc active state doesn't match with hw state " "(expected %i, found %i)\n", crtc->active, active); @@ -8145,21 +8140,6 @@ static void intel_set_config_restore_state(struct drm_device *dev, } } -static bool -is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors, - int num_connectors) -{ - int i; - - for (i = 0; i < num_connectors; i++) - if (connectors[i].encoder && - connectors[i].encoder->crtc == crtc && - connectors[i].dpms != DRM_MODE_DPMS_ON) - return true; - - return false; -} - static void intel_set_config_compute_mode_changes(struct drm_mode_set *set, struct intel_set_config *config) @@ -8167,11 +8147,7 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, /* We should be able to check here if the fb has the same properties * and then just flip_or_move it */ - if (set->connectors != NULL && - is_crtc_connector_off(set->crtc, *set->connectors, - set->num_connectors)) { - config->mode_changed = true; - } else if (set->crtc->fb != set->fb) { + if (set->crtc->fb != set->fb) { /* If we have no fb then treat it as a full mode set */ if (set->crtc->fb == NULL) { DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); @@ -8181,9 +8157,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, } else if (set->fb->pixel_format != set->crtc->fb->pixel_format) { config->mode_changed = true; - } else { + } else config->fb_changed = true; - } } if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y)) @@ -8357,6 +8332,11 @@ static int intel_crtc_set_config(struct drm_mode_set *set) ret = intel_set_mode(set->crtc, set->mode, set->x, set->y, set->fb); + if (ret) { + DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n", + set->crtc->base.id, ret); + goto fail; + } } else if (config->fb_changed) { intel_crtc_wait_for_pending_flips(set->crtc); @@ -8364,18 +8344,18 @@ static int intel_crtc_set_config(struct drm_mode_set *set) set->x, set->y, set->fb); } - if (ret) { - DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n", - set->crtc->base.id, ret); + intel_set_config_free(config); + + return 0; + fail: - intel_set_config_restore_state(dev, config); + intel_set_config_restore_state(dev, config); - /* Try to restore the config */ - if (config->mode_changed && - intel_set_mode(save_set.crtc, save_set.mode, - save_set.x, save_set.y, save_set.fb)) - DRM_ERROR("failed to restore config after modeset failure\n"); - } + /* Try to restore the config */ + if (config->mode_changed && + intel_set_mode(save_set.crtc, save_set.mode, + save_set.x, save_set.y, save_set.fb)) + DRM_ERROR("failed to restore config after modeset failure\n"); out_config: intel_set_config_free(config); diff --git a/trunk/drivers/gpu/drm/i915/intel_dp.c b/trunk/drivers/gpu/drm/i915/intel_dp.c index 70789b1b5642..fb2fbc1e08b9 100644 --- a/trunk/drivers/gpu/drm/i915/intel_dp.c +++ b/trunk/drivers/gpu/drm/i915/intel_dp.c @@ -303,7 +303,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) if (has_aux_irq) done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, - msecs_to_jiffies_timeout(10)); + msecs_to_jiffies(10)); else done = wait_for_atomic(C, 10) == 0; if (!done) @@ -702,9 +702,6 @@ 
intel_dp_compute_config(struct intel_encoder *encoder, /* Walk through all bpp values. Luckily they're all nicely spaced with 2 * bpc in between. */ bpp = min_t(int, 8*3, pipe_config->pipe_bpp); - if (is_edp(intel_dp) && dev_priv->edp.bpp) - bpp = min_t(int, bpp, dev_priv->edp.bpp); - for (; bpp >= 6*3; bpp -= 2*3) { mode_rate = intel_dp_link_required(target_clock, bpp); @@ -742,7 +739,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_dp->link_bw = bws[clock]; intel_dp->lane_count = lane_count; adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); - pipe_config->pipe_bpp = bpp; pipe_config->pixel_target_clock = target_clock; DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", @@ -755,6 +751,20 @@ intel_dp_compute_config(struct intel_encoder *encoder, target_clock, adjusted_mode->clock, &pipe_config->dp_m_n); + /* + * XXX: We have a strange regression where using the vbt edp bpp value + * for the link bw computation results in black screens, the panel only + * works when we do the computation at the usual 24bpp (but still + * requires us to use 18bpp). Until that's fully debugged, stay + * bug-for-bug compatible with the old code. + */ + if (is_edp(intel_dp) && dev_priv->edp.bpp) { + DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", + bpp, dev_priv->edp.bpp); + bpp = min_t(int, bpp, dev_priv->edp.bpp); + } + pipe_config->pipe_bpp = bpp; + return true; } @@ -1379,7 +1389,6 @@ static void intel_enable_dp(struct intel_encoder *encoder) ironlake_edp_panel_on(intel_dp); ironlake_edp_panel_vdd_off(intel_dp, true); intel_dp_complete_link_train(intel_dp); - intel_dp_stop_link_train(intel_dp); ironlake_edp_backlight_on(intel_dp); } @@ -1702,9 +1711,10 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, struct drm_i915_private *dev_priv = dev->dev_private; enum port port = intel_dig_port->port; int ret; + uint32_t temp; if (HAS_DDI(dev)) { - uint32_t temp = I915_READ(DP_TP_CTL(port)); + temp = I915_READ(DP_TP_CTL(port)); if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) temp |= DP_TP_CTL_SCRAMBLE_DISABLE; @@ -1714,6 +1724,18 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { case DP_TRAINING_PATTERN_DISABLE: + + if (port != PORT_A) { + temp |= DP_TP_CTL_LINK_TRAIN_IDLE; + I915_WRITE(DP_TP_CTL(port), temp); + + if (wait_for((I915_READ(DP_TP_STATUS(port)) & + DP_TP_STATUS_IDLE_DONE), 1)) + DRM_ERROR("Timed out waiting for DP idle patterns\n"); + + temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; + } + temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; break; @@ -1789,37 +1811,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, return true; } -static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) -{ - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - enum port port = intel_dig_port->port; - uint32_t val; - - if (!HAS_DDI(dev)) - return; - - val = I915_READ(DP_TP_CTL(port)); - val &= ~DP_TP_CTL_LINK_TRAIN_MASK; - val |= DP_TP_CTL_LINK_TRAIN_IDLE; - I915_WRITE(DP_TP_CTL(port), val); - - /* - * On PORT_A we can have only eDP in SST mode. There the only reason - * we need to set idle transmission mode is to work around a HW issue - * where we enable the pipe while not in idle link-training mode. - * In this case there is requirement to wait for a minimum number of - * idle patterns to be sent. 
- */ - if (port == PORT_A) - return; - - if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE), - 1)) - DRM_ERROR("Timed out waiting for DP idle patterns\n"); -} - /* Enable corresponding port and start training pattern 1 */ void intel_dp_start_link_train(struct intel_dp *intel_dp) @@ -1962,19 +1953,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) ++tries; } - intel_dp_set_idle_link_train(intel_dp); - - intel_dp->DP = DP; - if (channel_eq) DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n"); -} - -void intel_dp_stop_link_train(struct intel_dp *intel_dp) -{ - intel_dp_set_link_train(intel_dp, intel_dp->DP, - DP_TRAINING_PATTERN_DISABLE); + intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); } static void @@ -2182,7 +2164,6 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) drm_get_encoder_name(&intel_encoder->base)); intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); - intel_dp_stop_link_train(intel_dp); } } diff --git a/trunk/drivers/gpu/drm/i915/intel_drv.h b/trunk/drivers/gpu/drm/i915/intel_drv.h index 624a9e6b8d71..b5b6d19e6dd3 100644 --- a/trunk/drivers/gpu/drm/i915/intel_drv.h +++ b/trunk/drivers/gpu/drm/i915/intel_drv.h @@ -499,7 +499,6 @@ extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port, extern void intel_dp_init_link_config(struct intel_dp *intel_dp); extern void intel_dp_start_link_train(struct intel_dp *intel_dp); extern void intel_dp_complete_link_train(struct intel_dp *intel_dp); -extern void intel_dp_stop_link_train(struct intel_dp *intel_dp); extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); extern void intel_dp_encoder_destroy(struct drm_encoder *encoder); extern void intel_dp_check_link_status(struct intel_dp *intel_dp); diff --git a/trunk/drivers/gpu/drm/i915/intel_fb.c b/trunk/drivers/gpu/drm/i915/intel_fb.c index 6b7c3ca2c035..0e19e575a1b4 100644 --- a/trunk/drivers/gpu/drm/i915/intel_fb.c +++ b/trunk/drivers/gpu/drm/i915/intel_fb.c @@ -262,22 +262,10 @@ void intel_fbdev_fini(struct drm_device *dev) void intel_fbdev_set_suspend(struct drm_device *dev, int state) { drm_i915_private_t *dev_priv = dev->dev_private; - struct intel_fbdev *ifbdev = dev_priv->fbdev; - struct fb_info *info; - - if (!ifbdev) + if (!dev_priv->fbdev) return; - info = ifbdev->helper.fbdev; - - /* On resume from hibernation: If the object is shmemfs backed, it has - * been restored from swap. If the object is stolen however, it will be - * full of whatever garbage was left in there. - */ - if (!state && ifbdev->ifb.obj->stolen) - memset_io(info->screen_base, 0, info->screen_size); - - fb_set_suspend(info, state); + fb_set_suspend(dev_priv->fbdev->helper.fbdev, state); } MODULE_LICENSE("GPL and additional rights"); diff --git a/trunk/drivers/gpu/drm/i915/intel_i2c.c b/trunk/drivers/gpu/drm/i915/intel_i2c.c index 639fe192997c..5d245031e391 100644 --- a/trunk/drivers/gpu/drm/i915/intel_i2c.c +++ b/trunk/drivers/gpu/drm/i915/intel_i2c.c @@ -228,7 +228,7 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv, * need to wake up periodically and check that ourselves. */ I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en); - for (i = 0; i < msecs_to_jiffies_timeout(50); i++) { + for (i = 0; i < msecs_to_jiffies(50) + 1; i++) { prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait, TASK_UNINTERRUPTIBLE); @@ -263,8 +263,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) /* Important: The hw handles only the first bit, so set only one! 
*/ I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN); - ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, - msecs_to_jiffies_timeout(10)); + ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10); I915_WRITE(GMBUS4 + reg_offset, 0); diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c index 29412cc89c7a..f36f1baabd5a 100644 --- a/trunk/drivers/gpu/drm/i915/intel_lvds.c +++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c @@ -815,10 +815,10 @@ static const struct dmi_system_id intel_no_lvds[] = { }, { .callback = intel_no_lvds_dmi_callback, - .ident = "Hewlett-Packard HP t5740", + .ident = "Hewlett-Packard HP t5740e Thin Client", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, " t5740"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"), }, }, { diff --git a/trunk/drivers/gpu/drm/i915/intel_pm.c b/trunk/drivers/gpu/drm/i915/intel_pm.c index aa01128ff192..de3b0dc5658b 100644 --- a/trunk/drivers/gpu/drm/i915/intel_pm.c +++ b/trunk/drivers/gpu/drm/i915/intel_pm.c @@ -1301,17 +1301,17 @@ static void valleyview_update_wm(struct drm_device *dev) vlv_update_drain_latency(dev); - if (g4x_compute_wm0(dev, PIPE_A, + if (g4x_compute_wm0(dev, 0, &valleyview_wm_info, latency_ns, &valleyview_cursor_wm_info, latency_ns, &planea_wm, &cursora_wm)) - enabled |= 1 << PIPE_A; + enabled |= 1; - if (g4x_compute_wm0(dev, PIPE_B, + if (g4x_compute_wm0(dev, 1, &valleyview_wm_info, latency_ns, &valleyview_cursor_wm_info, latency_ns, &planeb_wm, &cursorb_wm)) - enabled |= 1 << PIPE_B; + enabled |= 2; if (single_plane_enabled(enabled) && g4x_compute_srwm(dev, ffs(enabled) - 1, @@ -1357,17 +1357,17 @@ static void g4x_update_wm(struct drm_device *dev) int plane_sr, cursor_sr; unsigned int enabled = 0; - if (g4x_compute_wm0(dev, PIPE_A, + if (g4x_compute_wm0(dev, 0, &g4x_wm_info, latency_ns, &g4x_cursor_wm_info, latency_ns, &planea_wm, &cursora_wm)) - enabled |= 1 << PIPE_A; + enabled |= 1; - if (g4x_compute_wm0(dev, PIPE_B, + if (g4x_compute_wm0(dev, 1, &g4x_wm_info, latency_ns, &g4x_cursor_wm_info, latency_ns, &planeb_wm, &cursorb_wm)) - enabled |= 1 << PIPE_B; + enabled |= 2; if (single_plane_enabled(enabled) && g4x_compute_srwm(dev, ffs(enabled) - 1, @@ -1716,7 +1716,7 @@ static void ironlake_update_wm(struct drm_device *dev) unsigned int enabled; enabled = 0; - if (g4x_compute_wm0(dev, PIPE_A, + if (g4x_compute_wm0(dev, 0, &ironlake_display_wm_info, ILK_LP0_PLANE_LATENCY, &ironlake_cursor_wm_info, @@ -1727,10 +1727,10 @@ static void ironlake_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe A -" " plane %d, " "cursor: %d\n", plane_wm, cursor_wm); - enabled |= 1 << PIPE_A; + enabled |= 1; } - if (g4x_compute_wm0(dev, PIPE_B, + if (g4x_compute_wm0(dev, 1, &ironlake_display_wm_info, ILK_LP0_PLANE_LATENCY, &ironlake_cursor_wm_info, @@ -1741,7 +1741,7 @@ static void ironlake_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe B -" " plane %d, cursor: %d\n", plane_wm, cursor_wm); - enabled |= 1 << PIPE_B; + enabled |= 2; } /* @@ -1801,7 +1801,7 @@ static void sandybridge_update_wm(struct drm_device *dev) unsigned int enabled; enabled = 0; - if (g4x_compute_wm0(dev, PIPE_A, + if (g4x_compute_wm0(dev, 0, &sandybridge_display_wm_info, latency, &sandybridge_cursor_wm_info, latency, &plane_wm, &cursor_wm)) { @@ -1812,10 +1812,10 @@ static void sandybridge_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe A -" " plane %d, " "cursor: %d\n", plane_wm, cursor_wm); - 
enabled |= 1 << PIPE_A; + enabled |= 1; } - if (g4x_compute_wm0(dev, PIPE_B, + if (g4x_compute_wm0(dev, 1, &sandybridge_display_wm_info, latency, &sandybridge_cursor_wm_info, latency, &plane_wm, &cursor_wm)) { @@ -1826,7 +1826,7 @@ static void sandybridge_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe B -" " plane %d, cursor: %d\n", plane_wm, cursor_wm); - enabled |= 1 << PIPE_B; + enabled |= 2; } /* @@ -1904,7 +1904,7 @@ static void ivybridge_update_wm(struct drm_device *dev) unsigned int enabled; enabled = 0; - if (g4x_compute_wm0(dev, PIPE_A, + if (g4x_compute_wm0(dev, 0, &sandybridge_display_wm_info, latency, &sandybridge_cursor_wm_info, latency, &plane_wm, &cursor_wm)) { @@ -1915,10 +1915,10 @@ static void ivybridge_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe A -" " plane %d, " "cursor: %d\n", plane_wm, cursor_wm); - enabled |= 1 << PIPE_A; + enabled |= 1; } - if (g4x_compute_wm0(dev, PIPE_B, + if (g4x_compute_wm0(dev, 1, &sandybridge_display_wm_info, latency, &sandybridge_cursor_wm_info, latency, &plane_wm, &cursor_wm)) { @@ -1929,10 +1929,10 @@ static void ivybridge_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe B -" " plane %d, cursor: %d\n", plane_wm, cursor_wm); - enabled |= 1 << PIPE_B; + enabled |= 2; } - if (g4x_compute_wm0(dev, PIPE_C, + if (g4x_compute_wm0(dev, 2, &sandybridge_display_wm_info, latency, &sandybridge_cursor_wm_info, latency, &plane_wm, &cursor_wm)) { @@ -1943,7 +1943,7 @@ static void ivybridge_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe C -" " plane %d, cursor: %d\n", plane_wm, cursor_wm); - enabled |= 1 << PIPE_C; + enabled |= 3; } /* diff --git a/trunk/drivers/gpu/drm/i915/intel_sdvo.c b/trunk/drivers/gpu/drm/i915/intel_sdvo.c index d4ea6c265ce1..d15428404b9a 100644 --- a/trunk/drivers/gpu/drm/i915/intel_sdvo.c +++ b/trunk/drivers/gpu/drm/i915/intel_sdvo.c @@ -1776,14 +1776,11 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) * Assume that the preferred modes are * arranged in priority order. */ - intel_ddc_get_modes(connector, &intel_sdvo->ddc); + intel_ddc_get_modes(connector, intel_sdvo->i2c); + if (list_empty(&connector->probed_modes) == false) + goto end; - /* - * Fetch modes from VBT. For SDVO prefer the VBT mode since some - * SDVO->LVDS transcoders can't cope with the EDID mode. Since - * drm_mode_probed_add adds the mode at the head of the list we add it - * last. - */ + /* Fetch modes from VBT */ if (dev_priv->sdvo_lvds_vbt_mode != NULL) { newmode = drm_mode_duplicate(connector->dev, dev_priv->sdvo_lvds_vbt_mode); @@ -1795,6 +1792,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) } } +end: list_for_each_entry(newmode, &connector->probed_modes, head) { if (newmode->type & DRM_MODE_TYPE_PREFERRED) { intel_sdvo->sdvo_lvds_fixed_mode = @@ -2792,6 +2790,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915; } + /* Only enable the hotplug irq if we need it, to work around noisy + * hotplug lines. + */ + if (intel_sdvo->hotplug_active) + intel_encoder->hpd_pin = HPD_SDVO_B ? 
HPD_SDVO_B : HPD_SDVO_C; + intel_encoder->compute_config = intel_sdvo_compute_config; intel_encoder->disable = intel_disable_sdvo; intel_encoder->mode_set = intel_sdvo_mode_set; @@ -2810,14 +2814,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) goto err_output; } - /* Only enable the hotplug irq if we need it, to work around noisy - * hotplug lines. - */ - if (intel_sdvo->hotplug_active) { - intel_encoder->hpd_pin = - intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C; - } - /* * Cloning SDVO with anything is often impossible, since the SDVO * encoder can request a special input timing mode. And even if that's diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c index ee66badc8bb6..f9889658329b 100644 --- a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -46,26 +46,29 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc) static inline void mga_wait_vsync(struct mga_device *mdev) { - unsigned long timeout = jiffies + HZ/10; + unsigned int count = 0; unsigned int status = 0; do { status = RREG32(MGAREG_Status); - } while ((status & 0x08) && time_before(jiffies, timeout)); - timeout = jiffies + HZ/10; + count++; + } while ((status & 0x08) && (count < 250000)); + count = 0; status = 0; do { status = RREG32(MGAREG_Status); - } while (!(status & 0x08) && time_before(jiffies, timeout)); + count++; + } while (!(status & 0x08) && (count < 250000)); } static inline void mga_wait_busy(struct mga_device *mdev) { - unsigned long timeout = jiffies + HZ; + unsigned int count = 0; unsigned int status = 0; do { status = RREG8(MGAREG_Status + 2); - } while ((status & 0x01) && time_before(jiffies, timeout)); + count++; + } while ((status & 0x01) && (count < 500000)); } /* @@ -186,12 +189,12 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); WREG8(DAC_INDEX, MGA1064_REMHEADCTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_REMHEADCTL_CLKDIS; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_REMHEADCTL, tmp); /* select PLL Set C */ tmp = RREG8(MGAREG_MEM_MISC_READ); @@ -201,7 +204,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); udelay(500); @@ -209,7 +212,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_VREF_CTL); tmp = RREG8(DAC_DATA); tmp &= ~0x04; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_VREF_CTL, tmp); udelay(50); @@ -233,13 +236,13 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); WREG8(DAC_INDEX, MGA1064_REMHEADCTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK; tmp |= MGA1064_REMHEADCTL_CLKSL_PLL; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_REMHEADCTL, tmp); /* reset dotclock rate bit */ WREG8(MGAREG_SEQ_INDEX, 1); @@ -250,7 +253,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); vcount 
= RREG8(MGAREG_VCOUNT); @@ -315,7 +318,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= 0x3 << 2; @@ -323,12 +326,12 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); tmp = RREG8(DAC_DATA); - WREG8(DAC_DATA, tmp & ~0x40); + WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); WREG_DAC(MGA1064_EV_PIX_PLLC_M, m); WREG_DAC(MGA1064_EV_PIX_PLLC_N, n); @@ -339,7 +342,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); udelay(500); @@ -347,11 +350,11 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); tmp = RREG8(DAC_DATA); - WREG8(DAC_DATA, tmp | 0x40); + WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40); tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= (0x3 << 2); @@ -360,7 +363,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); return 0; } @@ -413,7 +416,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= 0x3 << 2; @@ -422,7 +425,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); udelay(500); @@ -436,13 +439,13 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); vcount = RREG8(MGAREG_VCOUNT); @@ -512,12 +515,12 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); WREG8(DAC_INDEX, MGA1064_REMHEADCTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_REMHEADCTL_CLKDIS; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_REMHEADCTL, tmp); tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= (0x3<<2) | 0xc0; @@ -527,7 +530,7 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock) tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; - WREG8(DAC_DATA, tmp); + WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 
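Note on the mga_wait_vsync()/mga_wait_busy() hunks above: they trade a jiffies deadline checked with time_before() for a fixed iteration count. A minimal standalone sketch of the two bounding styles follows; read_status() is only a stub standing in for the RREG32(MGAREG_Status) poll, and the bounds are illustrative values, not taken from hardware documentation.

#include <stdio.h>
#include <time.h>

static unsigned int read_status(void)
{
	/* stub for the status register read; pretend the busy bit clears
	 * after a while */
	static int calls;
	return (++calls < 1000) ? 0x08 : 0x00;
}

int main(void)
{
	/* (a) iteration-bounded poll, like the "count < 250000" form */
	unsigned int count = 0;
	while ((read_status() & 0x08) && count < 250000)
		count++;

	/* (b) wall-clock-bounded poll, like the jiffies/time_before() form;
	 * the bound no longer depends on how fast the loop body executes */
	time_t deadline = time(NULL) + 1;
	while ((read_status() & 0x08) && time(NULL) < deadline)
		;

	printf("polled %u times before the busy bit cleared\n", count);
	return 0;
}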
udelay(500); @@ -654,26 +657,12 @@ static void mga_g200wb_commit(struct drm_crtc *crtc) WREG_DAC(MGA1064_GEN_IO_DATA, tmp); } -/* - This is how the framebuffer base address is stored in g200 cards: - * Assume @offset is the gpu_addr variable of the framebuffer object - * Then addr is the number of _pixels_ (not bytes) from the start of - VRAM to the first pixel we want to display. (divided by 2 for 32bit - framebuffers) - * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers - addr<20> -> CRTCEXT0<6> - addr<19-16> -> CRTCEXT0<3-0> - addr<15-8> -> CRTCC<7-0> - addr<7-0> -> CRTCD<7-0> - CRTCEXT0 has to be programmed last to trigger an update and make the - new addr variable take effect. - */ + void mga_set_start_address(struct drm_crtc *crtc, unsigned offset) { struct mga_device *mdev = crtc->dev->dev_private; u32 addr; int count; - u8 crtcext0; while (RREG8(0x1fda) & 0x08); while (!(RREG8(0x1fda) & 0x08)); @@ -681,17 +670,10 @@ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset) count = RREG8(MGAREG_VCOUNT) + 2; while (RREG8(MGAREG_VCOUNT) < count); - WREG8(MGAREG_CRTCEXT_INDEX, 0); - crtcext0 = RREG8(MGAREG_CRTCEXT_DATA); - crtcext0 &= 0xB0; - addr = offset / 8; - /* Can't store addresses any higher than that... - but we also don't have more than 16MB of memory, so it should be fine. */ - WARN_ON(addr > 0x1fffff); - crtcext0 |= (!!(addr & (1<<20)))<<6; + addr = offset >> 2; WREG_CRT(0x0d, (u8)(addr & 0xff)); WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff); - WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0); + WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf); } @@ -847,7 +829,11 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, for (i = 0; i < sizeof(dacvalue); i++) { - if ((i <= 0x17) || + if ((i <= 0x03) || + (i == 0x07) || + (i == 0x0b) || + (i == 0x0f) || + ((i >= 0x13) && (i <= 0x17)) || (i == 0x1b) || (i == 0x1c) || ((i >= 0x1f) && (i <= 0x29)) || @@ -1034,14 +1020,13 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, else hi_pri_lvl = 5; - WREG8(MGAREG_CRTCEXT_INDEX, 0x06); - WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl); + WREG8(0x1fde, 0x06); + WREG8(0x1fdf, hi_pri_lvl); } else { - WREG8(MGAREG_CRTCEXT_INDEX, 0x06); if (mdev->reg_1e24 >= 0x01) - WREG8(MGAREG_CRTCEXT_DATA, 0x03); + WREG8(0x1fdf, 0x03); else - WREG8(MGAREG_CRTCEXT_DATA, 0x04); + WREG8(0x1fdf, 0x04); } } return 0; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c index a36e64e98ef3..955af122c3a6 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c @@ -138,6 +138,7 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; + device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; break; case 0xce: @@ -224,6 +225,7 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; + device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; break; case 0xc8: diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c index f02fd9f443ff..d0817d94454c 100644 --- 
a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c @@ -50,16 +50,11 @@ nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval) { const u32 doff = (or * 0x800); int load = -EINVAL; - nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000); - nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval); - mdelay(9); - udelay(500); + udelay(9500); nv_wr32(priv, 0x61a00c + doff, 0x80000000); load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27; nv_wr32(priv, 0x61a00c + doff, 0x00000000); - nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000); - nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); return load; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c index 7fdade6e604d..0d36bdc51417 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c @@ -55,10 +55,6 @@ nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) nv_wr32(priv, 0x616510 + hoff, 0x00000000); nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001); - nv_mask(priv, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */ - nv_mask(priv, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */ - nv_mask(priv, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */ - /* ??? */ nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c index e9b8217d0075..ddaeb5572903 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c @@ -40,8 +40,8 @@ * FIFO channel objects ******************************************************************************/ -static void -nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv) +void +nv50_fifo_playlist_update(struct nv50_fifo_priv *priv) { struct nouveau_bar *bar = nouveau_bar(priv); struct nouveau_gpuobj *cur; @@ -62,14 +62,6 @@ nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv) nv_wr32(priv, 0x002500, 0x00000101); } -void -nv50_fifo_playlist_update(struct nv50_fifo_priv *priv) -{ - mutex_lock(&nv_subdev(priv)->mutex); - nv50_fifo_playlist_update_locked(priv); - mutex_unlock(&nv_subdev(priv)->mutex); -} - static int nv50_fifo_context_attach(struct nouveau_object *parent, struct nouveau_object *object) @@ -495,7 +487,7 @@ nv50_fifo_init(struct nouveau_object *object) for (i = 0; i < 128; i++) nv_wr32(priv, 0x002600 + (i * 4), 0x00000000); - nv50_fifo_playlist_update_locked(priv); + nv50_fifo_playlist_update(priv); nv_wr32(priv, 0x003200, 0x00000001); nv_wr32(priv, 0x003250, 0x00000001); diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c index 46dfa68c47bb..4d4a6b905370 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c @@ -71,7 +71,6 @@ nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv) struct nouveau_gpuobj *cur; int i, p; - mutex_lock(&nv_subdev(priv)->mutex); cur = priv->playlist[priv->cur_playlist]; priv->cur_playlist = !priv->cur_playlist; @@ -88,7 +87,6 @@ nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv) nv_wr32(priv, 0x002274, 
0x01f00000 | (p >> 3)); if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000)) nv_error(priv, "playlist update failed\n"); - mutex_unlock(&nv_subdev(priv)->mutex); } static int @@ -250,17 +248,9 @@ nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend) struct nvc0_fifo_priv *priv = (void *)object->engine; struct nvc0_fifo_chan *chan = (void *)object; u32 chid = chan->base.chid; - u32 mask, engine; nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000); nvc0_fifo_playlist_update(priv); - mask = nv_rd32(priv, 0x0025a4); - for (engine = 0; mask && engine < 16; engine++) { - if (!(mask & (1 << engine))) - continue; - nv_mask(priv, 0x0025a8 + (engine * 4), 0x00000000, 0x00000000); - mask &= ~(1 << engine); - } nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000); return nouveau_fifo_channel_fini(&chan->base, suspend); diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c index 56192a7242ae..9151919fb831 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c @@ -94,13 +94,11 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine) u32 match = (engine << 16) | 0x00000001; int i, p; - mutex_lock(&nv_subdev(priv)->mutex); cur = engn->playlist[engn->cur_playlist]; if (unlikely(cur == NULL)) { int ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000, 0, &cur); if (ret) { - mutex_unlock(&nv_subdev(priv)->mutex); nv_error(priv, "playlist alloc failed\n"); return; } @@ -124,7 +122,6 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine) nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000)) nv_error(priv, "playlist %d update timeout\n", engine); - mutex_unlock(&nv_subdev(priv)->mutex); } static int diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/core/class.h b/trunk/drivers/gpu/drm/nouveau/core/include/core/class.h index 5a5961b6a6a3..0a393f7f055f 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/core/class.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/core/class.h @@ -218,7 +218,7 @@ struct nv04_display_class { #define NV50_DISP_DAC_PWR_STATE 0x00000040 #define NV50_DISP_DAC_PWR_STATE_ON 0x00000000 #define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040 -#define NV50_DISP_DAC_LOAD 0x00020100 +#define NV50_DISP_DAC_LOAD 0x0002000c #define NV50_DISP_DAC_LOAD_VALUE 0x00000007 #define NV50_DISP_PIOR_MTHD 0x00030000 diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/init.c index c434d398d16f..c300b5e7b670 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/init.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/init.c @@ -1940,8 +1940,8 @@ init_zm_mask_add(struct nvbios_init *init) trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add); init->offset += 13; - data = init_rd32(init, addr); - data = (data & mask) | ((data + add) & ~mask); + data = init_rd32(init, addr) & mask; + data |= ((data + add) & ~mask); init_wr32(init, addr, data); } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c index fb794e997fbc..e4940fb166e8 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c @@ -29,6 +29,7 @@ struct nvc0_ltcg_priv { struct nouveau_ltcg base; u32 part_nr; + u32 part_mask; u32 subp_nr; struct nouveau_mm tags; u32 
num_tags; @@ -104,6 +105,8 @@ nvc0_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count) /* wait until it's finished with clearing */ for (p = 0; p < priv->part_nr; ++p) { + if (!(priv->part_mask & (1 << p))) + continue; for (i = 0; i < priv->subp_nr; ++i) nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0); } @@ -118,8 +121,6 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) int ret; nv_wr32(priv, 0x17e8d8, priv->part_nr); - if (nv_device(pfb)->card_type >= NV_E0) - nv_wr32(priv, 0x17e000, priv->part_nr); /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ priv->num_tags = (pfb->ram.size >> 17) / 4; @@ -166,20 +167,16 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, { struct nvc0_ltcg_priv *priv; struct nouveau_fb *pfb = nouveau_fb(parent); - u32 parts, mask; - int ret, i; + int ret; ret = nouveau_ltcg_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; - parts = nv_rd32(priv, 0x022438); - mask = nv_rd32(priv, 0x022554); - for (i = 0; i < parts; i++) { - if (!(mask & (1 << i))) - priv->part_nr++; - } + priv->part_nr = nv_rd32(priv, 0x022438); + priv->part_mask = nv_rd32(priv, 0x022554); + priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_display.c b/trunk/drivers/gpu/drm/nouveau/nouveau_display.c index f17dc2ab03ec..7bf22d4a3d96 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_display.c @@ -638,8 +638,17 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, } s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); - if (s->event) - drm_send_vblank_event(dev, -1, s->event); + if (s->event) { + struct drm_pending_vblank_event *e = s->event; + struct timeval now; + + do_gettimeofday(&now); + e->event.sequence = 0; + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; + list_add_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + } list_del(&s->head); if (ps) diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c index 383f4e6ea9d1..46c152ff0a80 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -453,32 +453,18 @@ nouveau_do_suspend(struct drm_device *dev) NV_INFO(drm, "evicting buffers...\n"); ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM); - NV_INFO(drm, "waiting for kernel channels to go idle...\n"); - if (drm->cechan) { - ret = nouveau_channel_idle(drm->cechan); - if (ret) - return ret; - } - - if (drm->channel) { - ret = nouveau_channel_idle(drm->channel); - if (ret) - return ret; - } - - NV_INFO(drm, "suspending client object trees...\n"); if (drm->fence && nouveau_fence(drm)->suspend) { if (!nouveau_fence(drm)->suspend(drm)) return -ENOMEM; } + NV_INFO(drm, "suspending client object trees...\n"); list_for_each_entry(cli, &drm->clients, head) { ret = nouveau_client_fini(&cli->base, true); if (ret) goto fail_client; } - NV_INFO(drm, "suspending kernel object tree...\n"); ret = nouveau_client_fini(&drm->client.base, true); if (ret) goto fail_client; @@ -528,18 +514,17 @@ nouveau_do_resume(struct drm_device *dev) nouveau_agp_reset(drm); - NV_INFO(drm, "resuming kernel object tree...\n"); + NV_INFO(drm, "resuming client object trees...\n"); nouveau_client_init(&drm->client.base); 
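Note on the nvc0_ltcg hunks above: the new ctor keeps both the raw partition count (0x022438) and the enable mask (0x022554), and the tag-clear wait skips partitions whose mask bit is clear. A minimal standalone sketch of that pattern, with made-up values for the count and the mask:

#include <stdio.h>

int main(void)
{
	unsigned int part_nr = 4;	/* total partitions (made-up value) */
	unsigned int part_mask = 0xB;	/* 1011b: partition 2 fused off (made up) */
	unsigned int p, enabled = 0;

	for (p = 0; p < part_nr; p++) {
		if (!(part_mask & (1u << p)))
			continue;	/* skip the disabled partition */
		enabled++;
		printf("poll partition %u\n", p);
	}
	printf("%u of %u partitions enabled\n", enabled, part_nr);
	return 0;
}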
nouveau_agp_init(drm); - NV_INFO(drm, "resuming client object trees...\n"); - if (drm->fence && nouveau_fence(drm)->resume) - nouveau_fence(drm)->resume(drm); - list_for_each_entry(cli, &drm->clients, head) { nouveau_client_init(&cli->base); } + if (drm->fence && nouveau_fence(drm)->resume) + nouveau_fence(drm)->resume(drm); + nouveau_run_vbios_init(dev); nouveau_pm_resume(dev); diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_display.c b/trunk/drivers/gpu/drm/nouveau/nv50_display.c index dd5e01f89f28..ebf0a683305e 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_display.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_display.c @@ -1554,9 +1554,7 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct nv50_disp *disp = nv50_disp(encoder->dev); int ret, or = nouveau_encoder(encoder)->or; - u32 load = nouveau_drm(encoder->dev)->vbios.dactestval; - if (load == 0) - load = 340; + u32 load = 0; ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load)); if (ret || load != 7) diff --git a/trunk/drivers/gpu/drm/omapdrm/omap_drv.c b/trunk/drivers/gpu/drm/omapdrm/omap_drv.c index 826586ffbe83..9c53c25e5201 100644 --- a/trunk/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/trunk/drivers/gpu/drm/omapdrm/omap_drv.c @@ -649,9 +649,6 @@ static void pdev_shutdown(struct platform_device *device) static int pdev_probe(struct platform_device *device) { - if (omapdss_is_initialized() == false) - return -EPROBE_DEFER; - DBG("%s", device->name); return drm_platform_init(&omap_drm_driver, device); } diff --git a/trunk/drivers/gpu/drm/qxl/Kconfig b/trunk/drivers/gpu/drm/qxl/Kconfig index d6c12796023c..2f1a57e11140 100644 --- a/trunk/drivers/gpu/drm/qxl/Kconfig +++ b/trunk/drivers/gpu/drm/qxl/Kconfig @@ -4,7 +4,6 @@ config DRM_QXL select FB_SYS_FILLRECT select FB_SYS_COPYAREA select FB_SYS_IMAGEBLIT - select FB_DEFERRED_IO select DRM_KMS_HELPER select DRM_TTM help diff --git a/trunk/drivers/gpu/drm/qxl/qxl_cmd.c b/trunk/drivers/gpu/drm/qxl/qxl_cmd.c index f86771481317..08b0823c93d5 100644 --- a/trunk/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/trunk/drivers/gpu/drm/qxl/qxl_cmd.c @@ -277,7 +277,7 @@ int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, return 0; } -static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) +static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port) { int irq_num; long addr = qdev->io_base + port; @@ -285,29 +285,20 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, mutex_lock(&qdev->async_io_mutex); irq_num = atomic_read(&qdev->irq_received_io_cmd); + + if (qdev->last_sent_io_cmd > irq_num) { - if (intr) - ret = wait_event_interruptible_timeout(qdev->io_cmd_event, - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); - else - ret = wait_event_timeout(qdev->io_cmd_event, - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); - /* 0 is timeout, just bail the "hw" has gone away */ - if (ret <= 0) + ret = wait_event_interruptible(qdev->io_cmd_event, + atomic_read(&qdev->irq_received_io_cmd) > irq_num); + if (ret) goto out; irq_num = atomic_read(&qdev->irq_received_io_cmd); } outb(val, addr); qdev->last_sent_io_cmd = irq_num + 1; - if (intr) - ret = wait_event_interruptible_timeout(qdev->io_cmd_event, - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); - else - ret = wait_event_timeout(qdev->io_cmd_event, - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + ret = wait_event_interruptible(qdev->io_cmd_event, + 
atomic_read(&qdev->irq_received_io_cmd) > irq_num); out: - if (ret > 0) - ret = 0; mutex_unlock(&qdev->async_io_mutex); return ret; } @@ -317,7 +308,7 @@ static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port) int ret; restart: - ret = wait_for_io_cmd_user(qdev, val, port, false); + ret = wait_for_io_cmd_user(qdev, val, port); if (ret == -ERESTARTSYS) goto restart; } @@ -349,7 +340,7 @@ int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf, mutex_lock(&qdev->update_area_mutex); qdev->ram_header->update_area = *area; qdev->ram_header->update_surface = surface_id; - ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true); + ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC); mutex_unlock(&qdev->update_area_mutex); return ret; } diff --git a/trunk/drivers/gpu/drm/qxl/qxl_display.c b/trunk/drivers/gpu/drm/qxl/qxl_display.c index 823d29e926ec..fcfd4436ceed 100644 --- a/trunk/drivers/gpu/drm/qxl/qxl_display.c +++ b/trunk/drivers/gpu/drm/qxl/qxl_display.c @@ -428,10 +428,10 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, int inc = 1; qobj = gem_to_qxl_bo(qxl_fb->obj); - /* if we aren't primary surface ignore this */ - if (!qobj->is_primary) - return 0; - + if (qxl_fb != qdev->active_user_framebuffer) { + DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n", + __func__, qxl_fb, qdev->active_user_framebuffer); + } if (!num_clips) { num_clips = 1; clips = &norect; @@ -604,6 +604,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, mode->hdisplay, mode->vdisplay); } + qdev->mode_set = true; return 0; } @@ -892,6 +893,7 @@ qxl_user_framebuffer_create(struct drm_device *dev, { struct drm_gem_object *obj; struct qxl_framebuffer *qxl_fb; + struct qxl_device *qdev = dev->dev_private; int ret; obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); @@ -907,6 +909,13 @@ qxl_user_framebuffer_create(struct drm_device *dev, return NULL; } + if (qdev->active_user_framebuffer) { + DRM_INFO("%s: active_user_framebuffer %p -> %p\n", + __func__, + qdev->active_user_framebuffer, qxl_fb); + } + qdev->active_user_framebuffer = qxl_fb; + return &qxl_fb->base; } diff --git a/trunk/drivers/gpu/drm/qxl/qxl_drv.h b/trunk/drivers/gpu/drm/qxl/qxl_drv.h index 43d06ab28a21..52b582c211da 100644 --- a/trunk/drivers/gpu/drm/qxl/qxl_drv.h +++ b/trunk/drivers/gpu/drm/qxl/qxl_drv.h @@ -255,6 +255,12 @@ struct qxl_device { struct qxl_gem gem; struct qxl_mode_info mode_info; + /* + * last created framebuffer with fb_create + * only used by debugfs dumbppm + */ + struct qxl_framebuffer *active_user_framebuffer; + struct fb_info *fbdev_info; struct qxl_framebuffer *fbdev_qfb; void *ram_physical; @@ -264,6 +270,7 @@ struct qxl_device { struct qxl_ring *cursor_ring; struct qxl_ram_header *ram_header; + bool mode_set; bool primary_created; diff --git a/trunk/drivers/gpu/drm/qxl/qxl_ioctl.c b/trunk/drivers/gpu/drm/qxl/qxl_ioctl.c index a30f29425c21..04b64f9cbfdb 100644 --- a/trunk/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/trunk/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -151,7 +151,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct qxl_bo *cmd_bo; int release_type; struct drm_qxl_command *commands = - (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; + (struct drm_qxl_command *)execbuffer->commands; if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], sizeof(user_cmd))) @@ -171,11 +171,6 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, if (user_cmd.command_size > PAGE_SIZE - 
sizeof(union qxl_release_info)) return -EINVAL; - if (!access_ok(VERIFY_READ, - (void *)(unsigned long)user_cmd.command, - user_cmd.command_size)) - return -EFAULT; - ret = qxl_alloc_release_reserved(qdev, sizeof(union qxl_release_info) + user_cmd.command_size, @@ -198,7 +193,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, for (i = 0 ; i < user_cmd.relocs_num; ++i) { if (DRM_COPY_FROM_USER(&reloc, - &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i], + &((struct drm_qxl_reloc *)user_cmd.relocs)[i], sizeof(reloc))) { qxl_bo_list_unreserve(&reloc_list, true); qxl_release_unreserve(qdev, release); @@ -299,7 +294,6 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data, goto out; if (!qobj->pin_count) { - qxl_ttm_placement_from_domain(qobj, qobj->type); ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, true, false); if (unlikely(ret)) diff --git a/trunk/drivers/gpu/drm/qxl/qxl_kms.c b/trunk/drivers/gpu/drm/qxl/qxl_kms.c index e27ce2a907cf..85127ed24cfd 100644 --- a/trunk/drivers/gpu/drm/qxl/qxl_kms.c +++ b/trunk/drivers/gpu/drm/qxl/qxl_kms.c @@ -128,13 +128,12 @@ int qxl_device_init(struct qxl_device *qdev, qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0)); qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size); - DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk)\n", - (unsigned long long)qdev->vram_base, - (unsigned long long)pci_resource_end(pdev, 0), + DRM_DEBUG_KMS("qxl: vram %p-%p(%dM %dk), surface %p-%p(%dM %dk)\n", + (void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0), (int)pci_resource_len(pdev, 0) / 1024 / 1024, (int)pci_resource_len(pdev, 0) / 1024, - (unsigned long long)qdev->surfaceram_base, - (unsigned long long)pci_resource_end(pdev, 1), + (void *)qdev->surfaceram_base, + (void *)pci_resource_end(pdev, 1), (int)qdev->surfaceram_size / 1024 / 1024, (int)qdev->surfaceram_size / 1024); diff --git a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c index d5df8fd10217..6d6fdb3ba0d0 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1811,9 +1811,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, static void atombios_crtc_prepare(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; + radeon_crtc->in_mode_set = true; + /* disable crtc pair power gating before programming */ if (ASIC_IS_DCE6(rdev)) atombios_powergate_crtc(crtc, ATOM_DISABLE); @@ -1824,8 +1827,11 @@ static void atombios_crtc_prepare(struct drm_crtc *crtc) static void atombios_crtc_commit(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON); atombios_lock_crtc(crtc, ATOM_DISABLE); + radeon_crtc->in_mode_set = false; } static void atombios_crtc_disable(struct drm_crtc *crtc) diff --git a/trunk/drivers/gpu/drm/radeon/atombios_encoders.c b/trunk/drivers/gpu/drm/radeon/atombios_encoders.c index 8406c8251fbf..44a7da66e081 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_encoders.c @@ -667,8 +667,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) int atombios_get_encoder_mode(struct drm_encoder *encoder) { - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; struct 
radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_connector *connector; struct radeon_connector *radeon_connector; @@ -695,8 +693,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ if (drm_detect_hdmi_monitor(radeon_connector->edid) && - radeon_audio && - !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */ + radeon_audio) return ATOM_ENCODER_MODE_HDMI; else if (radeon_connector->use_digital) return ATOM_ENCODER_MODE_DVI; @@ -707,8 +704,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) case DRM_MODE_CONNECTOR_HDMIA: default: if (drm_detect_hdmi_monitor(radeon_connector->edid) && - radeon_audio && - !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */ + radeon_audio) return ATOM_ENCODER_MODE_HDMI; else return ATOM_ENCODER_MODE_DVI; @@ -722,8 +718,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) return ATOM_ENCODER_MODE_DP; else if (drm_detect_hdmi_monitor(radeon_connector->edid) && - radeon_audio && - !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */ + radeon_audio) return ATOM_ENCODER_MODE_HDMI; else return ATOM_ENCODER_MODE_DVI; diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c index 0f89ce3d02b9..105bafb6c29d 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen.c @@ -2343,13 +2343,11 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav u32 crtc_enabled, tmp, frame_count, blackout; int i, j; - if (!ASIC_IS_NODCE(rdev)) { - save->vga_render_control = RREG32(VGA_RENDER_CONTROL); - save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); + save->vga_render_control = RREG32(VGA_RENDER_CONTROL); + save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); - /* disable VGA render */ - WREG32(VGA_RENDER_CONTROL, 0); - } + /* disable VGA render */ + WREG32(VGA_RENDER_CONTROL, 0); /* blank the display controllers */ for (i = 0; i < rdev->num_crtc; i++) { crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN; @@ -2440,11 +2438,8 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], (u32)rdev->mc.vram_start); } - - if (!ASIC_IS_NODCE(rdev)) { - WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); - } + WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); /* unlock regs and wait for update */ for (i = 0; i < rdev->num_crtc; i++) { @@ -2504,12 +2499,10 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s } } } - if (!ASIC_IS_NODCE(rdev)) { - /* Unlock vga access */ - WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); - mdelay(1); - WREG32(VGA_RENDER_CONTROL, save->vga_render_control); - } + /* Unlock vga access */ + WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); + mdelay(1); + WREG32(VGA_RENDER_CONTROL, save->vga_render_control); } void evergreen_mc_program(struct radeon_device *rdev) @@ -3412,8 +3405,8 @@ int evergreen_mc_init(struct radeon_device *rdev) rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); } else { /* size in MB on evergreen/cayman/tn */ - rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL 
* 1024ULL; - rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; + rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; } rdev->mc.visible_vram_size = rdev->mc.aper_size; r700_vram_gtt_location(rdev, &rdev->mc); @@ -4754,12 +4747,6 @@ static int evergreen_startup(struct radeon_device *rdev) rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; /* Enable IRQ */ - if (!rdev->irq.installed) { - r = radeon_irq_kms_init(rdev); - if (r) - return r; - } - r = r600_irq_init(rdev); if (r) { DRM_ERROR("radeon: IH init failed (%d).\n", r); @@ -4929,6 +4916,10 @@ int evergreen_init(struct radeon_device *rdev) if (r) return r; + r = radeon_irq_kms_init(rdev); + if (r) + return r; + rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); @@ -5001,7 +4992,8 @@ void evergreen_fini(struct radeon_device *rdev) void evergreen_pcie_gen2_enable(struct radeon_device *rdev) { - u32 link_width_cntl, speed_cntl; + u32 link_width_cntl, speed_cntl, mask; + int ret; if (radeon_pcie_gen2 == 0) return; @@ -5016,8 +5008,11 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev) if (ASIC_IS_X2(rdev)) return; - if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) && - (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT)) + ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); + if (ret != 0) + return; + + if (!(mask & DRM_PCIE_SPEED_50)) return; speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); diff --git a/trunk/drivers/gpu/drm/radeon/evergreen_hdmi.c b/trunk/drivers/gpu/drm/radeon/evergreen_hdmi.c index ed7c8a768092..b4ab8ceb1654 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -154,18 +154,19 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock) struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); - u32 base_rate = 24000; + u32 base_rate = 48000; if (!dig || !dig->afmt) return; + /* XXX: properly calculate this */ /* XXX two dtos; generally use dto0 for hdmi */ /* Express [24MHz / target pixel clock] as an exact rational * number (coefficient of two integer numbers. 
DCCG_AUDIO_DTOx_PHASE * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator */ - WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); - WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); + WREG32(DCCG_AUDIO_DTO0_PHASE, (base_rate*50) & 0xffffff); + WREG32(DCCG_AUDIO_DTO0_MODULE, (clock*100) & 0xffffff); WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); } diff --git a/trunk/drivers/gpu/drm/radeon/ni.c b/trunk/drivers/gpu/drm/radeon/ni.c index 84583302b081..7969c0c8ec20 100644 --- a/trunk/drivers/gpu/drm/radeon/ni.c +++ b/trunk/drivers/gpu/drm/radeon/ni.c @@ -2025,12 +2025,6 @@ static int cayman_startup(struct radeon_device *rdev) } /* Enable IRQ */ - if (!rdev->irq.installed) { - r = radeon_irq_kms_init(rdev); - if (r) - return r; - } - r = r600_irq_init(rdev); if (r) { DRM_ERROR("radeon: IH init failed (%d).\n", r); @@ -2196,6 +2190,10 @@ int cayman_init(struct radeon_device *rdev) if (r) return r; + r = radeon_irq_kms_init(rdev); + if (r) + return r; + ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); diff --git a/trunk/drivers/gpu/drm/radeon/r100.c b/trunk/drivers/gpu/drm/radeon/r100.c index d0314ecbd7c1..4973bff37fec 100644 --- a/trunk/drivers/gpu/drm/radeon/r100.c +++ b/trunk/drivers/gpu/drm/radeon/r100.c @@ -3869,12 +3869,6 @@ static int r100_startup(struct radeon_device *rdev) } /* Enable IRQ */ - if (!rdev->irq.installed) { - r = radeon_irq_kms_init(rdev); - if (r) - return r; - } - r100_irq_set(rdev); rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ @@ -4028,6 +4022,9 @@ int r100_init(struct radeon_device *rdev) r100_mc_init(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); + if (r) + return r; + r = radeon_irq_kms_init(rdev); if (r) return r; /* Memory manager */ diff --git a/trunk/drivers/gpu/drm/radeon/r300.c b/trunk/drivers/gpu/drm/radeon/r300.c index b9b776f1e582..c60350e6872d 100644 --- a/trunk/drivers/gpu/drm/radeon/r300.c +++ b/trunk/drivers/gpu/drm/radeon/r300.c @@ -1382,12 +1382,6 @@ static int r300_startup(struct radeon_device *rdev) } /* Enable IRQ */ - if (!rdev->irq.installed) { - r = radeon_irq_kms_init(rdev); - if (r) - return r; - } - r100_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ @@ -1520,6 +1514,9 @@ int r300_init(struct radeon_device *rdev) r300_mc_init(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); + if (r) + return r; + r = radeon_irq_kms_init(rdev); if (r) return r; /* Memory manager */ diff --git a/trunk/drivers/gpu/drm/radeon/r300_cmdbuf.c b/trunk/drivers/gpu/drm/radeon/r300_cmdbuf.c index 60170ea5e3a2..865e2c9980db 100644 --- a/trunk/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/trunk/drivers/gpu/drm/radeon/r300_cmdbuf.c @@ -75,7 +75,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1)); for (i = 0; i < nr; ++i) { - if (DRM_COPY_FROM_USER + if (DRM_COPY_FROM_USER_UNCHECKED (&box, &cmdbuf->boxes[n + i], sizeof(box))) { DRM_ERROR("copy cliprect faulted\n"); return -EFAULT; diff --git a/trunk/drivers/gpu/drm/radeon/r420.c b/trunk/drivers/gpu/drm/radeon/r420.c index 4e796ecf9ea4..6fce2eb4dd16 100644 --- a/trunk/drivers/gpu/drm/radeon/r420.c +++ b/trunk/drivers/gpu/drm/radeon/r420.c @@ -265,12 +265,6 @@ static int r420_startup(struct radeon_device *rdev) } /* Enable IRQ */ - if (!rdev->irq.installed) { - r = radeon_irq_kms_init(rdev); - if (r) - return r; - } - r100_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 
1M ring buffer */ @@ -417,6 +411,10 @@ int r420_init(struct radeon_device *rdev) if (r) { return r; } + r = radeon_irq_kms_init(rdev); + if (r) { + return r; + } /* Memory manager */ r = radeon_bo_init(rdev); if (r) { diff --git a/trunk/drivers/gpu/drm/radeon/r520.c b/trunk/drivers/gpu/drm/radeon/r520.c index e1aece73b370..f795a4e092cb 100644 --- a/trunk/drivers/gpu/drm/radeon/r520.c +++ b/trunk/drivers/gpu/drm/radeon/r520.c @@ -194,12 +194,6 @@ static int r520_startup(struct radeon_device *rdev) } /* Enable IRQ */ - if (!rdev->irq.installed) { - r = radeon_irq_kms_init(rdev); - if (r) - return r; - } - rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ @@ -301,6 +295,9 @@ int r520_init(struct radeon_device *rdev) rv515_debugfs(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); + if (r) + return r; + r = radeon_irq_kms_init(rdev); if (r) return r; /* Memory manager */ diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c index 6948eb88c2b7..1a08008c978b 100644 --- a/trunk/drivers/gpu/drm/radeon/r600.c +++ b/trunk/drivers/gpu/drm/radeon/r600.c @@ -1046,24 +1046,6 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev) return -1; } -uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg) -{ - uint32_t r; - - WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg)); - r = RREG32(R_0028FC_MC_DATA); - WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR); - return r; -} - -void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) -{ - WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) | - S_0028F8_MC_IND_WR_EN(1)); - WREG32(R_0028FC_MC_DATA, v); - WREG32(R_0028F8_MC_INDEX, 0x7F); -} - static void r600_mc_program(struct radeon_device *rdev) { struct rv515_mc_save save; @@ -1199,8 +1181,6 @@ static int r600_mc_init(struct radeon_device *rdev) { u32 tmp; int chansize, numchan; - uint32_t h_addr, l_addr; - unsigned long long k8_addr; /* Get VRAM informations */ rdev->mc.vram_is_ddr = true; @@ -1241,30 +1221,7 @@ static int r600_mc_init(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_IGP) { rs690_pm_info(rdev); rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); - - if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) { - /* Use K8 direct mapping for fast fb access. */ - rdev->fastfb_working = false; - h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL)); - l_addr = RREG32_MC(R_000011_K8_FB_LOCATION); - k8_addr = ((unsigned long long)h_addr) << 32 | l_addr; -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE) - if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL) -#endif - { - /* FastFB shall be used with UMA memory. Here it is simply disabled when sideport - * memory is present. 
- */ - if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) { - DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n", - (unsigned long long)rdev->mc.aper_base, k8_addr); - rdev->mc.aper_base = (resource_size_t)k8_addr; - rdev->fastfb_working = true; - } - } - } } - radeon_update_bandwidth_info(rdev); return 0; } @@ -2687,9 +2644,6 @@ void r600_uvd_rbc_stop(struct radeon_device *rdev) int r600_uvd_init(struct radeon_device *rdev) { int i, j, r; - /* disable byte swapping */ - u32 lmi_swap_cntl = 0; - u32 mp_swap_cntl = 0; /* raise clocks while booting up the VCPU */ radeon_set_uvd_clocks(rdev, 53300, 40000); @@ -2714,13 +2668,9 @@ int r600_uvd_init(struct radeon_device *rdev) WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | (1 << 21) | (1 << 9) | (1 << 20)); -#ifdef __BIG_ENDIAN - /* swap (8 in 32) RB and IB */ - lmi_swap_cntl = 0xa; - mp_swap_cntl = 0; -#endif - WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl); - WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl); + /* disable byte swapping */ + WREG32(UVD_LMI_SWAP_CNTL, 0); + WREG32(UVD_MP_SWAP_CNTL, 0); WREG32(UVD_MPC_SET_MUXA0, 0x40c2040); WREG32(UVD_MPC_SET_MUXA1, 0x0); @@ -3252,12 +3202,6 @@ static int r600_startup(struct radeon_device *rdev) } /* Enable IRQ */ - if (!rdev->irq.installed) { - r = radeon_irq_kms_init(rdev); - if (r) - return r; - } - r = r600_irq_init(rdev); if (r) { DRM_ERROR("radeon: IH init failed (%d).\n", r); @@ -3412,6 +3356,10 @@ int r600_init(struct radeon_device *rdev) if (r) return r; + r = radeon_irq_kms_init(rdev); + if (r) + return r; + rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); @@ -4683,6 +4631,8 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev) { u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; u16 link_cntl2; + u32 mask; + int ret; if (radeon_pcie_gen2 == 0) return; @@ -4701,8 +4651,11 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev) if (rdev->family <= CHIP_R600) return; - if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) && - (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT)) + ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); + if (ret != 0) + return; + + if (!(mask & DRM_PCIE_SPEED_50)) return; speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); diff --git a/trunk/drivers/gpu/drm/radeon/r600_hdmi.c b/trunk/drivers/gpu/drm/radeon/r600_hdmi.c index 456750a0daa5..47f180a79352 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/trunk/drivers/gpu/drm/radeon/r600_hdmi.c @@ -232,7 +232,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - u32 base_rate = 24000; + u32 base_rate = 48000; if (!dig || !dig->afmt) return; @@ -240,6 +240,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT. * doesn't matter which one you use. Just use the first one. */ + /* XXX: properly calculate this */ /* XXX two dtos; generally use dto0 for hdmi */ /* Express [24MHz / target pixel clock] as an exact rational * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE @@ -249,13 +250,13 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) /* according to the reg specs, this should DCE3.2 only, but in * practice it seems to cover DCE3.0 as well. 
*/ - WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); + WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 50); WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ } else { /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ - WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | - AUDIO_DTO_MODULE(clock / 10)); + WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate * 50) | + AUDIO_DTO_MODULE(clock * 100)); } } diff --git a/trunk/drivers/gpu/drm/radeon/r600d.h b/trunk/drivers/gpu/drm/radeon/r600d.h index 79df558f8c40..acb146c06973 100644 --- a/trunk/drivers/gpu/drm/radeon/r600d.h +++ b/trunk/drivers/gpu/drm/radeon/r600d.h @@ -1342,14 +1342,6 @@ #define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */ #define PACKET3_SURFACE_BASE_UPDATE 0x73 -#define R_000011_K8_FB_LOCATION 0x11 -#define R_000012_MC_MISC_UMA_CNTL 0x12 -#define G_000012_K8_ADDR_EXT(x) (((x) >> 0) & 0xFF) -#define R_0028F8_MC_INDEX 0x28F8 -#define S_0028F8_MC_IND_ADDR(x) (((x) & 0x1FF) << 0) -#define C_0028F8_MC_IND_ADDR 0xFFFFFE00 -#define S_0028F8_MC_IND_WR_EN(x) (((x) & 0x1) << 9) -#define R_0028FC_MC_DATA 0x28FC #define R_008020_GRBM_SOFT_RESET 0x8020 #define S_008020_SOFT_RESET_CP(x) (((x) & 1) << 0) diff --git a/trunk/drivers/gpu/drm/radeon/radeon.h b/trunk/drivers/gpu/drm/radeon/radeon.h index 142ce6cc69f5..1442ce765d48 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon.h +++ b/trunk/drivers/gpu/drm/radeon/radeon.h @@ -1694,7 +1694,6 @@ struct radeon_device { int num_crtc; /* number of crtcs */ struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ bool audio_enabled; - bool has_uvd; struct r600_audio audio_status; /* audio stuff */ struct notifier_block acpi_nb; /* only one userspace can use Hyperz features or CMASK at a time */ @@ -1839,7 +1838,6 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); #define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \ (rdev->flags & RADEON_IS_IGP)) #define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND)) -#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN)) /* * BIOS helpers. 
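Note on the r600_hdmi DTO hunk above: per the in-code comment, DCCG_AUDIO_DTO0_PHASE / DCCG_AUDIO_DTO0_MODULE encode the ratio 24 MHz / target pixel clock. Both variants in that hunk program the same numerator on the DCE3.2 branch, since 24000 * 100 == 48000 * 50. A standalone arithmetic sketch, assuming the pixel clock is given in kHz:

#include <stdio.h>

int main(void)
{
	unsigned int clock = 148500;		/* example pixel clock, assumed kHz */
	unsigned int phase_a = 24000 * 100;	/* base_rate 24000 with the *100 factor */
	unsigned int phase_b = 48000 * 50;	/* base_rate 48000 with the *50 factor */
	unsigned int module = clock * 100;

	printf("phase_a=%u phase_b=%u equal=%d\n",
	       phase_a, phase_b, phase_a == phase_b);
	printf("phase/module = %.6f, 24 MHz / pixel clock = %.6f\n",
	       (double)phase_a / module, 24000.0 / clock);
	return 0;
}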
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.c b/trunk/drivers/gpu/drm/radeon/radeon_asic.c index a2802b47ee95..6417132c50cf 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_asic.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.c @@ -122,10 +122,6 @@ static void radeon_register_accessor_init(struct radeon_device *rdev) rdev->mc_rreg = &rs600_mc_rreg; rdev->mc_wreg = &rs600_mc_wreg; } - if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) { - rdev->mc_rreg = &rs780_mc_rreg; - rdev->mc_wreg = &rs780_mc_wreg; - } if (rdev->family >= CHIP_R600) { rdev->pciep_rreg = &r600_pciep_rreg; rdev->pciep_wreg = &r600_pciep_wreg; @@ -1939,8 +1935,6 @@ int radeon_asic_init(struct radeon_device *rdev) else rdev->num_crtc = 2; - rdev->has_uvd = false; - switch (rdev->family) { case CHIP_R100: case CHIP_RV100: @@ -2005,22 +1999,16 @@ int radeon_asic_init(struct radeon_device *rdev) case CHIP_RV635: case CHIP_RV670: rdev->asic = &r600_asic; - if (rdev->family == CHIP_R600) - rdev->has_uvd = false; - else - rdev->has_uvd = true; break; case CHIP_RS780: case CHIP_RS880: rdev->asic = &rs780_asic; - rdev->has_uvd = true; break; case CHIP_RV770: case CHIP_RV730: case CHIP_RV710: case CHIP_RV740: rdev->asic = &rv770_asic; - rdev->has_uvd = true; break; case CHIP_CEDAR: case CHIP_REDWOOD: @@ -2033,13 +2021,11 @@ int radeon_asic_init(struct radeon_device *rdev) else rdev->num_crtc = 6; rdev->asic = &evergreen_asic; - rdev->has_uvd = true; break; case CHIP_PALM: case CHIP_SUMO: case CHIP_SUMO2: rdev->asic = &sumo_asic; - rdev->has_uvd = true; break; case CHIP_BARTS: case CHIP_TURKS: @@ -2050,37 +2036,27 @@ int radeon_asic_init(struct radeon_device *rdev) else rdev->num_crtc = 6; rdev->asic = &btc_asic; - rdev->has_uvd = true; break; case CHIP_CAYMAN: rdev->asic = &cayman_asic; /* set num crtcs */ rdev->num_crtc = 6; - rdev->has_uvd = true; break; case CHIP_ARUBA: rdev->asic = &trinity_asic; /* set num crtcs */ rdev->num_crtc = 4; - rdev->has_uvd = true; break; case CHIP_TAHITI: case CHIP_PITCAIRN: case CHIP_VERDE: case CHIP_OLAND: - case CHIP_HAINAN: rdev->asic = &si_asic; /* set num crtcs */ - if (rdev->family == CHIP_HAINAN) - rdev->num_crtc = 0; - else if (rdev->family == CHIP_OLAND) + if (rdev->family == CHIP_OLAND) rdev->num_crtc = 2; else rdev->num_crtc = 6; - if (rdev->family == CHIP_HAINAN) - rdev->has_uvd = false; - else - rdev->has_uvd = true; break; default: /* FIXME: not supported yet */ diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.h b/trunk/drivers/gpu/drm/radeon/radeon_asic.h index a72759ede753..2c87365d345f 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_asic.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.h @@ -347,8 +347,6 @@ extern bool r600_gui_idle(struct radeon_device *rdev); extern void r600_pm_misc(struct radeon_device *rdev); extern void r600_pm_init_profile(struct radeon_device *rdev); extern void rs780_pm_init_profile(struct radeon_device *rdev); -extern uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg); -extern void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); extern void r600_pm_get_dynpm_state(struct radeon_device *rdev); extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes); extern int r600_get_pcie_lanes(struct radeon_device *rdev); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_bios.c b/trunk/drivers/gpu/drm/radeon/radeon_bios.c index 061b227dae0c..fa3c56fba294 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_bios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_bios.c @@ -244,28 +244,24 @@ static bool 
ni_read_disabled_bios(struct radeon_device *rdev) /* enable the rom */ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); - if (!ASIC_IS_NODCE(rdev)) { - /* Disable VGA mode */ - WREG32(AVIVO_D1VGA_CONTROL, - (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | - AVIVO_DVGA_CONTROL_TIMING_SELECT))); - WREG32(AVIVO_D2VGA_CONTROL, - (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | - AVIVO_DVGA_CONTROL_TIMING_SELECT))); - WREG32(AVIVO_VGA_RENDER_CONTROL, - (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); - } + /* Disable VGA mode */ + WREG32(AVIVO_D1VGA_CONTROL, + (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | + AVIVO_DVGA_CONTROL_TIMING_SELECT))); + WREG32(AVIVO_D2VGA_CONTROL, + (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | + AVIVO_DVGA_CONTROL_TIMING_SELECT))); + WREG32(AVIVO_VGA_RENDER_CONTROL, + (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE); r = radeon_read_bios(rdev); /* restore regs */ WREG32(R600_BUS_CNTL, bus_cntl); - if (!ASIC_IS_NODCE(rdev)) { - WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); - WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); - WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); - } + WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); + WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); + WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); WREG32(R600_ROM_CNTL, rom_cntl); return r; } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_device.c b/trunk/drivers/gpu/drm/radeon/radeon_device.c index b0dc0b6cb4e0..a8f608903989 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_device.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_device.c @@ -94,7 +94,6 @@ static const char radeon_family_name[][16] = { "PITCAIRN", "VERDE", "OLAND", - "HAINAN", "LAST", }; @@ -244,6 +243,16 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) */ void radeon_wb_disable(struct radeon_device *rdev) { + int r; + + if (rdev->wb.wb_obj) { + r = radeon_bo_reserve(rdev->wb.wb_obj, false); + if (unlikely(r != 0)) + return; + radeon_bo_kunmap(rdev->wb.wb_obj); + radeon_bo_unpin(rdev->wb.wb_obj); + radeon_bo_unreserve(rdev->wb.wb_obj); + } rdev->wb.enabled = false; } @@ -259,11 +268,6 @@ void radeon_wb_fini(struct radeon_device *rdev) { radeon_wb_disable(rdev); if (rdev->wb.wb_obj) { - if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) { - radeon_bo_kunmap(rdev->wb.wb_obj); - radeon_bo_unpin(rdev->wb.wb_obj); - radeon_bo_unreserve(rdev->wb.wb_obj); - } radeon_bo_unref(&rdev->wb.wb_obj); rdev->wb.wb = NULL; rdev->wb.wb_obj = NULL; @@ -290,26 +294,26 @@ int radeon_wb_init(struct radeon_device *rdev) dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); return r; } - r = radeon_bo_reserve(rdev->wb.wb_obj, false); - if (unlikely(r != 0)) { - radeon_wb_fini(rdev); - return r; - } - r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, - &rdev->wb.gpu_addr); - if (r) { - radeon_bo_unreserve(rdev->wb.wb_obj); - dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); - radeon_wb_fini(rdev); - return r; - } - r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); + } + r = radeon_bo_reserve(rdev->wb.wb_obj, false); + if (unlikely(r != 0)) { + radeon_wb_fini(rdev); + return r; + } + r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, + &rdev->wb.gpu_addr); + if (r) { radeon_bo_unreserve(rdev->wb.wb_obj); - if (r) { - dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); - radeon_wb_fini(rdev); - return r; - } + dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); + radeon_wb_fini(rdev); + return r; + } + r = 
radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); + radeon_bo_unreserve(rdev->wb.wb_obj); + if (r) { + dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); + radeon_wb_fini(rdev); + return r; } /* clear wb memory */ @@ -462,27 +466,23 @@ bool radeon_card_posted(struct radeon_device *rdev) { uint32_t reg; - /* required for EFI mode on macbook2,1 which uses an r5xx asic */ if (efi_enabled(EFI_BOOT) && - (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && - (rdev->family < CHIP_R600)) + rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) return false; - if (ASIC_IS_NODCE(rdev)) - goto check_memsize; - /* first check CRTCs */ - if (ASIC_IS_DCE4(rdev)) { + if (ASIC_IS_DCE41(rdev)) { reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); - if (rdev->num_crtc >= 4) { - reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); - } - if (rdev->num_crtc >= 6) { - reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); - } + if (reg & EVERGREEN_CRTC_MASTER_EN) + return true; + } else if (ASIC_IS_DCE4(rdev)) { + reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); if (reg & EVERGREEN_CRTC_MASTER_EN) return true; } else if (ASIC_IS_AVIVO(rdev)) { @@ -499,7 +499,6 @@ bool radeon_card_posted(struct radeon_device *rdev) } } -check_memsize: /* then check MEM_SIZE, in case the crtcs are off */ if (rdev->family >= CHIP_R600) reg = RREG32(R600_CONFIG_MEMSIZE); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_display.c b/trunk/drivers/gpu/drm/radeon/radeon_display.c index eb18bb7af1cc..e38fd559f1ab 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_display.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_display.c @@ -271,6 +271,8 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; struct radeon_unpin_work *work; + struct drm_pending_vblank_event *e; + struct timeval now; unsigned long flags; u32 update_pending; int vpos, hpos; @@ -326,9 +328,14 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) radeon_crtc->unpin_work = NULL; /* wakeup userspace */ - if (work->event) - drm_send_vblank_event(rdev->ddev, crtc_id, work->event); - + if (work->event) { + e = work->event; + e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now); + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; + list_add_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + } spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_drv.c b/trunk/drivers/gpu/drm/radeon/radeon_drv.c index 094e7e5ea39e..d33f484ace48 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_drv.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_drv.c @@ -147,7 +147,7 @@ static inline void radeon_unregister_atpx_handler(void) {} #endif int radeon_no_wb; -int radeon_modeset = -1; 
+int radeon_modeset = 1; int radeon_dynclks = -1; int radeon_r4xx_atom = 0; int radeon_agpmode = 0; @@ -456,16 +456,6 @@ static struct pci_driver radeon_kms_pci_driver = { static int __init radeon_init(void) { -#ifdef CONFIG_VGA_CONSOLE - if (vgacon_text_force() && radeon_modeset == -1) { - DRM_INFO("VGACON disable radeon kernel modesetting.\n"); - radeon_modeset = 0; - } -#endif - /* set to modesetting by default if not nomodeset */ - if (radeon_modeset == -1) - radeon_modeset = 1; - if (radeon_modeset == 1) { DRM_INFO("radeon kernel modesetting enabled.\n"); driver = &kms_driver; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_family.h b/trunk/drivers/gpu/drm/radeon/radeon_family.h index 36e9803b077d..2d91123f2759 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_family.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_family.h @@ -92,7 +92,6 @@ enum radeon_family { CHIP_PITCAIRN, CHIP_VERDE, CHIP_OLAND, - CHIP_HAINAN, CHIP_LAST, }; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_fence.c b/trunk/drivers/gpu/drm/radeon/radeon_fence.c index ddb8f8e04eb5..5b937dfe6f65 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_fence.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_fence.c @@ -63,9 +63,7 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring) { struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; if (likely(rdev->wb.enabled || !drv->scratch_reg)) { - if (drv->cpu_addr) { - *drv->cpu_addr = cpu_to_le32(seq); - } + *drv->cpu_addr = cpu_to_le32(seq); } else { WREG32(drv->scratch_reg, seq); } @@ -86,11 +84,7 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring) u32 seq = 0; if (likely(rdev->wb.enabled || !drv->scratch_reg)) { - if (drv->cpu_addr) { - seq = le32_to_cpu(*drv->cpu_addr); - } else { - seq = lower_32_bits(atomic64_read(&drv->last_seq)); - } + seq = le32_to_cpu(*drv->cpu_addr); } else { seq = RREG32(drv->scratch_reg); } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gart.c b/trunk/drivers/gpu/drm/radeon/radeon_gart.c index 43ec4a401f07..2c1341f63dc5 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_gart.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_gart.c @@ -1197,13 +1197,11 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, int radeon_vm_bo_rmv(struct radeon_device *rdev, struct radeon_bo_va *bo_va) { - int r = 0; + int r; mutex_lock(&rdev->vm_manager.lock); mutex_lock(&bo_va->vm->mutex); - if (bo_va->soffset) { - r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); - } + r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); mutex_unlock(&rdev->vm_manager.lock); list_del(&bo_va->vm_list); mutex_unlock(&bo_va->vm->mutex); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 7cb178a34a0f..6857cb4efb76 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -1031,9 +1031,11 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, static void radeon_crtc_prepare(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_crtc *crtci; + radeon_crtc->in_mode_set = true; /* * The hardware wedges sometimes if you reconfigure one CRTC * whilst another is running (see fdo bug #24611). 
@@ -1044,6 +1046,7 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc) static void radeon_crtc_commit(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_crtc *crtci; @@ -1054,6 +1057,7 @@ static void radeon_crtc_commit(struct drm_crtc *crtc) if (crtci->enabled) radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON); } + radeon_crtc->in_mode_set = false; } static const struct drm_crtc_helper_funcs legacy_helper_funcs = { diff --git a/trunk/drivers/gpu/drm/radeon/radeon_mode.h b/trunk/drivers/gpu/drm/radeon/radeon_mode.h index 69ad4fe224c1..44e579e75fd0 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_mode.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_mode.h @@ -302,6 +302,7 @@ struct radeon_crtc { u16 lut_r[256], lut_g[256], lut_b[256]; bool enabled; bool can_tile; + bool in_mode_set; uint32_t crtc_offset; struct drm_gem_object *cursor_bo; uint64_t cursor_addr; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_ring.c b/trunk/drivers/gpu/drm/radeon/radeon_ring.c index 82434018cbe8..e17faa7cf732 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_ring.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_ring.c @@ -402,13 +402,6 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi return -ENOMEM; /* Align requested size with padding so unlock_commit can * pad safely */ - radeon_ring_free_size(rdev, ring); - if (ring->ring_free_dw == (ring->ring_size / 4)) { - /* This is an empty ring update lockup info to avoid - * false positive. - */ - radeon_ring_lockup_update(ring); - } ndw = (ndw + ring->align_mask) & ~ring->align_mask; while (ndw > (ring->ring_free_dw - 1)) { radeon_ring_free_size(rdev, ring); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_ttm.c b/trunk/drivers/gpu/drm/radeon/radeon_ttm.c index 6c0ce8915fac..93f760e27a92 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_ttm.c @@ -726,7 +726,7 @@ int radeon_ttm_init(struct radeon_device *rdev) return r; } DRM_INFO("radeon: %uM of VRAM memory ready\n", - (unsigned) (rdev->mc.real_vram_size / (1024 * 1024))); + (unsigned)rdev->mc.real_vram_size / (1024 * 1024)); r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, rdev->mc.gtt_size >> PAGE_SHIFT); if (r) { diff --git a/trunk/drivers/gpu/drm/radeon/radeon_uvd.c b/trunk/drivers/gpu/drm/radeon/radeon_uvd.c index cad735dd02c6..906e5c0ca3b9 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_uvd.c @@ -159,17 +159,7 @@ int radeon_uvd_suspend(struct radeon_device *rdev) if (!r) { radeon_bo_kunmap(rdev->uvd.vcpu_bo); radeon_bo_unpin(rdev->uvd.vcpu_bo); - rdev->uvd.cpu_addr = NULL; - if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) { - radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); - } radeon_bo_unreserve(rdev->uvd.vcpu_bo); - - if (rdev->uvd.cpu_addr) { - radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); - } else { - rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL; - } } return r; } @@ -188,10 +178,6 @@ int radeon_uvd_resume(struct radeon_device *rdev) return r; } - /* Have been pin in cpu unmap unpin */ - radeon_bo_kunmap(rdev->uvd.vcpu_bo); - radeon_bo_unpin(rdev->uvd.vcpu_bo); - r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, &rdev->uvd.gpu_addr); if (r) { @@ -627,19 +613,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring, } /* stitch together an UVD create msg */ - msg[0] = cpu_to_le32(0x00000de4); - msg[1] = cpu_to_le32(0x00000000); - 
msg[2] = cpu_to_le32(handle); - msg[3] = cpu_to_le32(0x00000000); - msg[4] = cpu_to_le32(0x00000000); - msg[5] = cpu_to_le32(0x00000000); - msg[6] = cpu_to_le32(0x00000000); - msg[7] = cpu_to_le32(0x00000780); - msg[8] = cpu_to_le32(0x00000440); - msg[9] = cpu_to_le32(0x00000000); - msg[10] = cpu_to_le32(0x01b37000); + msg[0] = 0x00000de4; + msg[1] = 0x00000000; + msg[2] = handle; + msg[3] = 0x00000000; + msg[4] = 0x00000000; + msg[5] = 0x00000000; + msg[6] = 0x00000000; + msg[7] = 0x00000780; + msg[8] = 0x00000440; + msg[9] = 0x00000000; + msg[10] = 0x01b37000; for (i = 11; i < 1024; ++i) - msg[i] = cpu_to_le32(0x0); + msg[i] = 0x0; radeon_bo_kunmap(bo); radeon_bo_unreserve(bo); @@ -673,12 +659,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring, } /* stitch together an UVD destroy msg */ - msg[0] = cpu_to_le32(0x00000de4); - msg[1] = cpu_to_le32(0x00000002); - msg[2] = cpu_to_le32(handle); - msg[3] = cpu_to_le32(0x00000000); + msg[0] = 0x00000de4; + msg[1] = 0x00000002; + msg[2] = handle; + msg[3] = 0x00000000; for (i = 4; i < 1024; ++i) - msg[i] = cpu_to_le32(0x0); + msg[i] = 0x0; radeon_bo_kunmap(bo); radeon_bo_unreserve(bo); diff --git a/trunk/drivers/gpu/drm/radeon/rs400.c b/trunk/drivers/gpu/drm/radeon/rs400.c index 233a9b9fa1f7..73051ce3121e 100644 --- a/trunk/drivers/gpu/drm/radeon/rs400.c +++ b/trunk/drivers/gpu/drm/radeon/rs400.c @@ -417,12 +417,6 @@ static int rs400_startup(struct radeon_device *rdev) } /* Enable IRQ */ - if (!rdev->irq.installed) { - r = radeon_irq_kms_init(rdev); - if (r) - return r; - } - r100_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ @@ -539,6 +533,9 @@ int rs400_init(struct radeon_device *rdev) rs400_mc_init(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); + if (r) + return r; + r = radeon_irq_kms_init(rdev); if (r) return r; /* Memory manager */ diff --git a/trunk/drivers/gpu/drm/radeon/rs600.c b/trunk/drivers/gpu/drm/radeon/rs600.c index 670b555d2ca2..46fa1b07c560 100644 --- a/trunk/drivers/gpu/drm/radeon/rs600.c +++ b/trunk/drivers/gpu/drm/radeon/rs600.c @@ -923,12 +923,6 @@ static int rs600_startup(struct radeon_device *rdev) } /* Enable IRQ */ - if (!rdev->irq.installed) { - r = radeon_irq_kms_init(rdev); - if (r) - return r; - } - rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ @@ -1051,6 +1045,9 @@ int rs600_init(struct radeon_device *rdev) rs600_debugfs(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); + if (r) + return r; + r = radeon_irq_kms_init(rdev); if (r) return r; /* Memory manager */ diff --git a/trunk/drivers/gpu/drm/radeon/rs690.c b/trunk/drivers/gpu/drm/radeon/rs690.c index 55880d5962c3..ab4c86cfd552 100644 --- a/trunk/drivers/gpu/drm/radeon/rs690.c +++ b/trunk/drivers/gpu/drm/radeon/rs690.c @@ -651,12 +651,6 @@ static int rs690_startup(struct radeon_device *rdev) } /* Enable IRQ */ - if (!rdev->irq.installed) { - r = radeon_irq_kms_init(rdev); - if (r) - return r; - } - rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ @@ -780,6 +774,9 @@ int rs690_init(struct radeon_device *rdev) rv515_debugfs(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); + if (r) + return r; + r = radeon_irq_kms_init(rdev); if (r) return r; /* Memory manager */ diff --git a/trunk/drivers/gpu/drm/radeon/rv515.c b/trunk/drivers/gpu/drm/radeon/rv515.c index 21c7d7b26e55..ffcba730c57c 100644 --- a/trunk/drivers/gpu/drm/radeon/rv515.c +++ 
b/trunk/drivers/gpu/drm/radeon/rv515.c @@ -532,12 +532,6 @@ static int rv515_startup(struct radeon_device *rdev) } /* Enable IRQ */ - if (!rdev->irq.installed) { - r = radeon_irq_kms_init(rdev); - if (r) - return r; - } - rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ @@ -666,6 +660,9 @@ int rv515_init(struct radeon_device *rdev) rv515_debugfs(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); + if (r) + return r; + r = radeon_irq_kms_init(rdev); if (r) return r; /* Memory manager */ diff --git a/trunk/drivers/gpu/drm/radeon/rv770.c b/trunk/drivers/gpu/drm/radeon/rv770.c index 4a62ad2e5399..83f612a9500b 100644 --- a/trunk/drivers/gpu/drm/radeon/rv770.c +++ b/trunk/drivers/gpu/drm/radeon/rv770.c @@ -862,9 +862,11 @@ int rv770_uvd_resume(struct radeon_device *rdev) chip_id = 0x0100000b; break; case CHIP_SUMO: - case CHIP_SUMO2: chip_id = 0x0100000c; break; + case CHIP_SUMO2: + chip_id = 0x0100000d; + break; case CHIP_PALM: chip_id = 0x0100000e; break; @@ -1887,12 +1889,6 @@ static int rv770_startup(struct radeon_device *rdev) rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; /* Enable IRQ */ - if (!rdev->irq.installed) { - r = radeon_irq_kms_init(rdev); - if (r) - return r; - } - r = r600_irq_init(rdev); if (r) { DRM_ERROR("radeon: IH init failed (%d).\n", r); @@ -2051,6 +2047,10 @@ int rv770_init(struct radeon_device *rdev) if (r) return r; + r = radeon_irq_kms_init(rdev); + if (r) + return r; + rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); @@ -2113,6 +2113,8 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev) { u32 link_width_cntl, lanes, speed_cntl, tmp; u16 link_cntl2; + u32 mask; + int ret; if (radeon_pcie_gen2 == 0) return; @@ -2127,8 +2129,11 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev) if (ASIC_IS_X2(rdev)) return; - if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) && - (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT)) + ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); + if (ret != 0) + return; + + if (!(mask & DRM_PCIE_SPEED_50)) return; DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); diff --git a/trunk/drivers/gpu/drm/radeon/si.c b/trunk/drivers/gpu/drm/radeon/si.c index a1b0da6b5808..f0b6c2f87c4d 100644 --- a/trunk/drivers/gpu/drm/radeon/si.c +++ b/trunk/drivers/gpu/drm/radeon/si.c @@ -60,11 +60,6 @@ MODULE_FIRMWARE("radeon/OLAND_me.bin"); MODULE_FIRMWARE("radeon/OLAND_ce.bin"); MODULE_FIRMWARE("radeon/OLAND_mc.bin"); MODULE_FIRMWARE("radeon/OLAND_rlc.bin"); -MODULE_FIRMWARE("radeon/HAINAN_pfp.bin"); -MODULE_FIRMWARE("radeon/HAINAN_me.bin"); -MODULE_FIRMWARE("radeon/HAINAN_ce.bin"); -MODULE_FIRMWARE("radeon/HAINAN_mc.bin"); -MODULE_FIRMWARE("radeon/HAINAN_rlc.bin"); extern int r600_ih_ring_alloc(struct radeon_device *rdev); extern void r600_ih_ring_fini(struct radeon_device *rdev); @@ -270,40 +265,6 @@ static const u32 oland_golden_registers[] = 0x15c0, 0x000c0fc0, 0x000c0400 }; -static const u32 hainan_golden_registers[] = -{ - 0x9a10, 0x00010000, 0x00018208, - 0x9830, 0xffffffff, 0x00000000, - 0x9834, 0xf00fffff, 0x00000400, - 0x9838, 0x0002021c, 0x00020200, - 0xd0c0, 0xff000fff, 0x00000100, - 0xd030, 0x000300c0, 0x00800040, - 0xd8c0, 0xff000fff, 0x00000100, - 0xd830, 0x000300c0, 0x00800040, - 0x2ae4, 0x00073ffe, 0x000022a2, - 0x240c, 0x000007ff, 0x00000000, - 0x8a14, 0xf000001f, 0x00000007, - 0x8b24, 0xffffffff, 0x00ffffff, - 0x8b10, 
0x0000ff0f, 0x00000000, - 0x28a4c, 0x07ffffff, 0x4e000000, - 0x28350, 0x3f3f3fff, 0x00000000, - 0x30, 0x000000ff, 0x0040, - 0x34, 0x00000040, 0x00004040, - 0x9100, 0x03e00000, 0x03600000, - 0x9060, 0x0000007f, 0x00000020, - 0x9508, 0x00010000, 0x00010000, - 0xac14, 0x000003ff, 0x000000f1, - 0xac10, 0xffffffff, 0x00000000, - 0xac0c, 0xffffffff, 0x00003210, - 0x88d4, 0x0000001f, 0x00000010, - 0x15c0, 0x000c0fc0, 0x000c0400 -}; - -static const u32 hainan_golden_registers2[] = -{ - 0x98f8, 0xffffffff, 0x02010001 -}; - static const u32 tahiti_mgcg_cgcg_init[] = { 0xc400, 0xffffffff, 0xfffffffc, @@ -712,83 +673,6 @@ static const u32 oland_mgcg_cgcg_init[] = 0xd8c0, 0xfffffff0, 0x00000100 }; -static const u32 hainan_mgcg_cgcg_init[] = -{ - 0xc400, 0xffffffff, 0xfffffffc, - 0x802c, 0xffffffff, 0xe0000000, - 0x9a60, 0xffffffff, 0x00000100, - 0x92a4, 0xffffffff, 0x00000100, - 0xc164, 0xffffffff, 0x00000100, - 0x9774, 0xffffffff, 0x00000100, - 0x8984, 0xffffffff, 0x06000100, - 0x8a18, 0xffffffff, 0x00000100, - 0x92a0, 0xffffffff, 0x00000100, - 0xc380, 0xffffffff, 0x00000100, - 0x8b28, 0xffffffff, 0x00000100, - 0x9144, 0xffffffff, 0x00000100, - 0x8d88, 0xffffffff, 0x00000100, - 0x8d8c, 0xffffffff, 0x00000100, - 0x9030, 0xffffffff, 0x00000100, - 0x9034, 0xffffffff, 0x00000100, - 0x9038, 0xffffffff, 0x00000100, - 0x903c, 0xffffffff, 0x00000100, - 0xad80, 0xffffffff, 0x00000100, - 0xac54, 0xffffffff, 0x00000100, - 0x897c, 0xffffffff, 0x06000100, - 0x9868, 0xffffffff, 0x00000100, - 0x9510, 0xffffffff, 0x00000100, - 0xaf04, 0xffffffff, 0x00000100, - 0xae04, 0xffffffff, 0x00000100, - 0x949c, 0xffffffff, 0x00000100, - 0x802c, 0xffffffff, 0xe0000000, - 0x9160, 0xffffffff, 0x00010000, - 0x9164, 0xffffffff, 0x00030002, - 0x9168, 0xffffffff, 0x00040007, - 0x916c, 0xffffffff, 0x00060005, - 0x9170, 0xffffffff, 0x00090008, - 0x9174, 0xffffffff, 0x00020001, - 0x9178, 0xffffffff, 0x00040003, - 0x917c, 0xffffffff, 0x00000007, - 0x9180, 0xffffffff, 0x00060005, - 0x9184, 0xffffffff, 0x00090008, - 0x9188, 0xffffffff, 0x00030002, - 0x918c, 0xffffffff, 0x00050004, - 0x9190, 0xffffffff, 0x00000008, - 0x9194, 0xffffffff, 0x00070006, - 0x9198, 0xffffffff, 0x000a0009, - 0x919c, 0xffffffff, 0x00040003, - 0x91a0, 0xffffffff, 0x00060005, - 0x91a4, 0xffffffff, 0x00000009, - 0x91a8, 0xffffffff, 0x00080007, - 0x91ac, 0xffffffff, 0x000b000a, - 0x91b0, 0xffffffff, 0x00050004, - 0x91b4, 0xffffffff, 0x00070006, - 0x91b8, 0xffffffff, 0x0008000b, - 0x91bc, 0xffffffff, 0x000a0009, - 0x91c0, 0xffffffff, 0x000d000c, - 0x91c4, 0xffffffff, 0x00060005, - 0x91c8, 0xffffffff, 0x00080007, - 0x91cc, 0xffffffff, 0x0000000b, - 0x91d0, 0xffffffff, 0x000a0009, - 0x91d4, 0xffffffff, 0x000d000c, - 0x9150, 0xffffffff, 0x96940200, - 0x8708, 0xffffffff, 0x00900100, - 0xc478, 0xffffffff, 0x00000080, - 0xc404, 0xffffffff, 0x0020003f, - 0x30, 0xffffffff, 0x0000001c, - 0x34, 0x000f0000, 0x000f0000, - 0x160c, 0xffffffff, 0x00000100, - 0x1024, 0xffffffff, 0x00000100, - 0x20a8, 0xffffffff, 0x00000104, - 0x264c, 0x000c0000, 0x000c0000, - 0x2648, 0x000c0000, 0x000c0000, - 0x2f50, 0x00000001, 0x00000001, - 0x30cc, 0xc0000fff, 0x00000104, - 0xc1e4, 0x00000001, 0x00000001, - 0xd0c0, 0xfffffff0, 0x00000100, - 0xd8c0, 0xfffffff0, 0x00000100 -}; - static u32 verde_pg_init[] = { 0x353c, 0xffffffff, 0x40000, @@ -969,17 +853,6 @@ static void si_init_golden_registers(struct radeon_device *rdev) oland_mgcg_cgcg_init, (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init)); break; - case CHIP_HAINAN: - radeon_program_register_sequence(rdev, - hainan_golden_registers, - (const 
u32)ARRAY_SIZE(hainan_golden_registers)); - radeon_program_register_sequence(rdev, - hainan_golden_registers2, - (const u32)ARRAY_SIZE(hainan_golden_registers2)); - radeon_program_register_sequence(rdev, - hainan_mgcg_cgcg_init, - (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init)); - break; default: break; } @@ -1189,45 +1062,6 @@ static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { {0x0000009f, 0x00a17730} }; -static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { - {0x0000006f, 0x03044000}, - {0x00000070, 0x0480c018}, - {0x00000071, 0x00000040}, - {0x00000072, 0x01000000}, - {0x00000074, 0x000000ff}, - {0x00000075, 0x00143400}, - {0x00000076, 0x08ec0800}, - {0x00000077, 0x040000cc}, - {0x00000079, 0x00000000}, - {0x0000007a, 0x21000409}, - {0x0000007c, 0x00000000}, - {0x0000007d, 0xe8000000}, - {0x0000007e, 0x044408a8}, - {0x0000007f, 0x00000003}, - {0x00000080, 0x00000000}, - {0x00000081, 0x01000000}, - {0x00000082, 0x02000000}, - {0x00000083, 0x00000000}, - {0x00000084, 0xe3f3e4f4}, - {0x00000085, 0x00052024}, - {0x00000087, 0x00000000}, - {0x00000088, 0x66036603}, - {0x00000089, 0x01000000}, - {0x0000008b, 0x1c0a0000}, - {0x0000008c, 0xff010000}, - {0x0000008e, 0xffffefff}, - {0x0000008f, 0xfff3efff}, - {0x00000090, 0xfff3efbf}, - {0x00000094, 0x00101101}, - {0x00000095, 0x00000fff}, - {0x00000096, 0x00116fff}, - {0x00000097, 0x60010000}, - {0x00000098, 0x10010000}, - {0x00000099, 0x00006000}, - {0x0000009a, 0x00001000}, - {0x0000009f, 0x00a07730} -}; - /* ucode loading */ static int si_mc_load_microcode(struct radeon_device *rdev) { @@ -1261,11 +1095,6 @@ static int si_mc_load_microcode(struct radeon_device *rdev) ucode_size = OLAND_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; - case CHIP_HAINAN: - io_mc_regs = (u32 *)&hainan_io_mc_regs; - ucode_size = OLAND_MC_UCODE_SIZE; - regs_size = TAHITI_IO_MC_REGS_SIZE; - break; } running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; @@ -1369,15 +1198,6 @@ static int si_init_microcode(struct radeon_device *rdev) rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = OLAND_MC_UCODE_SIZE * 4; break; - case CHIP_HAINAN: - chip_name = "HAINAN"; - rlc_chip_name = "HAINAN"; - pfp_req_size = SI_PFP_UCODE_SIZE * 4; - me_req_size = SI_PM4_UCODE_SIZE * 4; - ce_req_size = SI_CE_UCODE_SIZE * 4; - rlc_req_size = SI_RLC_UCODE_SIZE * 4; - mc_req_size = OLAND_MC_UCODE_SIZE * 4; - break; default: BUG(); } @@ -2183,8 +2003,7 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev) WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); } } else if ((rdev->family == CHIP_VERDE) || - (rdev->family == CHIP_OLAND) || - (rdev->family == CHIP_HAINAN)) { + (rdev->family == CHIP_OLAND)) { for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { switch (reg_offset) { case 0: /* non-AA compressed depth or any compressed stencil */ @@ -2616,7 +2435,7 @@ static void si_gpu_init(struct radeon_device *rdev) default: rdev->config.si.max_shader_engines = 1; rdev->config.si.max_tile_pipes = 4; - rdev->config.si.max_cu_per_sh = 5; + rdev->config.si.max_cu_per_sh = 2; rdev->config.si.max_sh_per_se = 2; rdev->config.si.max_backends_per_se = 4; rdev->config.si.max_texture_channel_caches = 4; @@ -2647,23 +2466,6 @@ static void si_gpu_init(struct radeon_device *rdev) rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN; break; - case CHIP_HAINAN: - rdev->config.si.max_shader_engines = 1; - rdev->config.si.max_tile_pipes = 4; - rdev->config.si.max_cu_per_sh = 5; - rdev->config.si.max_sh_per_se = 
1; - rdev->config.si.max_backends_per_se = 1; - rdev->config.si.max_texture_channel_caches = 2; - rdev->config.si.max_gprs = 256; - rdev->config.si.max_gs_threads = 16; - rdev->config.si.max_hw_contexts = 8; - - rdev->config.si.sc_prim_fifo_size_frontend = 0x20; - rdev->config.si.sc_prim_fifo_size_backend = 0x40; - rdev->config.si.sc_hiz_tile_fifo_size = 0x30; - rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; - gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN; - break; } /* Initialize HDP */ @@ -2757,11 +2559,9 @@ static void si_gpu_init(struct radeon_device *rdev) WREG32(HDP_ADDR_CONFIG, gb_addr_config); WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); - if (rdev->has_uvd) { - WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config); - WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config); - WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); - } + WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config); + WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config); + WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); si_tiling_mode_table_init(rdev); @@ -3504,9 +3304,8 @@ static void si_mc_program(struct radeon_device *rdev) if (radeon_mc_wait_for_idle(rdev)) { dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); } - if (!ASIC_IS_NODCE(rdev)) - /* Lockout access through VGA aperture*/ - WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); + /* Lockout access through VGA aperture*/ + WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); /* Update configuration */ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); @@ -3528,11 +3327,9 @@ static void si_mc_program(struct radeon_device *rdev) dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); } evergreen_mc_resume(rdev, &save); - if (!ASIC_IS_NODCE(rdev)) { - /* we need to own VRAM, so turn off the VGA renderer here - * to stop it overwriting our objects */ - rv515_vga_render_disable(rdev); - } + /* we need to own VRAM, so turn off the VGA renderer here + * to stop it overwriting our objects */ + rv515_vga_render_disable(rdev); } static void si_vram_gtt_location(struct radeon_device *rdev, @@ -3600,8 +3397,8 @@ static int si_mc_init(struct radeon_device *rdev) rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); /* size in MB on si */ - rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; - rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; + rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; rdev->mc.visible_vram_size = rdev->mc.aper_size; si_vram_gtt_location(rdev, &rdev->mc); radeon_update_bandwidth_info(rdev); @@ -4454,10 +4251,8 @@ static void si_disable_interrupt_state(struct radeon_device *rdev) tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); WREG32(GRBM_INT_CNTL, 0); - if (rdev->num_crtc >= 2) { - WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); - WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - } + WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); if (rdev->num_crtc >= 4) { WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); @@ -4467,10 +4262,8 @@ static void si_disable_interrupt_state(struct radeon_device *rdev) WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); } - if (rdev->num_crtc >= 2) { - WREG32(GRPH_INT_CONTROL + 
EVERGREEN_CRTC0_REGISTER_OFFSET, 0); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - } + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); if (rdev->num_crtc >= 4) { WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); @@ -4480,22 +4273,21 @@ static void si_disable_interrupt_state(struct radeon_device *rdev) WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); } - if (!ASIC_IS_NODCE(rdev)) { - WREG32(DACA_AUTODETECT_INT_CONTROL, 0); + WREG32(DACA_AUTODETECT_INT_CONTROL, 0); + + tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD1_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD2_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD3_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD4_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD5_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD6_INT_CONTROL, tmp); - tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD1_INT_CONTROL, tmp); - tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD2_INT_CONTROL, tmp); - tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD3_INT_CONTROL, tmp); - tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD4_INT_CONTROL, tmp); - tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD5_INT_CONTROL, tmp); - tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD6_INT_CONTROL, tmp); - } } static int si_irq_init(struct radeon_device *rdev) @@ -4574,7 +4366,7 @@ int si_irq_set(struct radeon_device *rdev) u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; - u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; + u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; u32 grbm_int_cntl = 0; u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; u32 dma_cntl, dma_cntl1; @@ -4591,14 +4383,12 @@ int si_irq_set(struct radeon_device *rdev) return 0; } - if (!ASIC_IS_NODCE(rdev)) { - hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; - } + hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; @@ -4689,10 +4479,8 @@ int si_irq_set(struct radeon_device *rdev) WREG32(GRBM_INT_CNTL, grbm_int_cntl); - if (rdev->num_crtc >= 2) { - WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); - WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); - } + WREG32(INT_MASK + 
EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); + WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); if (rdev->num_crtc >= 4) { WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); @@ -4702,10 +4490,8 @@ int si_irq_set(struct radeon_device *rdev) WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); } - if (rdev->num_crtc >= 2) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); - } + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); if (rdev->num_crtc >= 4) { WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); @@ -4715,14 +4501,12 @@ int si_irq_set(struct radeon_device *rdev) WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); } - if (!ASIC_IS_NODCE(rdev)) { - WREG32(DC_HPD1_INT_CONTROL, hpd1); - WREG32(DC_HPD2_INT_CONTROL, hpd2); - WREG32(DC_HPD3_INT_CONTROL, hpd3); - WREG32(DC_HPD4_INT_CONTROL, hpd4); - WREG32(DC_HPD5_INT_CONTROL, hpd5); - WREG32(DC_HPD6_INT_CONTROL, hpd6); - } + WREG32(DC_HPD1_INT_CONTROL, hpd1); + WREG32(DC_HPD2_INT_CONTROL, hpd2); + WREG32(DC_HPD3_INT_CONTROL, hpd3); + WREG32(DC_HPD4_INT_CONTROL, hpd4); + WREG32(DC_HPD5_INT_CONTROL, hpd5); + WREG32(DC_HPD6_INT_CONTROL, hpd6); return 0; } @@ -4731,9 +4515,6 @@ static inline void si_irq_ack(struct radeon_device *rdev) { u32 tmp; - if (ASIC_IS_NODCE(rdev)) - return; - rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS); rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); @@ -5337,25 +5118,17 @@ static int si_startup(struct radeon_device *rdev) return r; } - if (rdev->has_uvd) { - r = rv770_uvd_resume(rdev); - if (!r) { - r = radeon_fence_driver_start_ring(rdev, - R600_RING_TYPE_UVD_INDEX); - if (r) - dev_err(rdev->dev, "UVD fences init error (%d).\n", r); - } + r = rv770_uvd_resume(rdev); + if (!r) { + r = radeon_fence_driver_start_ring(rdev, + R600_RING_TYPE_UVD_INDEX); if (r) - rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; + dev_err(rdev->dev, "UVD fences init error (%d).\n", r); } + if (r) + rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; /* Enable IRQ */ - if (!rdev->irq.installed) { - r = radeon_irq_kms_init(rdev); - if (r) - return r; - } - r = si_irq_init(rdev); if (r) { DRM_ERROR("radeon: IH init failed (%d).\n", r); @@ -5412,18 +5185,16 @@ static int si_startup(struct radeon_device *rdev) if (r) return r; - if (rdev->has_uvd) { - ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - if (ring->ring_size) { - r = radeon_ring_init(rdev, ring, ring->ring_size, - R600_WB_UVD_RPTR_OFFSET, - UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); - if (!r) - r = r600_uvd_init(rdev); - if (r) - DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); - } + ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; + if (ring->ring_size) { + r = radeon_ring_init(rdev, ring, ring->ring_size, + R600_WB_UVD_RPTR_OFFSET, + UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, + 0, 0xfffff, RADEON_CP_PACKET2); + if (!r) + r = r600_uvd_init(rdev); + if (r) + DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); } r = radeon_ib_pool_init(rdev); @@ -5472,10 +5243,8 @@ int si_suspend(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); si_cp_enable(rdev, false); 
cayman_dma_stop(rdev); - if (rdev->has_uvd) { - r600_uvd_rbc_stop(rdev); - radeon_uvd_suspend(rdev); - } + r600_uvd_rbc_stop(rdev); + radeon_uvd_suspend(rdev); si_irq_suspend(rdev); radeon_wb_disable(rdev); si_pcie_gart_disable(rdev); @@ -5539,6 +5308,10 @@ int si_init(struct radeon_device *rdev) if (r) return r; + r = radeon_irq_kms_init(rdev); + if (r) + return r; + ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); @@ -5559,13 +5332,11 @@ int si_init(struct radeon_device *rdev) ring->ring_obj = NULL; r600_ring_init(rdev, ring, 64 * 1024); - if (rdev->has_uvd) { - r = radeon_uvd_init(rdev); - if (!r) { - ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - ring->ring_obj = NULL; - r600_ring_init(rdev, ring, 4096); - } + r = radeon_uvd_init(rdev); + if (!r) { + ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; + ring->ring_obj = NULL; + r600_ring_init(rdev, ring, 4096); } rdev->ih.ring_obj = NULL; @@ -5613,8 +5384,7 @@ void si_fini(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); - if (rdev->has_uvd) - radeon_uvd_fini(rdev); + radeon_uvd_fini(rdev); si_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); diff --git a/trunk/drivers/gpu/drm/radeon/sid.h b/trunk/drivers/gpu/drm/radeon/sid.h index 8f2d7d4f9b28..222877ba6cf5 100644 --- a/trunk/drivers/gpu/drm/radeon/sid.h +++ b/trunk/drivers/gpu/drm/radeon/sid.h @@ -28,7 +28,6 @@ #define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003 #define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 -#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001 /* discrete uvd clocks */ #define CG_UPLL_FUNC_CNTL 0x634 diff --git a/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index 99e2034e49cc..7dff49ed66e7 100644 --- a/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -451,16 +451,27 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc) { struct drm_pending_vblank_event *event; struct drm_device *dev = scrtc->crtc.dev; + struct timeval vblanktime; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); event = scrtc->event; scrtc->event = NULL; - if (event) { - drm_send_vblank_event(dev, 0, event); - drm_vblank_put(dev, 0); - } spin_unlock_irqrestore(&dev->event_lock, flags); + + if (event == NULL) + return; + + event->event.sequence = drm_vblank_count_and_time(dev, 0, &vblanktime); + event->event.tv_sec = vblanktime.tv_sec; + event->event.tv_usec = vblanktime.tv_usec; + + spin_lock_irqsave(&dev->event_lock, flags); + list_add_tail(&event->base.link, &event->base.file_priv->event_list); + wake_up_interruptible(&event->base.file_priv->event_wait); + spin_unlock_irqrestore(&dev->event_lock, flags); + + drm_vblank_put(dev, 0); } static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc, diff --git a/trunk/drivers/gpu/drm/tilcdc/Kconfig b/trunk/drivers/gpu/drm/tilcdc/Kconfig index 7a4d10106906..e461e9972455 100644 --- a/trunk/drivers/gpu/drm/tilcdc/Kconfig +++ b/trunk/drivers/gpu/drm/tilcdc/Kconfig @@ -6,7 +6,6 @@ config DRM_TILCDC select DRM_GEM_CMA_HELPER select VIDEOMODE_HELPERS select BACKLIGHT_CLASS_DEVICE - select BACKLIGHT_LCD_SUPPORT help Choose this option if you have an TI SoC with LCDC display controller, for example AM33xx in beagle-bone, DA8xx, or diff --git a/trunk/drivers/gpu/host1x/drm/dc.c b/trunk/drivers/gpu/host1x/drm/dc.c index 8c04943f82e3..1e2060324f02 100644 --- a/trunk/drivers/gpu/host1x/drm/dc.c 
+++ b/trunk/drivers/gpu/host1x/drm/dc.c @@ -1128,6 +1128,11 @@ static int tegra_dc_probe(struct platform_device *pdev) return err; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) { + dev_err(&pdev->dev, "failed to get registers\n"); + return -ENXIO; + } + dc->regs = devm_ioremap_resource(&pdev->dev, regs); if (IS_ERR(dc->regs)) return PTR_ERR(dc->regs); diff --git a/trunk/drivers/hid/hid-multitouch.c b/trunk/drivers/hid/hid-multitouch.c index d39a5cede0b0..dc3ae5c56f56 100644 --- a/trunk/drivers/hid/hid-multitouch.c +++ b/trunk/drivers/hid/hid-multitouch.c @@ -264,12 +264,9 @@ static struct mt_class mt_classes[] = { static void mt_free_input_name(struct hid_input *hi) { struct hid_device *hdev = hi->report->device; - const char *name = hi->input->name; - if (name != hdev->name) { - hi->input->name = hdev->name; - kfree(name); - } + if (hi->input->name != hdev->name) + kfree(hi->input->name); } static ssize_t mt_show_quirks(struct device *dev, @@ -1043,11 +1040,11 @@ static void mt_remove(struct hid_device *hdev) struct hid_input *hi; sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group); + hid_hw_stop(hdev); + list_for_each_entry(hi, &hdev->inputs, list) mt_free_input_name(hi); - hid_hw_stop(hdev); - kfree(td); hid_set_drvdata(hdev, NULL); } diff --git a/trunk/drivers/hv/channel_mgmt.c b/trunk/drivers/hv/channel_mgmt.c index 21ef68934a20..bad8128b283a 100644 --- a/trunk/drivers/hv/channel_mgmt.c +++ b/trunk/drivers/hv/channel_mgmt.c @@ -329,7 +329,7 @@ static u32 get_vp_index(uuid_le *type_guid) return 0; } cur_cpu = (++next_vp % max_cpus); - return hv_context.vp_index[cur_cpu]; + return cur_cpu; } /* diff --git a/trunk/drivers/hwmon/abituguru.c b/trunk/drivers/hwmon/abituguru.c index 2ebd6ce46108..df0b69987914 100644 --- a/trunk/drivers/hwmon/abituguru.c +++ b/trunk/drivers/hwmon/abituguru.c @@ -1414,18 +1414,14 @@ static int abituguru_probe(struct platform_device *pdev) pr_info("found Abit uGuru\n"); /* Register sysfs hooks */ - for (i = 0; i < sysfs_attr_i; i++) { - res = device_create_file(&pdev->dev, - &data->sysfs_attr[i].dev_attr); - if (res) + for (i = 0; i < sysfs_attr_i; i++) + if (device_create_file(&pdev->dev, + &data->sysfs_attr[i].dev_attr)) goto abituguru_probe_error; - } - for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) { - res = device_create_file(&pdev->dev, - &abituguru_sysfs_attr[i].dev_attr); - if (res) + for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) + if (device_create_file(&pdev->dev, + &abituguru_sysfs_attr[i].dev_attr)) goto abituguru_probe_error; - } data->hwmon_dev = hwmon_device_register(&pdev->dev); if (!IS_ERR(data->hwmon_dev)) diff --git a/trunk/drivers/hwmon/adm1021.c b/trunk/drivers/hwmon/adm1021.c index f920619cd6da..7e76922a4ba9 100644 --- a/trunk/drivers/hwmon/adm1021.c +++ b/trunk/drivers/hwmon/adm1021.c @@ -331,68 +331,26 @@ static int adm1021_detect(struct i2c_client *client, man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID); dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID); - if (man_id < 0 || dev_id < 0) - return -ENODEV; - if (man_id == 0x4d && dev_id == 0x01) type_name = "max1617a"; else if (man_id == 0x41) { if ((dev_id & 0xF0) == 0x30) type_name = "adm1023"; - else if ((dev_id & 0xF0) == 0x00) - type_name = "adm1021"; else - return -ENODEV; + type_name = "adm1021"; } else if (man_id == 0x49) type_name = "thmc10"; else if (man_id == 0x23) type_name = "gl523sm"; else if (man_id == 0x54) type_name = "mc1066"; - else { - int lte, rte, lhi, rhi, llo, rlo; - - /* extra checks for LM84 and 
MAX1617 to avoid misdetections */ - - llo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(0)); - rlo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(1)); - - /* fail if any of the additional register reads failed */ - if (llo < 0 || rlo < 0) - return -ENODEV; - - lte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(0)); - rte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(1)); - lhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(0)); - rhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(1)); - - /* - * Fail for negative temperatures and negative high limits. - * This check also catches read errors on the tested registers. - */ - if ((s8)lte < 0 || (s8)rte < 0 || (s8)lhi < 0 || (s8)rhi < 0) - return -ENODEV; - - /* fail if all registers hold the same value */ - if (lte == rte && lte == lhi && lte == rhi && lte == llo - && lte == rlo) - return -ENODEV; - - /* - * LM84 Mfr ID is in a different place, - * and it has more unused bits. - */ - if (conv_rate == 0x00 - && (config & 0x7F) == 0x00 - && (status & 0xAB) == 0x00) { - type_name = "lm84"; - } else { - /* fail if low limits are larger than high limits */ - if ((s8)llo > lhi || (s8)rlo > rhi) - return -ENODEV; - type_name = "max1617"; - } - } + /* LM84 Mfr ID in a different place, and it has more unused bits */ + else if (conv_rate == 0x00 + && (config & 0x7F) == 0x00 + && (status & 0xAB) == 0x00) + type_name = "lm84"; + else + type_name = "max1617"; pr_debug("Detected chip %s at adapter %d, address 0x%02x.\n", type_name, i2c_adapter_id(adapter), client->addr); diff --git a/trunk/drivers/hwmon/iio_hwmon.c b/trunk/drivers/hwmon/iio_hwmon.c index 52b77afebde1..aafa4531b961 100644 --- a/trunk/drivers/hwmon/iio_hwmon.c +++ b/trunk/drivers/hwmon/iio_hwmon.c @@ -84,10 +84,8 @@ static int iio_hwmon_probe(struct platform_device *pdev) return PTR_ERR(channels); st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL); - if (st == NULL) { - ret = -ENOMEM; - goto error_release_channels; - } + if (st == NULL) + return -ENOMEM; st->channels = channels; @@ -161,7 +159,7 @@ static int iio_hwmon_probe(struct platform_device *pdev) error_remove_group: sysfs_remove_group(&dev->kobj, &st->attr_group); error_release_channels: - iio_channel_release_all(channels); + iio_channel_release_all(st->channels); return ret; } diff --git a/trunk/drivers/hwmon/nct6775.c b/trunk/drivers/hwmon/nct6775.c index 04638aee9039..f43f5e571db9 100644 --- a/trunk/drivers/hwmon/nct6775.c +++ b/trunk/drivers/hwmon/nct6775.c @@ -3705,10 +3705,8 @@ static int nct6775_probe(struct platform_device *pdev) data->have_temp |= 1 << i; data->have_temp_fixed |= 1 << i; data->reg_temp[0][i] = reg_temp_alternate[i]; - if (i < num_reg_temp) { - data->reg_temp[1][i] = reg_temp_over[i]; - data->reg_temp[2][i] = reg_temp_hyst[i]; - } + data->reg_temp[1][i] = reg_temp_over[i]; + data->reg_temp[2][i] = reg_temp_hyst[i]; data->temp_src[i] = i + 1; continue; } diff --git a/trunk/drivers/hwmon/tmp401.c b/trunk/drivers/hwmon/tmp401.c index dfe6d9527efb..a478454f690f 100644 --- a/trunk/drivers/hwmon/tmp401.c +++ b/trunk/drivers/hwmon/tmp401.c @@ -240,7 +240,7 @@ static struct tmp401_data *tmp401_update_device(struct device *dev) mutex_lock(&data->update_lock); next_update = data->last_updated + - msecs_to_jiffies(data->update_interval); + msecs_to_jiffies(data->update_interval) + 1; if (time_after(jiffies, next_update) || !data->valid) { if (data->kind != tmp432) { /* diff --git a/trunk/drivers/i2c/busses/i2c-designware-core.c b/trunk/drivers/i2c/busses/i2c-designware-core.c index 
c41ca6354fc5..21fbb340ad66 100644 --- a/trunk/drivers/i2c/busses/i2c-designware-core.c +++ b/trunk/drivers/i2c/busses/i2c-designware-core.c @@ -383,8 +383,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) /* Enable the adapter */ __i2c_dw_enable(dev, true); - /* Clear and enable interrupts */ - i2c_dw_clear_int(dev); + /* Enable interrupts */ dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK); } @@ -449,14 +448,8 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev) cmd |= BIT(9); if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { - - /* avoid rx buffer overrun */ - if (rx_limit - dev->rx_outstanding <= 0) - break; - dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD); rx_limit--; - dev->rx_outstanding++; } else dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD); tx_limit--; buf_len--; @@ -509,10 +502,8 @@ i2c_dw_read(struct dw_i2c_dev *dev) rx_valid = dw_readl(dev, DW_IC_RXFLR); - for (; len > 0 && rx_valid > 0; len--, rx_valid--) { + for (; len > 0 && rx_valid > 0; len--, rx_valid--) *buf++ = dw_readl(dev, DW_IC_DATA_CMD); - dev->rx_outstanding--; - } if (len > 0) { dev->status |= STATUS_READ_IN_PROGRESS; @@ -570,7 +561,6 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) dev->msg_err = 0; dev->status = STATUS_IDLE; dev->abort_source = 0; - dev->rx_outstanding = 0; ret = i2c_dw_wait_bus_not_busy(dev); if (ret < 0) diff --git a/trunk/drivers/i2c/busses/i2c-designware-core.h b/trunk/drivers/i2c/busses/i2c-designware-core.h index e761ad18dd61..9c1840ee09c7 100644 --- a/trunk/drivers/i2c/busses/i2c-designware-core.h +++ b/trunk/drivers/i2c/busses/i2c-designware-core.h @@ -60,7 +60,6 @@ * @adapter: i2c subsystem adapter node * @tx_fifo_depth: depth of the hardware tx fifo * @rx_fifo_depth: depth of the hardware rx fifo - * @rx_outstanding: current master-rx elements in tx fifo */ struct dw_i2c_dev { struct device *dev; @@ -89,7 +88,6 @@ struct dw_i2c_dev { u32 master_cfg; unsigned int tx_fifo_depth; unsigned int rx_fifo_depth; - int rx_outstanding; }; #define ACCESS_SWAP 0x00000001 diff --git a/trunk/drivers/i2c/busses/i2c-designware-platdrv.c b/trunk/drivers/i2c/busses/i2c-designware-platdrv.c index 35b70a1edf57..8ec91335d95a 100644 --- a/trunk/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/trunk/drivers/i2c/busses/i2c-designware-platdrv.c @@ -69,7 +69,6 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev) static const struct acpi_device_id dw_i2c_acpi_match[] = { { "INT33C2", 0 }, { "INT33C3", 0 }, - { "80860F41", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); diff --git a/trunk/drivers/i2c/busses/i2c-i801.c b/trunk/drivers/i2c/busses/i2c-i801.c index 3a6903f63913..e1cf2e0e1f23 100644 --- a/trunk/drivers/i2c/busses/i2c-i801.c +++ b/trunk/drivers/i2c/busses/i2c-i801.c @@ -231,11 +231,7 @@ static const char *i801_feature_names[] = { static unsigned int disable_features; module_param(disable_features, uint, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(disable_features, "Disable selected driver features:\n" - "\t\t 0x01 disable SMBus PEC\n" - "\t\t 0x02 disable the block buffer\n" - "\t\t 0x08 disable the I2C block read functionality\n" - "\t\t 0x10 don't use interrupts "); +MODULE_PARM_DESC(disable_features, "Disable selected driver features"); /* Make sure the SMBus host is ready to start transmitting. Return 0 if it is, -EBUSY if it is not. 
*/ diff --git a/trunk/drivers/i2c/busses/i2c-mv64xxx.c b/trunk/drivers/i2c/busses/i2c-mv64xxx.c index 1a3abd6a0bfc..3bbd65d35a5e 100644 --- a/trunk/drivers/i2c/busses/i2c-mv64xxx.c +++ b/trunk/drivers/i2c/busses/i2c-mv64xxx.c @@ -252,7 +252,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) writel(drv_data->cntl_bits, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); drv_data->block = 0; - wake_up(&drv_data->waitq); + wake_up_interruptible(&drv_data->waitq); break; case MV64XXX_I2C_ACTION_CONTINUE: @@ -300,7 +300,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); drv_data->block = 0; - wake_up(&drv_data->waitq); + wake_up_interruptible(&drv_data->waitq); break; case MV64XXX_I2C_ACTION_INVALID: @@ -315,7 +315,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); drv_data->block = 0; - wake_up(&drv_data->waitq); + wake_up_interruptible(&drv_data->waitq); break; } } @@ -381,7 +381,7 @@ mv64xxx_i2c_wait_for_completion(struct mv64xxx_i2c_data *drv_data) unsigned long flags; char abort = 0; - time_left = wait_event_timeout(drv_data->waitq, + time_left = wait_event_interruptible_timeout(drv_data->waitq, !drv_data->block, drv_data->adapter.timeout); spin_lock_irqsave(&drv_data->lock, flags); diff --git a/trunk/drivers/i2c/busses/i2c-s3c2410.c b/trunk/drivers/i2c/busses/i2c-s3c2410.c index cab1c91b75a3..6e8ee92ab553 100644 --- a/trunk/drivers/i2c/busses/i2c-s3c2410.c +++ b/trunk/drivers/i2c/busses/i2c-s3c2410.c @@ -1082,6 +1082,11 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) /* map the registers */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&pdev->dev, "cannot find IO resource\n"); + return -ENOENT; + } + i2c->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(i2c->regs)) diff --git a/trunk/drivers/i2c/busses/i2c-sirf.c b/trunk/drivers/i2c/busses/i2c-sirf.c index a63c7d506836..5a7ad240bd26 100644 --- a/trunk/drivers/i2c/busses/i2c-sirf.c +++ b/trunk/drivers/i2c/busses/i2c-sirf.c @@ -303,6 +303,12 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev) adap->class = I2C_CLASS_HWMON; mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (mem_res == NULL) { + dev_err(&pdev->dev, "Unable to get MEM resource\n"); + err = -EINVAL; + goto out; + } + siic->base = devm_ioremap_resource(&pdev->dev, mem_res); if (IS_ERR(siic->base)) { err = PTR_ERR(siic->base); diff --git a/trunk/drivers/i2c/busses/i2c-tegra.c b/trunk/drivers/i2c/busses/i2c-tegra.c index 9aa1b60f7fdd..b60ff90adc39 100644 --- a/trunk/drivers/i2c/busses/i2c-tegra.c +++ b/trunk/drivers/i2c/busses/i2c-tegra.c @@ -714,6 +714,11 @@ static int tegra_i2c_probe(struct platform_device *pdev) int ret = 0; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "no mem resource\n"); + return -EINVAL; + } + base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/trunk/drivers/i2c/i2c-core.c b/trunk/drivers/i2c/i2c-core.c index 48e31ed69dbf..6b63cc7eb71e 100644 --- a/trunk/drivers/i2c/i2c-core.c +++ b/trunk/drivers/i2c/i2c-core.c @@ -892,8 +892,7 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device); -static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL, - 
i2c_sysfs_delete_device); +static DEVICE_ATTR(delete_device, S_IWUSR, NULL, i2c_sysfs_delete_device); static struct attribute *i2c_adapter_attrs[] = { &dev_attr_name.attr, diff --git a/trunk/drivers/iio/adc/exynos_adc.c b/trunk/drivers/iio/adc/exynos_adc.c index b3d03d335948..9f3a8ef1fb3e 100644 --- a/trunk/drivers/iio/adc/exynos_adc.c +++ b/trunk/drivers/iio/adc/exynos_adc.c @@ -390,8 +390,8 @@ static int exynos_adc_remove(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int exynos_adc_suspend(struct device *dev) { - struct iio_dev *indio_dev = dev_get_drvdata(dev); - struct exynos_adc *info = iio_priv(indio_dev); + struct platform_device *pdev = to_platform_device(dev); + struct exynos_adc *info = platform_get_drvdata(pdev); u32 con; if (info->version == ADC_V2) { @@ -413,8 +413,8 @@ static int exynos_adc_suspend(struct device *dev) static int exynos_adc_resume(struct device *dev) { - struct iio_dev *indio_dev = dev_get_drvdata(dev); - struct exynos_adc *info = iio_priv(indio_dev); + struct platform_device *pdev = to_platform_device(dev); + struct exynos_adc *info = platform_get_drvdata(pdev); int ret; ret = regulator_enable(info->vdd); diff --git a/trunk/drivers/iio/buffer_cb.c b/trunk/drivers/iio/buffer_cb.c index 9d19ba74f22b..9201022945e9 100644 --- a/trunk/drivers/iio/buffer_cb.c +++ b/trunk/drivers/iio/buffer_cb.c @@ -64,7 +64,7 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev, while (chan->indio_dev) { if (chan->indio_dev != indio_dev) { ret = -EINVAL; - goto error_free_scan_mask; + goto error_release_channels; } set_bit(chan->channel->scan_index, cb_buff->buffer.scan_mask); @@ -73,8 +73,6 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev, return cb_buff; -error_free_scan_mask: - kfree(cb_buff->buffer.scan_mask); error_release_channels: iio_channel_release_all(cb_buff->channels); error_free_cb_buff: @@ -102,7 +100,6 @@ EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb); void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff) { - kfree(cb_buff->buffer.scan_mask); iio_channel_release_all(cb_buff->channels); kfree(cb_buff); } diff --git a/trunk/drivers/iio/common/st_sensors/st_sensors_core.c b/trunk/drivers/iio/common/st_sensors/st_sensors_core.c index ed9bc8ae9330..bd33473f8e38 100644 --- a/trunk/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/trunk/drivers/iio/common/st_sensors/st_sensors_core.c @@ -312,8 +312,6 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev, goto read_error; *val = *val >> ch->scan_type.shift; - - err = st_sensors_set_enable(indio_dev, false); } mutex_unlock(&indio_dev->mlock); diff --git a/trunk/drivers/iio/dac/Kconfig b/trunk/drivers/iio/dac/Kconfig index b61160bd935e..f4a6f0838327 100644 --- a/trunk/drivers/iio/dac/Kconfig +++ b/trunk/drivers/iio/dac/Kconfig @@ -5,7 +5,7 @@ menu "Digital to analog converters" config AD5064 tristate "Analog Devices AD5064 and similar multi-channel DAC driver" - depends on (SPI_MASTER && I2C!=m) || I2C + depends on (SPI_MASTER || I2C) help Say yes here to build support for Analog Devices AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R, AD5648, AD5666, AD5668, @@ -27,7 +27,7 @@ config AD5360 config AD5380 tristate "Analog Devices AD5380/81/82/83/84/90/91/92 DAC driver" - depends on (SPI_MASTER && I2C!=m) || I2C + depends on (SPI_MASTER || I2C) select REGMAP_I2C if I2C select REGMAP_SPI if SPI_MASTER help @@ -57,7 +57,7 @@ config AD5624R_SPI config AD5446 tristate "Analog Devices AD5446 and similar single channel DACs driver" - depends on 
(SPI_MASTER && I2C!=m) || I2C + depends on (SPI_MASTER || I2C) help Say yes here to build support for Analog Devices AD5300, AD5301, AD5310, AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453, diff --git a/trunk/drivers/iio/frequency/adf4350.c b/trunk/drivers/iio/frequency/adf4350.c index e76d4ace53ff..a884252ac66b 100644 --- a/trunk/drivers/iio/frequency/adf4350.c +++ b/trunk/drivers/iio/frequency/adf4350.c @@ -212,7 +212,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq) (pdata->r2_user_settings & (ADF4350_REG2_PD_POLARITY_POS | ADF4350_REG2_LDP_6ns | ADF4350_REG2_LDF_INT_N | ADF4350_REG2_CHARGE_PUMP_CURR_uA(5000) | - ADF4350_REG2_MUXOUT(0x7) | ADF4350_REG2_NOISE_MODE(0x3))); + ADF4350_REG2_MUXOUT(0x7) | ADF4350_REG2_NOISE_MODE(0x9))); st->regs[ADF4350_REG3] = pdata->r3_user_settings & (ADF4350_REG3_12BIT_CLKDIV(0xFFF) | diff --git a/trunk/drivers/iio/inkern.c b/trunk/drivers/iio/inkern.c index 98ddc323add0..795d100b4c36 100644 --- a/trunk/drivers/iio/inkern.c +++ b/trunk/drivers/iio/inkern.c @@ -124,7 +124,7 @@ static int __of_iio_channel_get(struct iio_channel *channel, channel->indio_dev = indio_dev; index = iiospec.args_count ? iiospec.args[0] : 0; if (index >= indio_dev->num_channels) { - err = -EINVAL; + return -EINVAL; goto err_put; } channel->channel = &indio_dev->channels[index]; @@ -450,7 +450,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan, s64 raw64 = raw; int ret; - ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET); + ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_SCALE); if (ret == 0) raw64 += offset; diff --git a/trunk/drivers/infiniband/hw/qib/qib_keys.c b/trunk/drivers/infiniband/hw/qib/qib_keys.c index 3b9afccaaade..81c7b73695d2 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_keys.c +++ b/trunk/drivers/infiniband/hw/qib/qib_keys.c @@ -61,7 +61,7 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region) if (dma_region) { struct qib_mregion *tmr; - tmr = rcu_access_pointer(dev->dma_mr); + tmr = rcu_dereference(dev->dma_mr); if (!tmr) { qib_get_mr(mr); rcu_assign_pointer(dev->dma_mr, mr); diff --git a/trunk/drivers/infiniband/ulp/iser/iscsi_iser.c b/trunk/drivers/infiniband/ulp/iser/iscsi_iser.c index 2e84ef859c5b..f19b0998a53c 100644 --- a/trunk/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/trunk/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -5,7 +5,6 @@ * Copyright (C) 2004 Alex Aizman * Copyright (C) 2005 Mike Christie * Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved. - * Copyright (c) 2013 Mellanox Technologies. All rights reserved. * maintained by openib-general@openib.org * * This software is available to you under a choice of one of two diff --git a/trunk/drivers/infiniband/ulp/iser/iscsi_iser.h b/trunk/drivers/infiniband/ulp/iser/iscsi_iser.h index 4f069c0d4c04..06f578cde75b 100644 --- a/trunk/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/trunk/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -8,7 +8,6 @@ * * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. - * Copyright (c) 2013 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU diff --git a/trunk/drivers/infiniband/ulp/iser/iser_initiator.c b/trunk/drivers/infiniband/ulp/iser/iser_initiator.c index b6d81a86c976..a00ccd1ca333 100644 --- a/trunk/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/trunk/drivers/infiniband/ulp/iser/iser_initiator.c @@ -1,6 +1,5 @@ /* * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. - * Copyright (c) 2013 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/trunk/drivers/infiniband/ulp/iser/iser_memory.c b/trunk/drivers/infiniband/ulp/iser/iser_memory.c index 7827baf455a1..68ebb7fe072a 100644 --- a/trunk/drivers/infiniband/ulp/iser/iser_memory.c +++ b/trunk/drivers/infiniband/ulp/iser/iser_memory.c @@ -1,6 +1,5 @@ /* * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. - * Copyright (c) 2013 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/trunk/drivers/infiniband/ulp/iser/iser_verbs.c b/trunk/drivers/infiniband/ulp/iser/iser_verbs.c index 2c4941d0656b..5278916c3103 100644 --- a/trunk/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/trunk/drivers/infiniband/ulp/iser/iser_verbs.c @@ -1,7 +1,6 @@ /* * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. - * Copyright (c) 2013 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -293,10 +292,10 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) } /** - * releases the FMR pool and QP objects, returns 0 on success, + * releases the FMR pool, QP and CMA ID objects, returns 0 on success, * -1 on failure */ -static int iser_free_ib_conn_res(struct iser_conn *ib_conn) +static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id) { int cq_index; BUG_ON(ib_conn == NULL); @@ -315,9 +314,13 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn) rdma_destroy_qp(ib_conn->cma_id); } + /* if cma handler context, the caller acts s.t the cma destroy the id */ + if (ib_conn->cma_id != NULL && can_destroy_id) + rdma_destroy_id(ib_conn->cma_id); ib_conn->fmr_pool = NULL; ib_conn->qp = NULL; + ib_conn->cma_id = NULL; kfree(ib_conn->page_vec); if (ib_conn->login_buf) { @@ -412,16 +415,11 @@ static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id) list_del(&ib_conn->conn_list); mutex_unlock(&ig.connlist_mutex); iser_free_rx_descriptors(ib_conn); - iser_free_ib_conn_res(ib_conn); + iser_free_ib_conn_res(ib_conn, can_destroy_id); ib_conn->device = NULL; /* on EVENT_ADDR_ERROR there's no device yet for this conn */ if (device != NULL) iser_device_try_release(device); - /* if cma handler context, the caller actually destroy the id */ - if (ib_conn->cma_id != NULL && can_destroy_id) { - rdma_destroy_id(ib_conn->cma_id); - ib_conn->cma_id = NULL; - } iscsi_destroy_endpoint(ib_conn->ep); } diff --git a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c index 3f3f0416fbdd..b08ca7a9f76b 100644 --- a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -2226,27 +2226,6 @@ static void srpt_close_ch(struct 
srpt_rdma_ch *ch) spin_unlock_irq(&sdev->spinlock); } -/** - * srpt_shutdown_session() - Whether or not a session may be shut down. - */ -static int srpt_shutdown_session(struct se_session *se_sess) -{ - struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr; - unsigned long flags; - - spin_lock_irqsave(&ch->spinlock, flags); - if (ch->in_shutdown) { - spin_unlock_irqrestore(&ch->spinlock, flags); - return true; - } - - ch->in_shutdown = true; - target_sess_cmd_list_set_waiting(se_sess); - spin_unlock_irqrestore(&ch->spinlock, flags); - - return true; -} - /** * srpt_drain_channel() - Drain a channel by resetting the IB queue pair. * @cm_id: Pointer to the CM ID of the channel to be drained. @@ -2285,9 +2264,6 @@ static void srpt_drain_channel(struct ib_cm_id *cm_id) spin_unlock_irq(&sdev->spinlock); if (do_reset) { - if (ch->sess) - srpt_shutdown_session(ch->sess); - ret = srpt_ch_qp_err(ch); if (ret < 0) printk(KERN_ERR "Setting queue pair in error state" @@ -2352,7 +2328,7 @@ static void srpt_release_channel_work(struct work_struct *w) se_sess = ch->sess; BUG_ON(!se_sess); - target_wait_for_sess_cmds(se_sess); + target_wait_for_sess_cmds(se_sess, 0); transport_deregister_session_configfs(se_sess); transport_deregister_session(se_sess); @@ -3490,6 +3466,14 @@ static void srpt_release_cmd(struct se_cmd *se_cmd) spin_unlock_irqrestore(&ch->spinlock, flags); } +/** + * srpt_shutdown_session() - Whether or not a session may be shut down. + */ +static int srpt_shutdown_session(struct se_session *se_sess) +{ + return true; +} + /** * srpt_close_session() - Forcibly close a session. * diff --git a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.h b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.h index 3dae156905de..4caf55cda7b1 100644 --- a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.h +++ b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.h @@ -325,7 +325,6 @@ struct srpt_rdma_ch { u8 sess_name[36]; struct work_struct release_work; struct completion *release_done; - bool in_shutdown; }; /** diff --git a/trunk/drivers/input/joystick/xpad.c b/trunk/drivers/input/joystick/xpad.c index fa061d46527f..d6cbfe9df218 100644 --- a/trunk/drivers/input/joystick/xpad.c +++ b/trunk/drivers/input/joystick/xpad.c @@ -137,7 +137,7 @@ static const struct xpad_device { { 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX }, { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, - { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", XTYPE_XBOX360 }, { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 }, diff --git a/trunk/drivers/input/keyboard/Kconfig b/trunk/drivers/input/keyboard/Kconfig index 7ac9c9818d55..62a2c0e4cc99 100644 --- a/trunk/drivers/input/keyboard/Kconfig +++ b/trunk/drivers/input/keyboard/Kconfig @@ -431,7 +431,6 @@ config KEYBOARD_TEGRA config KEYBOARD_OPENCORES tristate "OpenCores Keyboard Controller" - depends on HAS_IOMEM help Say Y here if you want to use the OpenCores Keyboard Controller http://www.opencores.org/project,keyboardcontroller diff --git a/trunk/drivers/input/mouse/synaptics.c b/trunk/drivers/input/mouse/synaptics.c index b2420ae19e14..2f78538e09d0 100644 --- 
a/trunk/drivers/input/mouse/synaptics.c +++ b/trunk/drivers/input/mouse/synaptics.c @@ -1379,7 +1379,6 @@ static int synaptics_reconnect(struct psmouse *psmouse) { struct synaptics_data *priv = psmouse->private; struct synaptics_data old_priv = *priv; - unsigned char param[2]; int retry = 0; int error; @@ -1395,7 +1394,6 @@ static int synaptics_reconnect(struct psmouse *psmouse) */ ssleep(1); } - ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_GETID); error = synaptics_detect(psmouse, 0); } while (error && ++retry < 3); diff --git a/trunk/drivers/input/serio/Kconfig b/trunk/drivers/input/serio/Kconfig index 1bda828f4b55..aebfe3ecb945 100644 --- a/trunk/drivers/input/serio/Kconfig +++ b/trunk/drivers/input/serio/Kconfig @@ -205,7 +205,6 @@ config SERIO_XILINX_XPS_PS2 config SERIO_ALTERA_PS2 tristate "Altera UP PS/2 controller" - depends on HAS_IOMEM help Say Y here if you have Altera University Program PS/2 ports. diff --git a/trunk/drivers/input/tablet/wacom_wac.c b/trunk/drivers/input/tablet/wacom_wac.c index 384fbcd0cee0..0bfd8cf25200 100644 --- a/trunk/drivers/input/tablet/wacom_wac.c +++ b/trunk/drivers/input/tablet/wacom_wac.c @@ -342,10 +342,10 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) | ((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12); - switch (wacom->id[idx]) { + switch (wacom->id[idx] & 0xfffff) { case 0x812: /* Inking pen */ case 0x801: /* Intuos3 Inking pen */ - case 0x120802: /* Intuos4/5 Inking Pen */ + case 0x20802: /* Intuos4 Inking Pen */ case 0x012: wacom->tool[idx] = BTN_TOOL_PENCIL; break; @@ -356,14 +356,11 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) case 0x823: /* Intuos3 Grip Pen */ case 0x813: /* Intuos3 Classic Pen */ case 0x885: /* Intuos3 Marker Pen */ - case 0x802: /* Intuos4/5 13HD/24HD General Pen */ - case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */ + case 0x802: /* Intuos4 General Pen */ + case 0x804: /* Intuos4 Marker Pen */ + case 0x40802: /* Intuos4 Classic Pen */ + case 0x18802: /* DTH2242 Grip Pen */ case 0x022: - case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */ - case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */ - case 0x160802: /* Cintiq 13HD Pro Pen */ - case 0x180802: /* DTH2242 Pen */ - case 0x100802: /* Intuos4/5 13HD/24HD General Pen */ wacom->tool[idx] = BTN_TOOL_PEN; break; @@ -394,15 +391,10 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) case 0x82b: /* Intuos3 Grip Pen Eraser */ case 0x81b: /* Intuos3 Classic Pen Eraser */ case 0x91b: /* Intuos3 Airbrush Eraser */ - case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */ - case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */ - case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */ - case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */ - case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */ - case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */ - case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */ - case 0x18080a: /* DTH2242 Eraser */ - case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */ + case 0x80c: /* Intuos4 Marker Pen Eraser */ + case 0x80a: /* Intuos4 General Pen Eraser */ + case 0x4080a: /* Intuos4 Classic Pen Eraser */ + case 0x90a: /* Intuos4 Airbrush Eraser */ wacom->tool[idx] = BTN_TOOL_RUBBER; break; @@ -410,8 +402,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) case 0x912: case 0x112: case 0x913: /* Intuos3 Airbrush */ - case 0x902: /* Intuos4/5 13HD/24HD Airbrush */ - case 0x100902: /* Intuos4/5 13HD/24HD Airbrush */ + case 0x902: /* Intuos4 Airbrush */ 
wacom->tool[idx] = BTN_TOOL_AIRBRUSH; break; @@ -542,8 +533,10 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) input_report_key(input, BTN_8, (data[3] & 0x80)); } if (data[1] | (data[2] & 0x01) | data[3]) { + input_report_key(input, wacom->tool[1], 1); input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); } else { + input_report_key(input, wacom->tool[1], 0); input_report_abs(input, ABS_MISC, 0); } } else if (features->type == DTK) { @@ -553,26 +546,6 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) input_report_key(input, BTN_3, (data[6] & 0x08)); input_report_key(input, BTN_4, (data[6] & 0x10)); input_report_key(input, BTN_5, (data[6] & 0x20)); - if (data[6] & 0x3f) { - input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); - } else { - input_report_abs(input, ABS_MISC, 0); - } - } else if (features->type == WACOM_13HD) { - input_report_key(input, BTN_0, (data[3] & 0x01)); - input_report_key(input, BTN_1, (data[4] & 0x01)); - input_report_key(input, BTN_2, (data[4] & 0x02)); - input_report_key(input, BTN_3, (data[4] & 0x04)); - input_report_key(input, BTN_4, (data[4] & 0x08)); - input_report_key(input, BTN_5, (data[4] & 0x10)); - input_report_key(input, BTN_6, (data[4] & 0x20)); - input_report_key(input, BTN_7, (data[4] & 0x40)); - input_report_key(input, BTN_8, (data[4] & 0x80)); - if ((data[3] & 0x01) | data[4]) { - input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); - } else { - input_report_abs(input, ABS_MISC, 0); - } } else if (features->type == WACOM_24HD) { input_report_key(input, BTN_0, (data[6] & 0x01)); input_report_key(input, BTN_1, (data[6] & 0x02)); @@ -617,8 +590,10 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) } if (data[1] | data[2] | (data[3] & 0x1f) | data[4] | data[6] | data[8]) { + input_report_key(input, wacom->tool[1], 1); input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); } else { + input_report_key(input, wacom->tool[1], 0); input_report_abs(input, ABS_MISC, 0); } } else if (features->type >= INTUOS5S && features->type <= INTUOS5L) { @@ -643,8 +618,10 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) } if (data[2] | (data[3] & 0x01) | data[4] | data[5]) { + input_report_key(input, wacom->tool[1], 1); input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); } else { + input_report_key(input, wacom->tool[1], 0); input_report_abs(input, ABS_MISC, 0); } } else { @@ -691,8 +668,10 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) if ((data[5] & 0x1f) | data[6] | (data[1] & 0x1f) | data[2] | (data[3] & 0x1f) | data[4] | data[8] | (data[7] & 0x01)) { + input_report_key(input, wacom->tool[1], 1); input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); } else { + input_report_key(input, wacom->tool[1], 0); input_report_abs(input, ABS_MISC, 0); } } @@ -1322,7 +1301,6 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len) case INTUOS4L: case CINTIQ: case WACOM_BEE: - case WACOM_13HD: case WACOM_21UX2: case WACOM_22HD: case WACOM_24HD: @@ -1552,15 +1530,15 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, __set_bit(KEY_PROG1, input_dev->keybit); __set_bit(KEY_PROG2, input_dev->keybit); __set_bit(KEY_PROG3, input_dev->keybit); - - input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); - input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0); /* fall through */ case DTK: for (i = 0; i < 6; i++) __set_bit(BTN_0 + i, input_dev->keybit); + input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0); + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); wacom_setup_cintiq(wacom_wac); 
@@ -1601,15 +1579,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, wacom_setup_cintiq(wacom_wac); break; - case WACOM_13HD: - for (i = 0; i < 9; i++) - __set_bit(BTN_0 + i, input_dev->keybit); - - input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); - __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); - wacom_setup_cintiq(wacom_wac); - break; - case INTUOS3: case INTUOS3L: __set_bit(BTN_4, input_dev->keybit); @@ -1968,8 +1937,7 @@ static const struct wacom_features wacom_features_0xF4 = 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xF8 = { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, /* Pen */ - 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, - .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 }; + 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 }; static const struct wacom_features wacom_features_0xF6 = { "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10 }; @@ -1982,9 +1950,6 @@ static const struct wacom_features wacom_features_0xC5 = static const struct wacom_features wacom_features_0xC6 = { "Wacom Cintiq 12WX", WACOM_PKGLEN_INTUOS, 53020, 33440, 1023, 63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; -static const struct wacom_features wacom_features_0x304 = - { "Wacom Cintiq 13HD", WACOM_PKGLEN_INTUOS, 59552, 33848, 1023, - 63, WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xC7 = { "Wacom DTU1931", WACOM_PKGLEN_GRAPHIRE, 37832, 30305, 511, 0, PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -1994,9 +1959,6 @@ static const struct wacom_features wacom_features_0xCE = static const struct wacom_features wacom_features_0xF0 = { "Wacom DTU1631", WACOM_PKGLEN_GRAPHIRE, 34623, 19553, 511, 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; -static const struct wacom_features wacom_features_0x57 = - { "Wacom DTK2241", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, - 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES}; static const struct wacom_features wacom_features_0x59 = /* Pen */ { "Wacom DTH2242", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, @@ -2010,13 +1972,6 @@ static const struct wacom_features wacom_features_0xCC = static const struct wacom_features wacom_features_0xFA = { "Wacom Cintiq 22HD", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; -static const struct wacom_features wacom_features_0x5B = - { "Wacom Cintiq 22HDT", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, - 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, - .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e }; -static const struct wacom_features wacom_features_0x5E = - { "Wacom Cintiq 22HDT", .type = WACOM_24HDT, - .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5b, .touch_max = 10 }; static const struct wacom_features wacom_features_0x90 = { "Wacom ISDv4 90", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -2046,7 +2001,7 @@ static const struct wacom_features wacom_features_0xE5 = static const struct wacom_features wacom_features_0xE6 = { "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255, 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 2 }; + .touch_max = 2 }; static const struct wacom_features wacom_features_0xEC = { "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255, 0, TABLETPC, WACOM_INTUOS_RES, 
WACOM_INTUOS_RES }; @@ -2188,11 +2143,8 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x43) }, { USB_DEVICE_WACOM(0x44) }, { USB_DEVICE_WACOM(0x45) }, - { USB_DEVICE_WACOM(0x57) }, { USB_DEVICE_WACOM(0x59) }, { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) }, - { USB_DEVICE_WACOM(0x5B) }, - { USB_DEVICE_DETAILED(0x5E, USB_CLASS_HID, 0, 0) }, { USB_DEVICE_WACOM(0xB0) }, { USB_DEVICE_WACOM(0xB1) }, { USB_DEVICE_WACOM(0xB2) }, @@ -2253,7 +2205,6 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x100) }, { USB_DEVICE_WACOM(0x101) }, { USB_DEVICE_WACOM(0x10D) }, - { USB_DEVICE_WACOM(0x304) }, { USB_DEVICE_WACOM(0x4001) }, { USB_DEVICE_WACOM(0x47) }, { USB_DEVICE_WACOM(0xF4) }, diff --git a/trunk/drivers/input/tablet/wacom_wac.h b/trunk/drivers/input/tablet/wacom_wac.h index dfc9e08e7f70..5f9a7721e16c 100644 --- a/trunk/drivers/input/tablet/wacom_wac.h +++ b/trunk/drivers/input/tablet/wacom_wac.h @@ -82,7 +82,6 @@ enum { WACOM_24HD, CINTIQ, WACOM_BEE, - WACOM_13HD, WACOM_MO, WIRELESS, BAMBOO_PT, diff --git a/trunk/drivers/input/touchscreen/cyttsp_core.c b/trunk/drivers/input/touchscreen/cyttsp_core.c index ae89d2609ab0..8e60437ac85b 100644 --- a/trunk/drivers/input/touchscreen/cyttsp_core.c +++ b/trunk/drivers/input/touchscreen/cyttsp_core.c @@ -116,15 +116,6 @@ static int ttsp_send_command(struct cyttsp *ts, u8 cmd) return ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd); } -static int cyttsp_handshake(struct cyttsp *ts) -{ - if (ts->pdata->use_hndshk) - return ttsp_send_command(ts, - ts->xy_data.hst_mode ^ CY_HNDSHK_BIT); - - return 0; -} - static int cyttsp_load_bl_regs(struct cyttsp *ts) { memset(&ts->bl_data, 0, sizeof(ts->bl_data)); @@ -142,7 +133,7 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts) memcpy(bl_cmd, bl_command, sizeof(bl_command)); if (ts->pdata->bl_keys) memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS], - ts->pdata->bl_keys, CY_NUM_BL_KEYS); + ts->pdata->bl_keys, sizeof(bl_command)); error = ttsp_write_block_data(ts, CY_REG_BASE, sizeof(bl_cmd), bl_cmd); @@ -176,10 +167,6 @@ static int cyttsp_set_operational_mode(struct cyttsp *ts) if (error) return error; - error = cyttsp_handshake(ts); - if (error) - return error; - return ts->xy_data.act_dist == CY_ACT_DIST_DFLT ? 
-EIO : 0; } @@ -201,10 +188,6 @@ static int cyttsp_set_sysinfo_mode(struct cyttsp *ts) if (error) return error; - error = cyttsp_handshake(ts); - if (error) - return error; - if (!ts->sysinfo_data.tts_verh && !ts->sysinfo_data.tts_verl) return -EIO; @@ -361,9 +344,12 @@ static irqreturn_t cyttsp_irq(int irq, void *handle) goto out; /* provide flow control handshake */ - error = cyttsp_handshake(ts); - if (error) - goto out; + if (ts->pdata->use_hndshk) { + error = ttsp_send_command(ts, + ts->xy_data.hst_mode ^ CY_HNDSHK_BIT); + if (error) + goto out; + } if (unlikely(ts->state == CY_IDLE_STATE)) goto out; diff --git a/trunk/drivers/input/touchscreen/cyttsp_core.h b/trunk/drivers/input/touchscreen/cyttsp_core.h index f1ebde369f86..1aa3c6967e70 100644 --- a/trunk/drivers/input/touchscreen/cyttsp_core.h +++ b/trunk/drivers/input/touchscreen/cyttsp_core.h @@ -67,8 +67,8 @@ struct cyttsp_xydata { /* TTSP System Information interface definition */ struct cyttsp_sysinfo_data { u8 hst_mode; - u8 mfg_stat; u8 mfg_cmd; + u8 mfg_stat; u8 cid[3]; u8 tt_undef1; u8 uid[8]; diff --git a/trunk/drivers/input/touchscreen/egalax_ts.c b/trunk/drivers/input/touchscreen/egalax_ts.c index 39f3df8670c3..17c9097f3b5d 100644 --- a/trunk/drivers/input/touchscreen/egalax_ts.c +++ b/trunk/drivers/input/touchscreen/egalax_ts.c @@ -216,7 +216,7 @@ static int egalax_ts_probe(struct i2c_client *client, input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, EGALAX_MAX_X, 0, 0); input_set_abs_params(input_dev, - ABS_MT_POSITION_Y, 0, EGALAX_MAX_Y, 0, 0); + ABS_MT_POSITION_X, 0, EGALAX_MAX_Y, 0, 0); input_mt_init_slots(input_dev, MAX_SUPPORT_POINTS, 0); input_set_drvdata(input_dev, ts); diff --git a/trunk/drivers/irqchip/irq-gic.c b/trunk/drivers/irqchip/irq-gic.c index 19ceaa60e0f4..1760ceb68b7b 100644 --- a/trunk/drivers/irqchip/irq-gic.c +++ b/trunk/drivers/irqchip/irq-gic.c @@ -705,7 +705,7 @@ static int gic_irq_domain_xlate(struct irq_domain *d, static int __cpuinit gic_secondary_init(struct notifier_block *nfb, unsigned long action, void *hcpu) { - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) + if (action == CPU_STARTING) gic_cpu_init(&gic_data[0]); return NOTIFY_OK; } diff --git a/trunk/drivers/irqchip/irq-mxs.c b/trunk/drivers/irqchip/irq-mxs.c index 63b3d4eb0ef7..29889bbdcc6d 100644 --- a/trunk/drivers/irqchip/irq-mxs.c +++ b/trunk/drivers/irqchip/irq-mxs.c @@ -76,10 +76,16 @@ asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) { u32 irqnr; - irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET); - __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR); - irqnr = irq_find_mapping(icoll_domain, irqnr); - handle_IRQ(irqnr, regs); + do { + irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET); + if (irqnr != 0x7f) { + __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR); + irqnr = irq_find_mapping(icoll_domain, irqnr); + handle_IRQ(irqnr, regs); + continue; + } + break; + } while (1); } static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq, diff --git a/trunk/drivers/irqchip/irq-versatile-fpga.c b/trunk/drivers/irqchip/irq-versatile-fpga.c index 47a52ab580d8..065b7a31a478 100644 --- a/trunk/drivers/irqchip/irq-versatile-fpga.c +++ b/trunk/drivers/irqchip/irq-versatile-fpga.c @@ -119,7 +119,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq, /* Skip invalid IRQs, only register handlers for the real ones */ if (!(f->valid & BIT(hwirq))) - return -EPERM; + return -ENOTSUPP; irq_set_chip_data(irq, f); irq_set_chip_and_handler(irq, &f->chip, 
handle_level_irq); diff --git a/trunk/drivers/irqchip/irq-vic.c b/trunk/drivers/irqchip/irq-vic.c index 2bbb00404cf5..884d11c7355f 100644 --- a/trunk/drivers/irqchip/irq-vic.c +++ b/trunk/drivers/irqchip/irq-vic.c @@ -197,7 +197,7 @@ static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq, /* Skip invalid IRQs, only register handlers for the real ones */ if (!(v->valid_sources & (1 << hwirq))) - return -EPERM; + return -ENOTSUPP; irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq); irq_set_chip_data(irq, v->base); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); diff --git a/trunk/drivers/isdn/capi/kcapi.c b/trunk/drivers/isdn/capi/kcapi.c index c123709acf82..9b1b274c7d25 100644 --- a/trunk/drivers/isdn/capi/kcapi.c +++ b/trunk/drivers/isdn/capi/kcapi.c @@ -93,7 +93,7 @@ capi_ctr_put(struct capi_ctr *ctr) static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr) { - if (contr < 1 || contr - 1 >= CAPI_MAXCONTR) + if (contr - 1 >= CAPI_MAXCONTR) return NULL; return capi_controller[contr - 1]; @@ -103,7 +103,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid) { lockdep_assert_held(&capi_controller_lock); - if (applid < 1 || applid - 1 >= CAPI_MAXAPPL) + if (applid - 1 >= CAPI_MAXAPPL) return NULL; return capi_applications[applid - 1]; @@ -111,7 +111,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid) static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid) { - if (applid < 1 || applid - 1 >= CAPI_MAXAPPL) + if (applid - 1 >= CAPI_MAXAPPL) return NULL; return rcu_dereference(capi_applications[applid - 1]); diff --git a/trunk/drivers/leds/leds-gpio.c b/trunk/drivers/leds/leds-gpio.c index b02b679abf31..a0d931bcb37c 100644 --- a/trunk/drivers/leds/leds-gpio.c +++ b/trunk/drivers/leds/leds-gpio.c @@ -107,10 +107,6 @@ static int create_gpio_led(const struct gpio_led *template, return 0; } - ret = devm_gpio_request(parent, template->gpio, template->name); - if (ret < 0) - return ret; - led_dat->cdev.name = template->name; led_dat->cdev.default_trigger = template->default_trigger; led_dat->gpio = template->gpio; @@ -130,7 +126,10 @@ static int create_gpio_led(const struct gpio_led *template, if (!template->retain_state_suspended) led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; - ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state); + ret = devm_gpio_request_one(parent, template->gpio, + (led_dat->active_low ^ state) ? 
+ GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, + template->name); if (ret < 0) return ret; diff --git a/trunk/drivers/leds/leds-ot200.c b/trunk/drivers/leds/leds-ot200.c index 98cae529373f..ee14662ed5ce 100644 --- a/trunk/drivers/leds/leds-ot200.c +++ b/trunk/drivers/leds/leds-ot200.c @@ -47,37 +47,37 @@ static struct ot200_led leds[] = { { .name = "led_1", .port = 0x49, - .mask = BIT(6), + .mask = BIT(7), }, { .name = "led_2", .port = 0x49, - .mask = BIT(5), + .mask = BIT(6), }, { .name = "led_3", .port = 0x49, - .mask = BIT(4), + .mask = BIT(5), }, { .name = "led_4", .port = 0x49, - .mask = BIT(3), + .mask = BIT(4), }, { .name = "led_5", .port = 0x49, - .mask = BIT(2), + .mask = BIT(3), }, { .name = "led_6", .port = 0x49, - .mask = BIT(1), + .mask = BIT(2), }, { .name = "led_7", .port = 0x49, - .mask = BIT(0), + .mask = BIT(1), } }; diff --git a/trunk/drivers/lguest/page_tables.c b/trunk/drivers/lguest/page_tables.c index 5b9ac32801c7..699187ab3800 100644 --- a/trunk/drivers/lguest/page_tables.c +++ b/trunk/drivers/lguest/page_tables.c @@ -1002,7 +1002,6 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx) kill_guest(&lg->cpus[0], "Cannot populate switcher mapping"); } - lg->pgdirs[pgdir].last_host_cpu = -1; } } diff --git a/trunk/drivers/md/bcache/Kconfig b/trunk/drivers/md/bcache/Kconfig index f950c9d29f3e..05c220d05e23 100644 --- a/trunk/drivers/md/bcache/Kconfig +++ b/trunk/drivers/md/bcache/Kconfig @@ -1,6 +1,7 @@ config BCACHE tristate "Block device as cache" + select CLOSURES ---help--- Allows a block device to be used as cache for other devices; uses a btree for indexing and the layout is optimized for SSDs. diff --git a/trunk/drivers/md/bcache/bcache.h b/trunk/drivers/md/bcache/bcache.h index d3e15b42a4ab..340146d7c17f 100644 --- a/trunk/drivers/md/bcache/bcache.h +++ b/trunk/drivers/md/bcache/bcache.h @@ -1241,7 +1241,7 @@ void bch_cache_set_stop(struct cache_set *); struct cache_set *bch_cache_set_alloc(struct cache_sb *); void bch_btree_cache_free(struct cache_set *); int bch_btree_cache_alloc(struct cache_set *); -void bch_cached_dev_writeback_init(struct cached_dev *); +void bch_writeback_init_cached_dev(struct cached_dev *); void bch_moving_init_cache_set(struct cache_set *); void bch_cache_allocator_exit(struct cache *ca); diff --git a/trunk/drivers/md/bcache/stats.c b/trunk/drivers/md/bcache/stats.c index b8730e714d69..64e679449c2a 100644 --- a/trunk/drivers/md/bcache/stats.c +++ b/trunk/drivers/md/bcache/stats.c @@ -93,6 +93,24 @@ static struct attribute *bch_stats_files[] = { }; static KTYPE(bch_stats); +static void scale_accounting(unsigned long data); + +void bch_cache_accounting_init(struct cache_accounting *acc, + struct closure *parent) +{ + kobject_init(&acc->total.kobj, &bch_stats_ktype); + kobject_init(&acc->five_minute.kobj, &bch_stats_ktype); + kobject_init(&acc->hour.kobj, &bch_stats_ktype); + kobject_init(&acc->day.kobj, &bch_stats_ktype); + + closure_init(&acc->cl, parent); + init_timer(&acc->timer); + acc->timer.expires = jiffies + accounting_delay; + acc->timer.data = (unsigned long) acc; + acc->timer.function = scale_accounting; + add_timer(&acc->timer); +} + int bch_cache_accounting_add_kobjs(struct cache_accounting *acc, struct kobject *parent) { @@ -226,19 +244,3 @@ void bch_mark_sectors_bypassed(struct search *s, int sectors) atomic_add(sectors, &dc->accounting.collector.sectors_bypassed); atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed); } - -void bch_cache_accounting_init(struct cache_accounting *acc, - struct 
closure *parent) -{ - kobject_init(&acc->total.kobj, &bch_stats_ktype); - kobject_init(&acc->five_minute.kobj, &bch_stats_ktype); - kobject_init(&acc->hour.kobj, &bch_stats_ktype); - kobject_init(&acc->day.kobj, &bch_stats_ktype); - - closure_init(&acc->cl, parent); - init_timer(&acc->timer); - acc->timer.expires = jiffies + accounting_delay; - acc->timer.data = (unsigned long) acc; - acc->timer.function = scale_accounting; - add_timer(&acc->timer); -} diff --git a/trunk/drivers/md/bcache/super.c b/trunk/drivers/md/bcache/super.c index f88e2b653a3f..c8046bc4aa57 100644 --- a/trunk/drivers/md/bcache/super.c +++ b/trunk/drivers/md/bcache/super.c @@ -634,10 +634,11 @@ static int open_dev(struct block_device *b, fmode_t mode) return 0; } -static void release_dev(struct gendisk *b, fmode_t mode) +static int release_dev(struct gendisk *b, fmode_t mode) { struct bcache_device *d = b->private_data; closure_put(&d->cl); + return 0; } static int ioctl_dev(struct block_device *b, fmode_t mode, @@ -731,7 +732,8 @@ static void bcache_device_free(struct bcache_device *d) if (d->c) bcache_device_detach(d); - if (d->disk && d->disk->flags & GENHD_FL_UP) + + if (d->disk) del_gendisk(d->disk); if (d->disk && d->disk->queue) blk_cleanup_queue(d->disk->queue); @@ -754,9 +756,12 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size) if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || !(d->unaligned_bvec = mempool_create_kmalloc_pool(1, sizeof(struct bio_vec) * BIO_MAX_PAGES)) || - bio_split_pool_init(&d->bio_split_hook) || - !(d->disk = alloc_disk(1)) || - !(q = blk_alloc_queue(GFP_KERNEL))) + bio_split_pool_init(&d->bio_split_hook)) + + return -ENOMEM; + + d->disk = alloc_disk(1); + if (!d->disk) return -ENOMEM; snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor); @@ -766,6 +771,10 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size) d->disk->fops = &bcache_ops; d->disk->private_data = d; + q = blk_alloc_queue(GFP_KERNEL); + if (!q) + return -ENOMEM; + blk_queue_make_request(q, NULL); d->disk->queue = q; q->queuedata = d; @@ -990,17 +999,14 @@ static void cached_dev_free(struct closure *cl) mutex_lock(&bch_register_lock); - if (atomic_read(&dc->running)) - bd_unlink_disk_holder(dc->bdev, dc->disk.disk); + bd_unlink_disk_holder(dc->bdev, dc->disk.disk); bcache_device_free(&dc->disk); list_del(&dc->list); mutex_unlock(&bch_register_lock); if (!IS_ERR_OR_NULL(dc->bdev)) { - if (dc->bdev->bd_disk) - blk_sync_queue(bdev_get_queue(dc->bdev)); - + blk_sync_queue(bdev_get_queue(dc->bdev)); blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } @@ -1022,67 +1028,73 @@ static void cached_dev_flush(struct closure *cl) static int cached_dev_init(struct cached_dev *dc, unsigned block_size) { - int ret; + int err; struct io *io; - struct request_queue *q = bdev_get_queue(dc->bdev); - __module_get(THIS_MODULE); - INIT_LIST_HEAD(&dc->list); closure_init(&dc->disk.cl, NULL); set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq); + + __module_get(THIS_MODULE); + INIT_LIST_HEAD(&dc->list); kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); - INIT_WORK(&dc->detach, cached_dev_detach_finish); - closure_init_unlocked(&dc->sb_write); - INIT_LIST_HEAD(&dc->io_lru); - spin_lock_init(&dc->io_lock); + bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); + err = bcache_device_init(&dc->disk, block_size); + if (err) + goto err; + + spin_lock_init(&dc->io_lock); + closure_init_unlocked(&dc->sb_write); + INIT_WORK(&dc->detach, 
cached_dev_detach_finish); + dc->sequential_merge = true; dc->sequential_cutoff = 4 << 20; + INIT_LIST_HEAD(&dc->io_lru); + dc->sb_bio.bi_max_vecs = 1; + dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs; + for (io = dc->io; io < dc->io + RECENT_IO; io++) { list_add(&io->lru, &dc->io_lru); hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); } - ret = bcache_device_init(&dc->disk, block_size); - if (ret) - return ret; - - set_capacity(dc->disk.disk, - dc->bdev->bd_part->nr_sects - dc->sb.data_offset); - - dc->disk.disk->queue->backing_dev_info.ra_pages = - max(dc->disk.disk->queue->backing_dev_info.ra_pages, - q->backing_dev_info.ra_pages); - - bch_cached_dev_request_init(dc); - bch_cached_dev_writeback_init(dc); + bch_writeback_init_cached_dev(dc); return 0; +err: + bcache_device_stop(&dc->disk); + return err; } /* Cached device - bcache superblock */ -static void register_bdev(struct cache_sb *sb, struct page *sb_page, +static const char *register_bdev(struct cache_sb *sb, struct page *sb_page, struct block_device *bdev, struct cached_dev *dc) { char name[BDEVNAME_SIZE]; const char *err = "cannot allocate memory"; + struct gendisk *g; struct cache_set *c; + if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0) + return err; + memcpy(&dc->sb, sb, sizeof(struct cache_sb)); + dc->sb_bio.bi_io_vec[0].bv_page = sb_page; dc->bdev = bdev; dc->bdev->bd_holder = dc; - bio_init(&dc->sb_bio); - dc->sb_bio.bi_max_vecs = 1; - dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs; - dc->sb_bio.bi_io_vec[0].bv_page = sb_page; - get_page(sb_page); + g = dc->disk.disk; - if (cached_dev_init(dc, sb->block_size << 9)) - goto err; + set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset); + + g->queue->backing_dev_info.ra_pages = + max(g->queue->backing_dev_info.ra_pages, + bdev->bd_queue->backing_dev_info.ra_pages); + + bch_cached_dev_request_init(dc); err = "error creating kobject"; if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, @@ -1091,8 +1103,6 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) goto err; - pr_info("registered backing device %s", bdevname(bdev, name)); - list_add(&dc->list, &uncached_devices); list_for_each_entry(c, &bch_cache_sets, list) bch_cached_dev_attach(dc, c); @@ -1101,10 +1111,15 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) bch_cached_dev_run(dc); - return; + return NULL; err: + kobject_put(&dc->disk.kobj); pr_notice("error opening %s: %s", bdevname(bdev, name), err); - bcache_device_stop(&dc->disk); + /* + * Return NULL instead of an error because kobject_put() cleans + * everything up + */ + return NULL; } /* Flash only volumes */ @@ -1702,11 +1717,20 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca) size_t free; struct bucket *b; + if (!ca) + return -ENOMEM; + __module_get(THIS_MODULE); kobject_init(&ca->kobj, &bch_cache_ktype); + memcpy(&ca->sb, sb, sizeof(struct cache_sb)); + INIT_LIST_HEAD(&ca->discards); + bio_init(&ca->sb_bio); + ca->sb_bio.bi_max_vecs = 1; + ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs; + bio_init(&ca->journal.bio); ca->journal.bio.bi_max_vecs = 8; ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; @@ -1718,17 +1742,18 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca) !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || !init_fifo(&ca->unused, free << 2, GFP_KERNEL) || !init_heap(&ca->heap, free << 3, GFP_KERNEL) || - 
!(ca->buckets = vzalloc(sizeof(struct bucket) * + !(ca->buckets = vmalloc(sizeof(struct bucket) * ca->sb.nbuckets)) || !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * 2, GFP_KERNEL)) || !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) || !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) || bio_split_pool_init(&ca->bio_split_hook)) - return -ENOMEM; + goto err; ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); + memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket)); for_each_bucket(b, ca) atomic_set(&b->pin, 0); @@ -1741,28 +1766,22 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca) return -ENOMEM; } -static void register_cache(struct cache_sb *sb, struct page *sb_page, +static const char *register_cache(struct cache_sb *sb, struct page *sb_page, struct block_device *bdev, struct cache *ca) { char name[BDEVNAME_SIZE]; const char *err = "cannot allocate memory"; - memcpy(&ca->sb, sb, sizeof(struct cache_sb)); - ca->bdev = bdev; - ca->bdev->bd_holder = ca; + if (cache_alloc(sb, ca) != 0) + return err; - bio_init(&ca->sb_bio); - ca->sb_bio.bi_max_vecs = 1; - ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs; ca->sb_bio.bi_io_vec[0].bv_page = sb_page; - get_page(sb_page); + ca->bdev = bdev; + ca->bdev->bd_holder = ca; if (blk_queue_discard(bdev_get_queue(ca->bdev))) ca->discard = CACHE_DISCARD(&ca->sb); - if (cache_alloc(sb, ca) != 0) - goto err; - err = "error creating kobject"; if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) goto err; @@ -1772,10 +1791,15 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page, goto err; pr_info("registered cache device %s", bdevname(bdev, name)); - return; + + return NULL; err: - pr_notice("error opening %s: %s", bdevname(bdev, name), err); kobject_put(&ca->kobj); + pr_info("error opening %s: %s", bdevname(bdev, name), err); + /* Return NULL instead of an error because kobject_put() cleans + * everything up + */ + return NULL; } /* Global interfaces/init */ @@ -1809,15 +1833,12 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, bdev = blkdev_get_by_path(strim(path), FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); - if (IS_ERR(bdev)) { - if (bdev == ERR_PTR(-EBUSY)) - err = "device busy"; - goto err; - } + if (bdev == ERR_PTR(-EBUSY)) + err = "device busy"; - err = "failed to set blocksize"; - if (set_blocksize(bdev, 4096)) - goto err_close; + if (IS_ERR(bdev) || + set_blocksize(bdev, 4096)) + goto err; err = read_super(sb, bdev, &sb_page); if (err) @@ -1825,33 +1846,33 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (SB_IS_BDEV(sb)) { struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); - if (!dc) - goto err_close; - register_bdev(sb, sb_page, bdev, dc); + err = register_bdev(sb, sb_page, bdev, dc); } else { struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); - if (!ca) - goto err_close; - register_cache(sb, sb_page, bdev, ca); + err = register_cache(sb, sb_page, bdev, ca); } -out: - if (sb_page) + + if (err) { + /* register_(bdev|cache) will only return an error if they + * didn't get far enough to create the kobject - if they did, + * the kobject destructor will do this cleanup. 
+ */ put_page(sb_page); +err_close: + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); +err: + if (attr != &ksysfs_register_quiet) + pr_info("error opening %s: %s", path, err); + ret = -EINVAL; + } + kfree(sb); kfree(path); mutex_unlock(&bch_register_lock); module_put(THIS_MODULE); return ret; - -err_close: - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); -err: - if (attr != &ksysfs_register_quiet) - pr_info("error opening %s: %s", path, err); - ret = -EINVAL; - goto out; } static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) diff --git a/trunk/drivers/md/bcache/writeback.c b/trunk/drivers/md/bcache/writeback.c index 2714ed3991d1..93e7e31a4bd3 100644 --- a/trunk/drivers/md/bcache/writeback.c +++ b/trunk/drivers/md/bcache/writeback.c @@ -375,7 +375,7 @@ static void read_dirty(struct closure *cl) refill_dirty(cl); } -void bch_cached_dev_writeback_init(struct cached_dev *dc) +void bch_writeback_init_cached_dev(struct cached_dev *dc) { closure_init_unlocked(&dc->writeback); init_rwsem(&dc->writeback_lock); diff --git a/trunk/drivers/md/dm-thin.c b/trunk/drivers/md/dm-thin.c index 88f2f802d528..759cffc45cab 100644 --- a/trunk/drivers/md/dm-thin.c +++ b/trunk/drivers/md/dm-thin.c @@ -2188,7 +2188,7 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit) *need_commit = false; - metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev); + metadata_dev_size = get_metadata_dev_size(pool->md_dev); r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); if (r) { @@ -2197,7 +2197,7 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit) } if (metadata_dev_size < sb_metadata_dev_size) { - DMERR("metadata device (%llu blocks) too small: expected %llu", + DMERR("metadata device (%llu sectors) too small: expected %llu", metadata_dev_size, sb_metadata_dev_size); return -EINVAL; diff --git a/trunk/drivers/md/md.c b/trunk/drivers/md/md.c index 9b82377a833b..681d1099a2d5 100644 --- a/trunk/drivers/md/md.c +++ b/trunk/drivers/md/md.c @@ -5268,8 +5268,8 @@ static void md_clean(struct mddev *mddev) static void __md_stop_writes(struct mddev *mddev) { - set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); if (mddev->sync_thread) { + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); set_bit(MD_RECOVERY_INTR, &mddev->recovery); md_reap_sync_thread(mddev); } diff --git a/trunk/drivers/md/raid1.c b/trunk/drivers/md/raid1.c index 6e17f8181c4b..55951182af73 100644 --- a/trunk/drivers/md/raid1.c +++ b/trunk/drivers/md/raid1.c @@ -417,17 +417,7 @@ static void raid1_end_write_request(struct bio *bio, int error) r1_bio->bios[mirror] = NULL; to_put = bio; - /* - * Do not set R1BIO_Uptodate if the current device is - * rebuilding or Faulty. This is because we cannot use - * such device for properly reading the data back (we could - * potentially use it, if the current write would have felt - * before rdev->recovery_offset, but for simplicity we don't - * check this here. - */ - if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) && - !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)) - set_bit(R1BIO_Uptodate, &r1_bio->state); + set_bit(R1BIO_Uptodate, &r1_bio->state); /* Maybe we can clear some bad blocks. 
*/ if (is_badblock(conf->mirrors[mirror].rdev, @@ -880,17 +870,17 @@ static void allow_barrier(struct r1conf *conf) wake_up(&conf->wait_barrier); } -static void freeze_array(struct r1conf *conf, int extra) +static void freeze_array(struct r1conf *conf) { /* stop syncio and normal IO and wait for everything to * go quite. * We increment barrier and nr_waiting, and then - * wait until nr_pending match nr_queued+extra + * wait until nr_pending match nr_queued+1 * This is called in the context of one normal IO request * that has failed. Thus any sync request that might be pending * will be blocked by nr_pending, and we need to wait for * pending IO requests to complete or be queued for re-try. - * Thus the number queued (nr_queued) plus this request (extra) + * Thus the number queued (nr_queued) plus this request (1) * must match the number of pending IOs (nr_pending) before * we continue. */ @@ -898,7 +888,7 @@ static void freeze_array(struct r1conf *conf, int extra) conf->barrier++; conf->nr_waiting++; wait_event_lock_irq_cmd(conf->wait_barrier, - conf->nr_pending == conf->nr_queued+extra, + conf->nr_pending == conf->nr_queued+1, conf->resync_lock, flush_pending_writes(conf)); spin_unlock_irq(&conf->resync_lock); @@ -1554,8 +1544,8 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) * we wait for all outstanding requests to complete. */ synchronize_sched(); - freeze_array(conf, 0); - unfreeze_array(conf); + raise_barrier(conf); + lower_barrier(conf); clear_bit(Unmerged, &rdev->flags); } md_integrity_add_rdev(rdev, mddev); @@ -1605,11 +1595,11 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) */ struct md_rdev *repl = conf->mirrors[conf->raid_disks + number].rdev; - freeze_array(conf, 0); + raise_barrier(conf); clear_bit(Replacement, &repl->flags); p->rdev = repl; conf->mirrors[conf->raid_disks + number].rdev = NULL; - unfreeze_array(conf); + lower_barrier(conf); clear_bit(WantReplacement, &rdev->flags); } else clear_bit(WantReplacement, &rdev->flags); @@ -2205,7 +2195,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) * frozen */ if (mddev->ro == 0) { - freeze_array(conf, 1); + freeze_array(conf); fix_read_error(conf, r1_bio->read_disk, r1_bio->sector, r1_bio->sectors); unfreeze_array(conf); @@ -2790,8 +2780,8 @@ static int run(struct mddev *mddev) return PTR_ERR(conf); if (mddev->queue) - blk_queue_max_write_same_sectors(mddev->queue, 0); - + blk_queue_max_write_same_sectors(mddev->queue, + mddev->chunk_sectors); rdev_for_each(rdev, mddev) { if (!mddev->gendisk) continue; @@ -2973,7 +2963,7 @@ static int raid1_reshape(struct mddev *mddev) return -ENOMEM; } - freeze_array(conf, 0); + raise_barrier(conf); /* ok, everything is stopped */ oldpool = conf->r1bio_pool; @@ -3004,7 +2994,7 @@ static int raid1_reshape(struct mddev *mddev) conf->raid_disks = mddev->raid_disks = raid_disks; mddev->delta_disks = 0; - unfreeze_array(conf); + lower_barrier(conf); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); diff --git a/trunk/drivers/md/raid10.c b/trunk/drivers/md/raid10.c index 6ddae2501b9a..59d4daa5f4c7 100644 --- a/trunk/drivers/md/raid10.c +++ b/trunk/drivers/md/raid10.c @@ -490,17 +490,7 @@ static void raid10_end_write_request(struct bio *bio, int error) sector_t first_bad; int bad_sectors; - /* - * Do not set R10BIO_Uptodate if the current device is - * rebuilding or Faulty. 
This is because we cannot use - * such device for properly reading the data back (we could - * potentially use it, if the current write would have felt - * before rdev->recovery_offset, but for simplicity we don't - * check this here. - */ - if (test_bit(In_sync, &rdev->flags) && - !test_bit(Faulty, &rdev->flags)) - set_bit(R10BIO_Uptodate, &r10_bio->state); + set_bit(R10BIO_Uptodate, &r10_bio->state); /* Maybe we can clear some bad blocks. */ if (is_badblock(rdev, @@ -1065,17 +1055,17 @@ static void allow_barrier(struct r10conf *conf) wake_up(&conf->wait_barrier); } -static void freeze_array(struct r10conf *conf, int extra) +static void freeze_array(struct r10conf *conf) { /* stop syncio and normal IO and wait for everything to * go quiet. * We increment barrier and nr_waiting, and then - * wait until nr_pending match nr_queued+extra + * wait until nr_pending match nr_queued+1 * This is called in the context of one normal IO request * that has failed. Thus any sync request that might be pending * will be blocked by nr_pending, and we need to wait for * pending IO requests to complete or be queued for re-try. - * Thus the number queued (nr_queued) plus this request (extra) + * Thus the number queued (nr_queued) plus this request (1) * must match the number of pending IOs (nr_pending) before * we continue. */ @@ -1083,7 +1073,7 @@ static void freeze_array(struct r10conf *conf, int extra) conf->barrier++; conf->nr_waiting++; wait_event_lock_irq_cmd(conf->wait_barrier, - conf->nr_pending == conf->nr_queued+extra, + conf->nr_pending == conf->nr_queued+1, conf->resync_lock, flush_pending_writes(conf)); @@ -1847,8 +1837,8 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) * we wait for all outstanding requests to complete. */ synchronize_sched(); - freeze_array(conf, 0); - unfreeze_array(conf); + raise_barrier(conf, 0); + lower_barrier(conf); clear_bit(Unmerged, &rdev->flags); } md_integrity_add_rdev(rdev, mddev); @@ -2622,7 +2612,7 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) r10_bio->devs[slot].bio = NULL; if (mddev->ro == 0) { - freeze_array(conf, 1); + freeze_array(conf); fix_read_error(conf, mddev, r10_bio); unfreeze_array(conf); } else @@ -3619,7 +3609,8 @@ static int run(struct mddev *mddev) if (mddev->queue) { blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); - blk_queue_max_write_same_sectors(mddev->queue, 0); + blk_queue_max_write_same_sectors(mddev->queue, + mddev->chunk_sectors); blk_queue_io_min(mddev->queue, chunk_size); if (conf->geo.raid_disks % conf->geo.near_copies) blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c index 05e4a105b9c7..9359828ffe26 100644 --- a/trunk/drivers/md/raid5.c +++ b/trunk/drivers/md/raid5.c @@ -664,7 +664,6 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) bi->bi_rw |= REQ_FLUSH; - bi->bi_vcnt = 1; bi->bi_io_vec[0].bv_len = STRIPE_SIZE; bi->bi_io_vec[0].bv_offset = 0; bi->bi_size = STRIPE_SIZE; @@ -702,7 +701,6 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) else rbi->bi_sector = (sh->sector + rrdev->data_offset); - rbi->bi_vcnt = 1; rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; rbi->bi_io_vec[0].bv_offset = 0; rbi->bi_size = STRIPE_SIZE; @@ -5466,7 +5464,7 @@ static int run(struct mddev *mddev) if (mddev->major_version == 0 && mddev->minor_version > 90) rdev->recovery_offset = reshape_offset; - + if 
(rdev->recovery_offset < reshape_offset) { /* We need to check old and new layout */ if (!only_parity(rdev->raid_disk, @@ -5589,8 +5587,6 @@ static int run(struct mddev *mddev) */ mddev->queue->limits.discard_zeroes_data = 0; - blk_queue_max_write_same_sectors(mddev->queue, 0); - rdev_for_each(rdev, mddev) { disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); diff --git a/trunk/drivers/media/Kconfig b/trunk/drivers/media/Kconfig index 8270388e2a0d..7f5a7cac6dc7 100644 --- a/trunk/drivers/media/Kconfig +++ b/trunk/drivers/media/Kconfig @@ -136,9 +136,9 @@ config DVB_NET # This Kconfig option is used by both PCI and USB drivers config TTPCI_EEPROM - tristate - depends on I2C - default n + tristate + depends on I2C + default n source "drivers/media/dvb-core/Kconfig" @@ -189,12 +189,6 @@ config MEDIA_SUBDRV_AUTOSELECT If unsure say Y. -config MEDIA_ATTACH - bool - depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT - depends on MODULES - default MODULES - source "drivers/media/i2c/Kconfig" source "drivers/media/tuners/Kconfig" source "drivers/media/dvb-frontends/Kconfig" diff --git a/trunk/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/trunk/drivers/media/i2c/s5c73m3/s5c73m3-core.c index 9eac5310942f..cb52438e53ac 100644 --- a/trunk/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/trunk/drivers/media/i2c/s5c73m3/s5c73m3-core.c @@ -956,7 +956,7 @@ static int s5c73m3_oif_enum_frame_interval(struct v4l2_subdev *sd, if (fie->pad != OIF_SOURCE_PAD) return -EINVAL; - if (fie->index >= ARRAY_SIZE(s5c73m3_intervals)) + if (fie->index > ARRAY_SIZE(s5c73m3_intervals)) return -EINVAL; mutex_lock(&state->lock); diff --git a/trunk/drivers/media/pci/cx88/cx88-alsa.c b/trunk/drivers/media/pci/cx88/cx88-alsa.c index aba5b1c649e6..27d62623274b 100644 --- a/trunk/drivers/media/pci/cx88/cx88-alsa.c +++ b/trunk/drivers/media/pci/cx88/cx88-alsa.c @@ -615,7 +615,7 @@ static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol, int changed = 0; u32 old; - if (core->sd_wm8775) + if (core->board.audio_chip == V4L2_IDENT_WM8775) snd_cx88_wm8775_volume_put(kcontrol, value); left = value->value.integer.value[0] & 0x3f; @@ -682,7 +682,8 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol, vol ^= bit; cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol); /* Pass mute onto any WM8775 */ - if (core->sd_wm8775 && ((1<<6) == bit)) + if ((core->board.audio_chip == V4L2_IDENT_WM8775) && + ((1<<6) == bit)) wm8775_s_ctrl(core, V4L2_CID_AUDIO_MUTE, 0 != (vol & bit)); ret = 1; } @@ -902,7 +903,7 @@ static int cx88_audio_initdev(struct pci_dev *pci, goto error; /* If there's a wm8775 then add a Line-In ALC switch */ - if (core->sd_wm8775) + if (core->board.audio_chip == V4L2_IDENT_WM8775) snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip)); strcpy (card->driver, "CX88x"); diff --git a/trunk/drivers/media/pci/cx88/cx88-video.c b/trunk/drivers/media/pci/cx88/cx88-video.c index c7a9be1065c0..1b00615fd395 100644 --- a/trunk/drivers/media/pci/cx88/cx88-video.c +++ b/trunk/drivers/media/pci/cx88/cx88-video.c @@ -385,7 +385,8 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input) /* The wm8775 module has the "2" route hardwired into the initialization. Some boards may use different routes for different inputs. 
HVR-1300 surely does */ - if (core->sd_wm8775) { + if (core->board.audio_chip && + core->board.audio_chip == V4L2_IDENT_WM8775) { call_all(core, audio, s_routing, INPUT(input).audioroute, 0, 0); } @@ -770,7 +771,8 @@ static int video_open(struct file *file) cx_write(MO_GP1_IO, core->board.radio.gpio1); cx_write(MO_GP2_IO, core->board.radio.gpio2); if (core->board.radio.audioroute) { - if (core->sd_wm8775) { + if(core->board.audio_chip && + core->board.audio_chip == V4L2_IDENT_WM8775) { call_all(core, audio, s_routing, core->board.radio.audioroute, 0, 0); } @@ -957,7 +959,7 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl) u32 value,mask; /* Pass changes onto any WM8775 */ - if (core->sd_wm8775) { + if (core->board.audio_chip == V4L2_IDENT_WM8775) { switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: wm8775_s_ctrl(core, ctrl->id, ctrl->val); diff --git a/trunk/drivers/media/pci/zoran/zoran.h b/trunk/drivers/media/pci/zoran/zoran.h index 5e040085c2ff..ca2754a3cd63 100644 --- a/trunk/drivers/media/pci/zoran/zoran.h +++ b/trunk/drivers/media/pci/zoran/zoran.h @@ -176,7 +176,7 @@ struct zoran_fh; struct zoran_mapping { struct zoran_fh *fh; - atomic_t count; + int count; }; struct zoran_buffer { diff --git a/trunk/drivers/media/pci/zoran/zoran_driver.c b/trunk/drivers/media/pci/zoran/zoran_driver.c index d133c30c3fdc..1168a84a737d 100644 --- a/trunk/drivers/media/pci/zoran/zoran_driver.c +++ b/trunk/drivers/media/pci/zoran/zoran_driver.c @@ -2803,7 +2803,8 @@ static void zoran_vm_open (struct vm_area_struct *vma) { struct zoran_mapping *map = vma->vm_private_data; - atomic_inc(&map->count); + + map->count++; } static void @@ -2814,7 +2815,7 @@ zoran_vm_close (struct vm_area_struct *vma) struct zoran *zr = fh->zr; int i; - if (!atomic_dec_and_mutex_lock(&map->count, &zr->resource_lock)) + if (--map->count > 0) return; dprintk(3, KERN_INFO "%s: %s - munmap(%s)\n", ZR_DEVNAME(zr), @@ -2827,16 +2828,14 @@ zoran_vm_close (struct vm_area_struct *vma) kfree(map); /* Any buffers still mapped? 
*/ - for (i = 0; i < fh->buffers.num_buffers; i++) { - if (fh->buffers.buffer[i].map) { - mutex_unlock(&zr->resource_lock); + for (i = 0; i < fh->buffers.num_buffers; i++) + if (fh->buffers.buffer[i].map) return; - } - } dprintk(3, KERN_INFO "%s: %s - free %s buffers\n", ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode)); + mutex_lock(&zr->resource_lock); if (fh->map_mode == ZORAN_MAP_MODE_RAW) { if (fh->buffers.active != ZORAN_FREE) { @@ -2940,7 +2939,7 @@ zoran_mmap (struct file *file, goto mmap_unlock_and_return; } map->fh = fh; - atomic_set(&map->count, 1); + map->count = 1; vma->vm_ops = &zoran_vm_ops; vma->vm_flags |= VM_DONTEXPAND; diff --git a/trunk/drivers/media/platform/coda.c b/trunk/drivers/media/platform/coda.c index 9d1481a60bd9..48b8d7af386d 100644 --- a/trunk/drivers/media/platform/coda.c +++ b/trunk/drivers/media/platform/coda.c @@ -576,14 +576,6 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf); } -static int vidioc_create_bufs(struct file *file, void *priv, - struct v4l2_create_buffers *create) -{ - struct coda_ctx *ctx = fh_to_ctx(priv); - - return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create); -} - static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { @@ -618,7 +610,6 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = { .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, - .vidioc_create_bufs = vidioc_create_bufs, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, diff --git a/trunk/drivers/media/platform/davinci/vpbe_display.c b/trunk/drivers/media/platform/davinci/vpbe_display.c index d0b375cf565f..1802f11e939f 100644 --- a/trunk/drivers/media/platform/davinci/vpbe_display.c +++ b/trunk/drivers/media/platform/davinci/vpbe_display.c @@ -916,21 +916,6 @@ static int vpbe_display_s_fmt(struct file *file, void *priv, other video window */ layer->pix_fmt = *pixfmt; - if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) { - struct vpbe_layer *otherlayer; - - otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer); - /* if other layer is available, only - * claim it, do not configure it - */ - ret = osd_device->ops.request_layer(osd_device, - otherlayer->layer_info.id); - if (ret < 0) { - v4l2_err(&vpbe_dev->v4l2_dev, - "Display Manager failed to allocate layer\n"); - return -EBUSY; - } - } /* Get osd layer config */ osd_device->ops.get_layer_config(osd_device, diff --git a/trunk/drivers/media/platform/davinci/vpfe_capture.c b/trunk/drivers/media/platform/davinci/vpfe_capture.c index 93609091cb23..8c50d3074866 100644 --- a/trunk/drivers/media/platform/davinci/vpfe_capture.c +++ b/trunk/drivers/media/platform/davinci/vpfe_capture.c @@ -1837,7 +1837,7 @@ static int vpfe_probe(struct platform_device *pdev) if (NULL == ccdc_cfg) { v4l2_err(pdev->dev.driver, "Memory allocation failed for ccdc_cfg\n"); - goto probe_free_dev_mem; + goto probe_free_lock; } mutex_lock(&ccdc_lock); @@ -1991,6 +1991,7 @@ static int vpfe_probe(struct platform_device *pdev) free_irq(vpfe_dev->ccdc_irq0, vpfe_dev); probe_free_ccdc_cfg_mem: kfree(ccdc_cfg); +probe_free_lock: mutex_unlock(&ccdc_lock); probe_free_dev_mem: kfree(vpfe_dev); diff --git a/trunk/drivers/media/platform/exynos4-is/fimc-is-regs.c b/trunk/drivers/media/platform/exynos4-is/fimc-is-regs.c index d05eaa2c8490..b0ff67bc1b05 100644 --- a/trunk/drivers/media/platform/exynos4-is/fimc-is-regs.c +++ b/trunk/drivers/media/platform/exynos4-is/fimc-is-regs.c @@ -174,7 +174,7 @@ int 
fimc_is_hw_change_mode(struct fimc_is *is) HIC_CAPTURE_STILL, HIC_CAPTURE_VIDEO, }; - if (WARN_ON(is->config_index >= ARRAY_SIZE(cmd))) + if (WARN_ON(is->config_index > ARRAY_SIZE(cmd))) return -EINVAL; mcuctl_write(cmd[is->config_index], is, MCUCTL_REG_ISSR(0)); diff --git a/trunk/drivers/media/platform/exynos4-is/fimc-is.c b/trunk/drivers/media/platform/exynos4-is/fimc-is.c index 0741945b79ed..47c6363d04e2 100644 --- a/trunk/drivers/media/platform/exynos4-is/fimc-is.c +++ b/trunk/drivers/media/platform/exynos4-is/fimc-is.c @@ -48,6 +48,7 @@ static char *fimc_is_clocks[ISS_CLKS_MAX] = { [ISS_CLK_LITE0] = "lite0", [ISS_CLK_LITE1] = "lite1", [ISS_CLK_MPLL] = "mpll", + [ISS_CLK_SYSREG] = "sysreg", [ISS_CLK_ISP] = "isp", [ISS_CLK_DRC] = "drc", [ISS_CLK_FD] = "fd", @@ -70,6 +71,7 @@ static void fimc_is_put_clocks(struct fimc_is *is) for (i = 0; i < ISS_CLKS_MAX; i++) { if (IS_ERR(is->clocks[i])) continue; + clk_unprepare(is->clocks[i]); clk_put(is->clocks[i]); is->clocks[i] = ERR_PTR(-EINVAL); } @@ -88,6 +90,12 @@ static int fimc_is_get_clocks(struct fimc_is *is) ret = PTR_ERR(is->clocks[i]); goto err; } + ret = clk_prepare(is->clocks[i]); + if (ret < 0) { + clk_put(is->clocks[i]); + is->clocks[i] = ERR_PTR(-EINVAL); + goto err; + } } return 0; @@ -95,7 +103,7 @@ static int fimc_is_get_clocks(struct fimc_is *is) fimc_is_put_clocks(is); dev_err(&is->pdev->dev, "failed to get clock: %s\n", fimc_is_clocks[i]); - return ret; + return -ENXIO; } static int fimc_is_setup_clocks(struct fimc_is *is) @@ -136,7 +144,7 @@ int fimc_is_enable_clocks(struct fimc_is *is) for (i = 0; i < ISS_GATE_CLKS_MAX; i++) { if (IS_ERR(is->clocks[i])) continue; - ret = clk_prepare_enable(is->clocks[i]); + ret = clk_enable(is->clocks[i]); if (ret < 0) { dev_err(&is->pdev->dev, "clock %s enable failed\n", fimc_is_clocks[i]); @@ -155,7 +163,7 @@ void fimc_is_disable_clocks(struct fimc_is *is) for (i = 0; i < ISS_GATE_CLKS_MAX; i++) { if (!IS_ERR(is->clocks[i])) { - clk_disable_unprepare(is->clocks[i]); + clk_disable(is->clocks[i]); pr_debug("disabled clock: %s\n", fimc_is_clocks[i]); } } @@ -318,11 +326,6 @@ int fimc_is_start_firmware(struct fimc_is *is) struct device *dev = &is->pdev->dev; int ret; - if (is->fw.f_w == NULL) { - dev_err(dev, "firmware is not loaded\n"); - return -EINVAL; - } - memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size); wmb(); @@ -834,11 +837,23 @@ static int fimc_is_probe(struct platform_device *pdev) goto err_clk; } pm_runtime_enable(dev); - + /* + * Enable only the ISP power domain, keep FIMC-IS clocks off until + * the whole clock tree is configured. The ISP power domain needs + * to be active in order to access any CMU_ISP clock registers.
+ */ ret = pm_runtime_get_sync(dev); if (ret < 0) goto err_irq; + ret = fimc_is_setup_clocks(is); + pm_runtime_put_sync(dev); + + if (ret < 0) + goto err_irq; + + is->clk_init = true; + is->alloc_ctx = vb2_dma_contig_init_ctx(dev); if (IS_ERR(is->alloc_ctx)) { ret = PTR_ERR(is->alloc_ctx); @@ -860,8 +875,6 @@ static int fimc_is_probe(struct platform_device *pdev) if (ret < 0) goto err_dfs; - pm_runtime_put_sync(dev); - dev_dbg(dev, "FIMC-IS registered successfully\n"); return 0; @@ -881,11 +894,9 @@ static int fimc_is_probe(struct platform_device *pdev) static int fimc_is_runtime_resume(struct device *dev) { struct fimc_is *is = dev_get_drvdata(dev); - int ret; - ret = fimc_is_setup_clocks(is); - if (ret) - return ret; + if (!is->clk_init) + return 0; return fimc_is_enable_clocks(is); } @@ -894,7 +905,9 @@ static int fimc_is_runtime_suspend(struct device *dev) { struct fimc_is *is = dev_get_drvdata(dev); - fimc_is_disable_clocks(is); + if (is->clk_init) + fimc_is_disable_clocks(is); + return 0; } @@ -928,8 +941,7 @@ static int fimc_is_remove(struct platform_device *pdev) vb2_dma_contig_cleanup_ctx(is->alloc_ctx); fimc_is_put_clocks(is); fimc_is_debugfs_remove(is); - if (is->fw.f_w) - release_firmware(is->fw.f_w); + release_firmware(is->fw.f_w); fimc_is_free_cpu_memory(is); return 0; diff --git a/trunk/drivers/media/platform/exynos4-is/fimc-is.h b/trunk/drivers/media/platform/exynos4-is/fimc-is.h index d7db133b493f..f5275a5b0156 100644 --- a/trunk/drivers/media/platform/exynos4-is/fimc-is.h +++ b/trunk/drivers/media/platform/exynos4-is/fimc-is.h @@ -73,6 +73,7 @@ enum { ISS_CLK_LITE0, ISS_CLK_LITE1, ISS_CLK_MPLL, + ISS_CLK_SYSREG, ISS_CLK_ISP, ISS_CLK_DRC, ISS_CLK_FD, @@ -264,6 +265,7 @@ struct fimc_is { spinlock_t slock; struct clk *clocks[ISS_CLKS_MAX]; + bool clk_init; void __iomem *regs; void __iomem *pmu_regs; int irq; diff --git a/trunk/drivers/media/platform/exynos4-is/fimc-isp.c b/trunk/drivers/media/platform/exynos4-is/fimc-isp.c index 7ede30b5910f..d63947f7b302 100644 --- a/trunk/drivers/media/platform/exynos4-is/fimc-isp.c +++ b/trunk/drivers/media/platform/exynos4-is/fimc-isp.c @@ -138,7 +138,7 @@ static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd, return 0; } - mf->colorspace = V4L2_COLORSPACE_SRGB; + mf->colorspace = V4L2_COLORSPACE_JPEG; mutex_lock(&isp->subdev_lock); __is_get_frame_size(is, &cur_fmt); @@ -194,7 +194,7 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd, v4l2_dbg(1, debug, sd, "%s: pad%d: code: 0x%x, %dx%d\n", __func__, fmt->pad, mf->code, mf->width, mf->height); - mf->colorspace = V4L2_COLORSPACE_SRGB; + mf->colorspace = V4L2_COLORSPACE_JPEG; mutex_lock(&isp->subdev_lock); __isp_subdev_try_format(isp, fmt); diff --git a/trunk/drivers/media/platform/exynos4-is/mipi-csis.c b/trunk/drivers/media/platform/exynos4-is/mipi-csis.c index 254d70fe762a..a2eda9d5ac87 100644 --- a/trunk/drivers/media/platform/exynos4-is/mipi-csis.c +++ b/trunk/drivers/media/platform/exynos4-is/mipi-csis.c @@ -746,7 +746,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev, node = v4l2_of_get_next_endpoint(node, NULL); if (!node) { dev_err(&pdev->dev, "No port node at %s\n", - pdev->dev.of_node->full_name); + node->full_name); return -EINVAL; } /* Get port node and validate MIPI-CSI channel id. 
*/ diff --git a/trunk/drivers/media/platform/omap/omap_vout.c b/trunk/drivers/media/platform/omap/omap_vout.c index d338b19da544..477268a2415f 100644 --- a/trunk/drivers/media/platform/omap/omap_vout.c +++ b/trunk/drivers/media/platform/omap/omap_vout.c @@ -2150,9 +2150,6 @@ static int __init omap_vout_probe(struct platform_device *pdev) struct omap_dss_device *def_display; struct omap2video_device *vid_dev = NULL; - if (omapdss_is_initialized() == false) - return -EPROBE_DEFER; - ret = omapdss_compat_init(); if (ret) { dev_err(&pdev->dev, "failed to init dss\n"); diff --git a/trunk/drivers/media/platform/s3c-camif/camif-core.h b/trunk/drivers/media/platform/s3c-camif/camif-core.h index 35d2fcdc0036..261134baa655 100644 --- a/trunk/drivers/media/platform/s3c-camif/camif-core.h +++ b/trunk/drivers/media/platform/s3c-camif/camif-core.h @@ -229,7 +229,7 @@ struct camif_vp { unsigned int state; u16 fmt_flags; u8 id; - u16 rotation; + u8 rotation; u8 hflip; u8 vflip; unsigned int offset; diff --git a/trunk/drivers/media/platform/s5p-jpeg/Makefile b/trunk/drivers/media/platform/s5p-jpeg/Makefile index d18cb5edd2d5..ddc2900d88a2 100644 --- a/trunk/drivers/media/platform/s5p-jpeg/Makefile +++ b/trunk/drivers/media/platform/s5p-jpeg/Makefile @@ -1,2 +1,2 @@ s5p-jpeg-objs := jpeg-core.o -obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o +obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) := s5p-jpeg.o diff --git a/trunk/drivers/media/platform/s5p-mfc/Makefile b/trunk/drivers/media/platform/s5p-mfc/Makefile index 15f59b324fef..379008c6d09a 100644 --- a/trunk/drivers/media/platform/s5p-mfc/Makefile +++ b/trunk/drivers/media/platform/s5p-mfc/Makefile @@ -1,4 +1,4 @@ -obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc.o +obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc.c b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc.c index d12faa691af8..01f9ae0dadb0 100644 --- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -397,7 +397,7 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx, leave_handle_frame: spin_unlock_irqrestore(&dev->irqlock, flags); if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING) - || ctx->dst_queue_cnt < ctx->pb_count) + || ctx->dst_queue_cnt < ctx->dpb_count) clear_work_bit(ctx); s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); wake_up_ctx(ctx, reason, err); @@ -473,7 +473,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx, s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx); - ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count, + ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count, dev); ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count, dev); @@ -562,7 +562,7 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx, struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_buf *mb_entry; - mfc_debug(2, "Stream completed\n"); + mfc_debug(2, "Stream completed"); s5p_mfc_clear_int_flags(dev); ctx->int_type = reason; @@ -1362,6 +1362,7 @@ static struct s5p_mfc_variant mfc_drvdata_v5 = { .port_num = MFC_NUM_PORTS, .buf_size = &buf_size_v5, .buf_align = &mfc_buf_align_v5, + .mclk_name = "sclk_mfc", .fw_name = "s5p-mfc.fw", }; @@ -1388,6 +1389,7 @@ static struct s5p_mfc_variant mfc_drvdata_v6 = { .port_num = MFC_NUM_PORTS_V6, .buf_size = &buf_size_v6, .buf_align = &mfc_buf_align_v6, + .mclk_name = "aclk_333", 
.fw_name = "s5p-mfc-v6.fw", }; diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_common.h index ef4074cd5316..202d1d7a37a8 100644 --- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_common.h +++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_common.h @@ -138,7 +138,6 @@ enum s5p_mfc_inst_state { MFCINST_INIT = 100, MFCINST_GOT_INST, MFCINST_HEAD_PARSED, - MFCINST_HEAD_PRODUCED, MFCINST_BUFS_SET, MFCINST_RUNNING, MFCINST_FINISHING, @@ -232,6 +231,7 @@ struct s5p_mfc_variant { unsigned int port_num; struct s5p_mfc_buf_size *buf_size; struct s5p_mfc_buf_align *buf_align; + char *mclk_name; char *fw_name; }; @@ -438,7 +438,7 @@ struct s5p_mfc_enc_params { u32 rc_framerate_num; u32 rc_framerate_denom; - struct { + union { struct s5p_mfc_h264_enc_params h264; struct s5p_mfc_mpeg4_enc_params mpeg4; } codec; @@ -602,7 +602,7 @@ struct s5p_mfc_ctx { int after_packed_pb; int sei_fp_parse; - int pb_count; + int dpb_count; int total_dpb_count; int mv_count; /* Buffers */ diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c index dc1fc94a488d..2e5f30b40dea 100644 --- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c +++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c @@ -38,7 +38,7 @@ int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev) dev->fw_virt_addr = dma_alloc_coherent(dev->mem_dev_l, dev->fw_size, &dev->bank1, GFP_KERNEL); - if (IS_ERR_OR_NULL(dev->fw_virt_addr)) { + if (IS_ERR(dev->fw_virt_addr)) { dev->fw_virt_addr = NULL; mfc_err("Allocating bitprocessor buffer failed\n"); return -ENOMEM; diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h index 8e608f5aa0d7..bd5cd4ae993c 100644 --- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h +++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h @@ -30,8 +30,8 @@ extern int debug; #define mfc_debug(level, fmt, args...) #endif -#define mfc_debug_enter() mfc_debug(5, "enter\n") -#define mfc_debug_leave() mfc_debug(5, "leave\n") +#define mfc_debug_enter() mfc_debug(5, "enter") +#define mfc_debug_leave() mfc_debug(5, "leave") #define mfc_err(fmt, args...) 
\ do { \ diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c index 00b07032f4f0..4af53bd2f182 100644 --- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c +++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c @@ -210,11 +210,11 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx) /* Context is to decode a frame */ if (ctx->src_queue_cnt >= 1 && ctx->state == MFCINST_RUNNING && - ctx->dst_queue_cnt >= ctx->pb_count) + ctx->dst_queue_cnt >= ctx->dpb_count) return 1; /* Context is to return last frame */ if (ctx->state == MFCINST_FINISHING && - ctx->dst_queue_cnt >= ctx->pb_count) + ctx->dst_queue_cnt >= ctx->dpb_count) return 1; /* Context is to set buffers */ if (ctx->src_queue_cnt >= 1 && @@ -224,7 +224,7 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx) /* Resolution change */ if ((ctx->state == MFCINST_RES_CHANGE_INIT || ctx->state == MFCINST_RES_CHANGE_FLUSH) && - ctx->dst_queue_cnt >= ctx->pb_count) + ctx->dst_queue_cnt >= ctx->dpb_count) return 1; if (ctx->state == MFCINST_RES_CHANGE_END && ctx->src_queue_cnt >= 1) @@ -537,7 +537,7 @@ static int vidioc_reqbufs(struct file *file, void *priv, mfc_err("vb2_reqbufs on capture failed\n"); return ret; } - if (reqbufs->count < ctx->pb_count) { + if (reqbufs->count < ctx->dpb_count) { mfc_err("Not enough buffers allocated\n"); reqbufs->count = 0; s5p_mfc_clock_on(); @@ -751,7 +751,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl) case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: if (ctx->state >= MFCINST_HEAD_PARSED && ctx->state < MFCINST_ABORT) { - ctrl->val = ctx->pb_count; + ctrl->val = ctx->dpb_count; break; } else if (ctx->state != MFCINST_INIT) { v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n"); @@ -763,7 +763,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl) S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0); if (ctx->state >= MFCINST_HEAD_PARSED && ctx->state < MFCINST_ABORT) { - ctrl->val = ctx->pb_count; + ctrl->val = ctx->dpb_count; } else { v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n"); return -EINVAL; @@ -924,10 +924,10 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq, /* Output plane count is 2 - one for Y and one for CbCr */ *plane_count = 2; /* Setup buffer count */ - if (*buf_count < ctx->pb_count) - *buf_count = ctx->pb_count; - if (*buf_count > ctx->pb_count + MFC_MAX_EXTRA_DPB) - *buf_count = ctx->pb_count + MFC_MAX_EXTRA_DPB; + if (*buf_count < ctx->dpb_count) + *buf_count = ctx->dpb_count; + if (*buf_count > ctx->dpb_count + MFC_MAX_EXTRA_DPB) + *buf_count = ctx->dpb_count + MFC_MAX_EXTRA_DPB; if (*buf_count > MFC_MAX_BUFFERS) *buf_count = MFC_MAX_BUFFERS; } else { diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index 2549967b2f85..4f6b553c4b2d 100644 --- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c @@ -592,7 +592,7 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx) return 1; /* context is ready to encode a frame */ if ((ctx->state == MFCINST_RUNNING || - ctx->state == MFCINST_HEAD_PRODUCED) && + ctx->state == MFCINST_HEAD_PARSED) && ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1) return 1; /* context is ready to encode remaining frames */ @@ -649,7 +649,6 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx) struct s5p_mfc_enc_params *p = &ctx->enc_params; struct s5p_mfc_buf *dst_mb; unsigned long flags; - unsigned int enc_pb_count; if (p->seq_hdr_mode == 
V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) { spin_lock_irqsave(&dev->irqlock, flags); @@ -662,20 +661,19 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx) vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE); spin_unlock_irqrestore(&dev->irqlock, flags); } - - if (!IS_MFCV6(dev)) { + if (IS_MFCV6(dev)) { + ctx->state = MFCINST_HEAD_PARSED; /* for INIT_BUFFER cmd */ + } else { ctx->state = MFCINST_RUNNING; if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); - } else { - enc_pb_count = s5p_mfc_hw_call(dev->mfc_ops, - get_enc_dpb_count, dev); - if (ctx->pb_count < enc_pb_count) - ctx->pb_count = enc_pb_count; - ctx->state = MFCINST_HEAD_PRODUCED; } + if (IS_MFCV6(dev)) + ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops, + get_enc_dpb_count, dev); + return 0; } @@ -719,9 +717,9 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev); strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev); - mfc_debug(2, "Encoded slice type: %d\n", slice_type); - mfc_debug(2, "Encoded stream size: %d\n", strm_size); - mfc_debug(2, "Display order: %d\n", + mfc_debug(2, "Encoded slice type: %d", slice_type); + mfc_debug(2, "Encoded stream size: %d", strm_size); + mfc_debug(2, "Display order: %d", mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT)); spin_lock_irqsave(&dev->irqlock, flags); if (slice_type >= 0) { @@ -1057,13 +1055,15 @@ static int vidioc_reqbufs(struct file *file, void *priv, } ctx->capture_state = QUEUE_BUFS_REQUESTED; - ret = s5p_mfc_hw_call(ctx->dev->mfc_ops, - alloc_codec_buffers, ctx); - if (ret) { - mfc_err("Failed to allocate encoding buffers\n"); - reqbufs->count = 0; - ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); - return -ENOMEM; + if (!IS_MFCV6(dev)) { + ret = s5p_mfc_hw_call(ctx->dev->mfc_ops, + alloc_codec_buffers, ctx); + if (ret) { + mfc_err("Failed to allocate encoding buffers\n"); + reqbufs->count = 0; + ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); + return -ENOMEM; + } } } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { if (ctx->output_state != QUEUE_FREE) { @@ -1071,19 +1071,6 @@ static int vidioc_reqbufs(struct file *file, void *priv, ctx->output_state); return -EINVAL; } - - if (IS_MFCV6(dev)) { - /* Check for min encoder buffers */ - if (ctx->pb_count && - (reqbufs->count < ctx->pb_count)) { - reqbufs->count = ctx->pb_count; - mfc_debug(2, "Minimum %d output buffers needed\n", - ctx->pb_count); - } else { - ctx->pb_count = reqbufs->count; - } - } - ret = vb2_reqbufs(&ctx->vq_src, reqbufs); if (ret != 0) { mfc_err("error in vb2_reqbufs() for E(S)\n"); @@ -1546,14 +1533,14 @@ int vidioc_encoder_cmd(struct file *file, void *priv, spin_lock_irqsave(&dev->irqlock, flags); if (list_empty(&ctx->src_queue)) { - mfc_debug(2, "EOS: empty src queue, entering finishing state\n"); + mfc_debug(2, "EOS: empty src queue, entering finishing state"); ctx->state = MFCINST_FINISHING; if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); spin_unlock_irqrestore(&dev->irqlock, flags); s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); } else { - mfc_debug(2, "EOS: marking last buffer of stream\n"); + mfc_debug(2, "EOS: marking last buffer of stream"); buf = list_entry(ctx->src_queue.prev, struct s5p_mfc_buf, list); if (buf->flags & MFC_BUF_FLAG_USED) @@ -1622,9 +1609,9 @@ static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb) mfc_err("failed to get plane cookie\n"); return -EINVAL; } - mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx\n", - vb->v4l2_buf.index, 
i, - vb2_dma_contig_plane_dma_addr(vb, i)); + mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx", + vb->v4l2_buf.index, i, + vb2_dma_contig_plane_dma_addr(vb, i)); } return 0; } @@ -1773,27 +1760,11 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count) struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); struct s5p_mfc_dev *dev = ctx->dev; - if (IS_MFCV6(dev) && (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) { - - if ((ctx->state == MFCINST_GOT_INST) && - (dev->curr_ctx == ctx->num) && dev->hw_lock) { - s5p_mfc_wait_for_done_ctx(ctx, - S5P_MFC_R2H_CMD_SEQ_DONE_RET, - 0); - } - - if (ctx->src_bufs_cnt < ctx->pb_count) { - mfc_err("Need minimum %d OUTPUT buffers\n", - ctx->pb_count); - return -EINVAL; - } - } - + v4l2_ctrl_handler_setup(&ctx->ctrl_handler); /* If context is ready then dev = work->data;schedule it to run */ if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); - return 0; } @@ -1949,7 +1920,6 @@ int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx) if (controls[i].is_volatile && ctx->ctrls[i]) ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE; } - v4l2_ctrl_handler_setup(&ctx->ctrl_handler); return 0; } diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c index 368582b091bf..0af05a2d1cd4 100644 --- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c +++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c @@ -1275,8 +1275,8 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); - mfc_debug(2, "encoding buffer with index=%d state=%d\n", - src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state); + mfc_debug(2, "encoding buffer with index=%d state=%d", + src_mb ? 
src_mb->b->v4l2_buf.index : -1, ctx->state); s5p_mfc_encode_one_frame_v5(ctx); return 0; } diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c index 66f0d042357f..7e76fce2e524 100644 --- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c +++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c @@ -62,6 +62,12 @@ static void s5p_mfc_release_dec_desc_buffer_v6(struct s5p_mfc_ctx *ctx) /* NOP */ } +static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev) +{ + /* NOP */ + return -1; +} + /* Allocate codec buffers */ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx) { @@ -161,7 +167,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx) S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6); ctx->bank1.size = ctx->scratch_buf_size + ctx->tmv_buffer_size + - (ctx->pb_count * (ctx->luma_dpb_size + + (ctx->dpb_count * (ctx->luma_dpb_size + ctx->chroma_dpb_size + ctx->me_buffer_size)); ctx->bank2.size = 0; break; @@ -175,7 +181,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx) S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6); ctx->bank1.size = ctx->scratch_buf_size + ctx->tmv_buffer_size + - (ctx->pb_count * (ctx->luma_dpb_size + + (ctx->dpb_count * (ctx->luma_dpb_size + ctx->chroma_dpb_size + ctx->me_buffer_size)); ctx->bank2.size = 0; break; @@ -192,6 +198,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx) } BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1)); } + return 0; } @@ -442,8 +449,8 @@ static int s5p_mfc_set_enc_stream_buffer_v6(struct s5p_mfc_ctx *ctx, WRITEL(addr, S5P_FIMV_E_STREAM_BUFFER_ADDR_V6); /* 16B align */ WRITEL(size, S5P_FIMV_E_STREAM_BUFFER_SIZE_V6); - mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d\n", - addr, size); + mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d", + addr, size); return 0; } @@ -456,8 +463,8 @@ static void s5p_mfc_set_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx, WRITEL(y_addr, S5P_FIMV_E_SOURCE_LUMA_ADDR_V6); /* 256B align */ WRITEL(c_addr, S5P_FIMV_E_SOURCE_CHROMA_ADDR_V6); - mfc_debug(2, "enc src y buf addr: 0x%08lx\n", y_addr); - mfc_debug(2, "enc src c buf addr: 0x%08lx\n", c_addr); + mfc_debug(2, "enc src y buf addr: 0x%08lx", y_addr); + mfc_debug(2, "enc src c buf addr: 0x%08lx", c_addr); } static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx, @@ -472,8 +479,8 @@ static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx, enc_recon_y_addr = READL(S5P_FIMV_E_RECON_LUMA_DPB_ADDR_V6); enc_recon_c_addr = READL(S5P_FIMV_E_RECON_CHROMA_DPB_ADDR_V6); - mfc_debug(2, "recon y addr: 0x%08lx\n", enc_recon_y_addr); - mfc_debug(2, "recon c addr: 0x%08lx\n", enc_recon_c_addr); + mfc_debug(2, "recon y addr: 0x%08lx", enc_recon_y_addr); + mfc_debug(2, "recon c addr: 0x%08lx", enc_recon_c_addr); } /* Set encoding ref & codec buffer */ @@ -490,7 +497,7 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx) mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1); - for (i = 0; i < ctx->pb_count; i++) { + for (i = 0; i < ctx->dpb_count; i++) { WRITEL(buf_addr1, S5P_FIMV_E_LUMA_DPB_V6 + (4 * i)); buf_addr1 += ctx->luma_dpb_size; WRITEL(buf_addr1, S5P_FIMV_E_CHROMA_DPB_V6 + (4 * i)); @@ -513,7 +520,7 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx) buf_size1 -= ctx->tmv_buffer_size; mfc_debug(2, "Buf1: %u, buf_size1: %d (ref frames %d)\n", - buf_addr1, buf_size1, ctx->pb_count); + buf_addr1, buf_size1, ctx->dpb_count); if (buf_size1 < 0) { mfc_debug(2, "Not enough 
memory has been allocated.\n"); return -ENOMEM; @@ -1424,8 +1431,8 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0); src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1); - mfc_debug(2, "enc src y addr: 0x%08lx\n", src_y_addr); - mfc_debug(2, "enc src c addr: 0x%08lx\n", src_c_addr); + mfc_debug(2, "enc src y addr: 0x%08lx", src_y_addr); + mfc_debug(2, "enc src c addr: 0x%08lx", src_c_addr); s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr); @@ -1515,6 +1522,22 @@ static inline int s5p_mfc_run_init_enc_buffers(struct s5p_mfc_ctx *ctx) struct s5p_mfc_dev *dev = ctx->dev; int ret; + ret = s5p_mfc_alloc_codec_buffers_v6(ctx); + if (ret) { + mfc_err("Failed to allocate encoding buffers.\n"); + return -ENOMEM; + } + + /* Header was generated, now starting processing. + * First set the reference frame buffers. + */ + if (ctx->capture_state != QUEUE_BUFS_REQUESTED) { + mfc_err("It seems that destination buffers were not\n" + "requested. MFC requires that the header be generated\n" + "before allocating codec buffers.\n"); + return -EAGAIN; + } + dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); ret = s5p_mfc_set_enc_ref_buffer_v6(ctx); @@ -1559,7 +1582,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev) mfc_debug(1, "Seting new context to %p\n", ctx); /* Got context to run in ctx */ mfc_debug(1, "ctx->dst_queue_cnt=%d ctx->dpb_count=%d ctx->src_queue_cnt=%d\n", - ctx->dst_queue_cnt, ctx->pb_count, ctx->src_queue_cnt); + ctx->dst_queue_cnt, ctx->dpb_count, ctx->src_queue_cnt); mfc_debug(1, "ctx->state=%d\n", ctx->state); /* Last frame has already been sent to MFC * Now obtaining frames from MFC buffer */ @@ -1624,7 +1647,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev) case MFCINST_GOT_INST: s5p_mfc_run_init_enc(ctx); break; - case MFCINST_HEAD_PRODUCED: + case MFCINST_HEAD_PARSED: /* Only for MFC6.x */ ret = s5p_mfc_run_init_enc_buffers(ctx); break; default: @@ -1707,7 +1730,7 @@ static int s5p_mfc_get_dspl_status_v6(struct s5p_mfc_dev *dev) return mfc_read(dev, S5P_FIMV_D_DISPLAY_STATUS_V6); } -static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev) +static int s5p_mfc_get_decoded_status_v6(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_D_DECODED_STATUS_V6); } diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c index 11d5f1dada32..6aa38a56aaf2 100644 --- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c +++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c @@ -50,6 +50,19 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev) goto err_p_ip_clk; } + pm->clock = clk_get(&dev->plat_dev->dev, dev->variant->mclk_name); + if (IS_ERR(pm->clock)) { + mfc_err("Failed to get MFC clock\n"); + ret = PTR_ERR(pm->clock); + goto err_g_ip_clk_2; + } + + ret = clk_prepare(pm->clock); + if (ret) { + mfc_err("Failed to prepare MFC clock\n"); + goto err_p_ip_clk_2; + } + atomic_set(&pm->power, 0); #ifdef CONFIG_PM_RUNTIME pm->device = &dev->plat_dev->dev; @@ -59,6 +72,10 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev) atomic_set(&clk_ref, 0); #endif return 0; +err_p_ip_clk_2: + clk_put(pm->clock); +err_g_ip_clk_2: + clk_unprepare(pm->clock_gate); err_p_ip_clk: clk_put(pm->clock_gate); err_g_ip_clk: @@ -69,6 +86,8 @@ void s5p_mfc_final_pm(struct s5p_mfc_dev *dev) { clk_unprepare(pm->clock_gate); clk_put(pm->clock_gate); + clk_unprepare(pm->clock); + clk_put(pm->clock); #ifdef CONFIG_PM_RUNTIME
pm_runtime_disable(pm->device); #endif @@ -79,7 +98,7 @@ int s5p_mfc_clock_on(void) int ret; #ifdef CLK_DEBUG atomic_inc(&clk_ref); - mfc_debug(3, "+ %d\n", atomic_read(&clk_ref)); + mfc_debug(3, "+ %d", atomic_read(&clk_ref)); #endif ret = clk_enable(pm->clock_gate); return ret; @@ -89,7 +108,7 @@ void s5p_mfc_clock_off(void) { #ifdef CLK_DEBUG atomic_dec(&clk_ref); - mfc_debug(3, "- %d\n", atomic_read(&clk_ref)); + mfc_debug(3, "- %d", atomic_read(&clk_ref)); #endif clk_disable(pm->clock_gate); } diff --git a/trunk/drivers/media/platform/sh_veu.c b/trunk/drivers/media/platform/sh_veu.c index 59a9deefb242..0b32cc3f6a47 100644 --- a/trunk/drivers/media/platform/sh_veu.c +++ b/trunk/drivers/media/platform/sh_veu.c @@ -905,11 +905,11 @@ static int sh_veu_queue_setup(struct vb2_queue *vq, if (ftmp.fmt.pix.width != pix->width || ftmp.fmt.pix.height != pix->height) return -EINVAL; - size = pix->bytesperline ? pix->bytesperline * pix->height * fmt->depth / fmt->ydepth : - pix->width * pix->height * fmt->depth / fmt->ydepth; + size = pix->bytesperline ? pix->bytesperline * pix->height : + pix->width * pix->height * fmt->depth >> 3; } else { vfmt = sh_veu_get_vfmt(veu, vq->type); - size = vfmt->bytesperline * vfmt->frame.height * vfmt->fmt->depth / vfmt->fmt->ydepth; + size = vfmt->bytesperline * vfmt->frame.height; } if (count < 2) @@ -1033,6 +1033,8 @@ static int sh_veu_release(struct file *file) dev_dbg(veu->dev, "Releasing instance %p\n", veu_file); + pm_runtime_put(veu->dev); + if (veu_file == veu->capture) { veu->capture = NULL; vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)); @@ -1048,8 +1050,6 @@ static int sh_veu_release(struct file *file) veu->m2m_ctx = NULL; } - pm_runtime_put(veu->dev); - kfree(veu_file); return 0; @@ -1138,7 +1138,10 @@ static irqreturn_t sh_veu_isr(int irq, void *dev_id) veu->xaction++; - return IRQ_WAKE_THREAD; + if (!veu->aborting) + return IRQ_WAKE_THREAD; + + return IRQ_HANDLED; } static int sh_veu_probe(struct platform_device *pdev) diff --git a/trunk/drivers/media/platform/soc_camera/soc_camera.c b/trunk/drivers/media/platform/soc_camera/soc_camera.c index 3a4efbdc7668..eea832c5fd01 100644 --- a/trunk/drivers/media/platform/soc_camera/soc_camera.c +++ b/trunk/drivers/media/platform/soc_camera/soc_camera.c @@ -643,9 +643,9 @@ static int soc_camera_close(struct file *file) if (ici->ops->init_videobuf2) vb2_queue_release(&icd->vb2_vidq); - __soc_camera_power_off(icd); - ici->ops->remove(icd); + + __soc_camera_power_off(icd); } if (icd->streamer == file) diff --git a/trunk/drivers/media/radio/Kconfig b/trunk/drivers/media/radio/Kconfig index d529ba788f41..c0beee2fa37c 100644 --- a/trunk/drivers/media/radio/Kconfig +++ b/trunk/drivers/media/radio/Kconfig @@ -22,7 +22,6 @@ config RADIO_SI476X tristate "Silicon Laboratories Si476x I2C FM Radio" depends on I2C && VIDEO_V4L2 depends on MFD_SI476X_CORE - depends on SND_SOC select SND_SOC_SI476X ---help--- Choose Y here if you have this FM radio chip. 
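Several hunks above (fimc-is.c, s5p_mfc_pm.c) shuffle clock handling between the clk_prepare() and clk_enable() halves of the common clock framework. The sketch below is only an illustrative reference and not part of the patch; the device pointer and the "my-clk" identifier are placeholders. It shows the usual acquire/prepare/enable/teardown pairing those hunks rely on: clk_prepare() may sleep and is done once up front, clk_enable()/clk_disable() are the fast, atomic-safe gates, and every call is undone in reverse order.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *example_clk;

/* Acquire and prepare the clock once, in process context. */
static int example_clk_init(struct device *dev)
{
	example_clk = clk_get(dev, "my-clk");	/* take a reference; "my-clk" is a placeholder */
	if (IS_ERR(example_clk))
		return PTR_ERR(example_clk);

	return clk_prepare(example_clk);	/* may sleep, so not from atomic context */
}

/* Gate the clock on; safe to call from atomic context. */
static int example_clk_on(void)
{
	return clk_enable(example_clk);
}

/* Tear down in reverse order of setup. */
static void example_clk_exit(void)
{
	clk_disable(example_clk);	/* pairs with clk_enable() */
	clk_unprepare(example_clk);	/* pairs with clk_prepare() */
	clk_put(example_clk);		/* drops the clk_get() reference */
}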
diff --git a/trunk/drivers/media/radio/radio-si476x.c b/trunk/drivers/media/radio/radio-si476x.c index 9dc8bafe6486..9430c6a29937 100644 --- a/trunk/drivers/media/radio/radio-si476x.c +++ b/trunk/drivers/media/radio/radio-si476x.c @@ -44,7 +44,7 @@ #define FREQ_MUL (10000000 / 625) -#define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0x80 & (status)) +#define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0b10000000 & (status)) #define DRIVER_NAME "si476x-radio" #define DRIVER_CARD "SI476x AM/FM Receiver" diff --git a/trunk/drivers/media/tuners/Kconfig b/trunk/drivers/media/tuners/Kconfig index 15665debc572..f6768cad001a 100644 --- a/trunk/drivers/media/tuners/Kconfig +++ b/trunk/drivers/media/tuners/Kconfig @@ -1,3 +1,23 @@ +config MEDIA_ATTACH + bool "Load and attach frontend and tuner driver modules as needed" + depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT + depends on MODULES + default y if !EXPERT + help + Remove the static dependency of DVB card drivers on all + frontend modules for all possible card variants. Instead, + allow the card drivers to only load the frontend modules + they require. + + Also, tuner module will automatically load a tuner driver + when needed, for analog mode. + + This saves several KBytes of memory. + + Note: You will need module-init-tools v3.2 or later for this feature. + + If unsure say Y. + # Analog TV tuners, auto-loaded via tuner.ko config MEDIA_TUNER tristate diff --git a/trunk/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/trunk/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index 2cc8ec70e3b6..22015fe1a0f3 100644 --- a/trunk/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/trunk/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -376,7 +376,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d) struct rtl28xxu_req req_mxl5007t = {0xd9c0, CMD_I2C_RD, 1, buf}; struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf}; struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf}; - struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf}; + struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 5, buf}; dev_dbg(&d->udev->dev, "%s:\n", __func__); @@ -481,9 +481,9 @@ static int rtl2832u_read_config(struct dvb_usb_device *d) goto found; } - /* check R820T ID register; reg=00 val=69 */ + /* check R820T by reading tuner stats at I2C addr 0x1a */ ret = rtl28xxu_ctrl_msg(d, &req_r820t); - if (ret == 0 && buf[0] == 0x69) { + if (ret == 0) { priv->tuner = TUNER_RTL2832_R820T; priv->tuner_name = "R820T"; goto found; diff --git a/trunk/drivers/media/usb/gspca/sonixb.c b/trunk/drivers/media/usb/gspca/sonixb.c index d7ff3b9687c5..3fe207e038c7 100644 --- a/trunk/drivers/media/usb/gspca/sonixb.c +++ b/trunk/drivers/media/usb/gspca/sonixb.c @@ -1159,13 +1159,6 @@ static int sd_start(struct gspca_dev *gspca_dev) regs[0x01] = 0x44; /* Select 24 Mhz clock */ regs[0x12] = 0x02; /* Set hstart to 2 */ } - break; - case SENSOR_PAS202: - /* For some unknown reason we need to increase hstart by 1 on - the sn9c103, otherwise we get wrong colors (bayer shift). 
*/ - if (sd->bridge == BRIDGE_103) - regs[0x12] += 1; - break; } /* Disable compression when the raw bayer format has been selected */ if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW) diff --git a/trunk/drivers/media/usb/pwc/pwc.h b/trunk/drivers/media/usb/pwc/pwc.h index 81b017a554bc..7a6a0d39c2c6 100644 --- a/trunk/drivers/media/usb/pwc/pwc.h +++ b/trunk/drivers/media/usb/pwc/pwc.h @@ -226,7 +226,7 @@ struct pwc_device struct list_head queued_bufs; spinlock_t queued_bufs_lock; /* Protects queued_bufs */ - /* If taking both locks vb_queue_lock must always be locked first! */ + /* Note if taking both locks v4l2_lock must always be locked first! */ struct mutex v4l2_lock; /* Protects everything else */ struct mutex vb_queue_lock; /* Protects vb_queue and capt_file */ diff --git a/trunk/drivers/media/v4l2-core/v4l2-ctrls.c b/trunk/drivers/media/v4l2-core/v4l2-ctrls.c index fccd08b66d1a..ebb8e48619a2 100644 --- a/trunk/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/trunk/drivers/media/v4l2-core/v4l2-ctrls.c @@ -1835,8 +1835,6 @@ bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl) { if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_TX) return true; - if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_RX) - return true; switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: case V4L2_CID_AUDIO_VOLUME: diff --git a/trunk/drivers/media/v4l2-core/v4l2-ioctl.c b/trunk/drivers/media/v4l2-core/v4l2-ioctl.c index 7658586fe5f4..f81bda1a48ec 100644 --- a/trunk/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/trunk/drivers/media/v4l2-core/v4l2-ioctl.c @@ -243,6 +243,7 @@ static void v4l_print_format(const void *arg, bool write_only) const struct v4l2_vbi_format *vbi; const struct v4l2_sliced_vbi_format *sliced; const struct v4l2_window *win; + const struct v4l2_clip *clip; unsigned i; pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); @@ -252,7 +253,7 @@ static void v4l_print_format(const void *arg, bool write_only) pix = &p->fmt.pix; pr_cont(", width=%u, height=%u, " "pixelformat=%c%c%c%c, field=%s, " - "bytesperline=%u, sizeimage=%u, colorspace=%d\n", + "bytesperline=%u sizeimage=%u, colorspace=%d\n", pix->width, pix->height, (pix->pixelformat & 0xff), (pix->pixelformat >> 8) & 0xff, @@ -283,14 +284,20 @@ static void v4l_print_format(const void *arg, bool write_only) case V4L2_BUF_TYPE_VIDEO_OVERLAY: case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: win = &p->fmt.win; - /* Note: we can't print the clip list here since the clips - * pointer is a userspace pointer, not a kernelspace - * pointer. 
*/ - pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, chromakey=0x%08x, clipcount=%u, clips=%p, bitmap=%p, global_alpha=0x%02x\n", - win->w.width, win->w.height, win->w.left, win->w.top, + pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, " + "chromakey=0x%08x, bitmap=%p, " + "global_alpha=0x%02x\n", + win->w.width, win->w.height, + win->w.left, win->w.top, prt_names(win->field, v4l2_field_names), - win->chromakey, win->clipcount, win->clips, - win->bitmap, win->global_alpha); + win->chromakey, win->bitmap, win->global_alpha); + clip = win->clips; + for (i = 0; i < win->clipcount; i++) { + printk(KERN_DEBUG "clip %u: wxh=%dx%d, x,y=%d,%d\n", + i, clip->c.width, clip->c.height, + clip->c.left, clip->c.top); + clip = clip->next; + } break; case V4L2_BUF_TYPE_VBI_CAPTURE: case V4L2_BUF_TYPE_VBI_OUTPUT: @@ -325,7 +332,7 @@ static void v4l_print_framebuffer(const void *arg, bool write_only) pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, " "height=%u, pixelformat=%c%c%c%c, " - "bytesperline=%u, sizeimage=%u, colorspace=%d\n", + "bytesperline=%u sizeimage=%u, colorspace=%d\n", p->capability, p->flags, p->base, p->fmt.width, p->fmt.height, (p->fmt.pixelformat & 0xff), @@ -346,7 +353,7 @@ static void v4l_print_modulator(const void *arg, bool write_only) const struct v4l2_modulator *p = arg; if (write_only) - pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans); + pr_cont("index=%u, txsubchans=0x%x", p->index, p->txsubchans); else pr_cont("index=%u, name=%.*s, capability=0x%x, " "rangelow=%u, rangehigh=%u, txsubchans=0x%x\n", @@ -438,13 +445,13 @@ static void v4l_print_buffer(const void *arg, bool write_only) for (i = 0; i < p->length; ++i) { plane = &p->m.planes[i]; printk(KERN_DEBUG - "plane %d: bytesused=%d, data_offset=0x%08x, " + "plane %d: bytesused=%d, data_offset=0x%08x " "offset/userptr=0x%lx, length=%d\n", i, plane->bytesused, plane->data_offset, plane->m.userptr, plane->length); } } else { - pr_cont(", bytesused=%d, offset/userptr=0x%lx, length=%d\n", + pr_cont("bytesused=%d, offset/userptr=0x%lx, length=%d\n", p->bytesused, p->m.userptr, p->length); } @@ -497,8 +504,6 @@ static void v4l_print_streamparm(const void *arg, bool write_only) c->capability, c->outputmode, c->timeperframe.numerator, c->timeperframe.denominator, c->extendedmode, c->writebuffers); - } else { - pr_cont("\n"); } } @@ -729,11 +734,11 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only) p->type); switch (p->type) { case V4L2_FRMSIZE_TYPE_DISCRETE: - pr_cont(", wxh=%ux%u\n", + pr_cont(" wxh=%ux%u\n", p->discrete.width, p->discrete.height); break; case V4L2_FRMSIZE_TYPE_STEPWISE: - pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n", + pr_cont(" min=%ux%u, max=%ux%u, step=%ux%u\n", p->stepwise.min_width, p->stepwise.min_height, p->stepwise.step_width, p->stepwise.step_height, p->stepwise.max_width, p->stepwise.max_height); @@ -759,12 +764,12 @@ static void v4l_print_frmivalenum(const void *arg, bool write_only) p->width, p->height, p->type); switch (p->type) { case V4L2_FRMIVAL_TYPE_DISCRETE: - pr_cont(", fps=%d/%d\n", + pr_cont(" fps=%d/%d\n", p->discrete.numerator, p->discrete.denominator); break; case V4L2_FRMIVAL_TYPE_STEPWISE: - pr_cont(", min=%d/%d, max=%d/%d, step=%d/%d\n", + pr_cont(" min=%d/%d, max=%d/%d, step=%d/%d\n", p->stepwise.min.numerator, p->stepwise.min.denominator, p->stepwise.max.numerator, @@ -802,8 +807,8 @@ static void v4l_print_event(const void *arg, bool write_only) pr_cont("value64=%lld, ", c->value64); else pr_cont("value=%d, ", c->value); - pr_cont("flags=0x%x, 
minimum=%d, maximum=%d, step=%d, " - "default_value=%d\n", + pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d," + " default_value=%d\n", c->flags, c->minimum, c->maximum, c->step, c->default_value); break; @@ -840,7 +845,7 @@ static void v4l_print_freq_band(const void *arg, bool write_only) const struct v4l2_frequency_band *p = arg; pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, " - "rangelow=%u, rangehigh=%u, modulation=0x%x\n", + "rangelow=%u, rangehigh=%u, modulation=0x%x\n", p->tuner, p->type, p->index, p->capability, p->rangelow, p->rangehigh, p->modulation); diff --git a/trunk/drivers/media/v4l2-core/v4l2-mem2mem.c b/trunk/drivers/media/v4l2-core/v4l2-mem2mem.c index e96497f7c3ed..66f599fcb829 100644 --- a/trunk/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/trunk/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -205,7 +205,7 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev) static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) { struct v4l2_m2m_dev *m2m_dev; - unsigned long flags_job, flags_out, flags_cap; + unsigned long flags_job, flags; m2m_dev = m2m_ctx->m2m_dev; dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx); @@ -223,26 +223,23 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) return; } - spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out); + spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) { - spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, - flags_out); + spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); dprintk("No input buffers available\n"); return; } - spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap); + spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) { - spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, - flags_cap); - spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, - flags_out); + spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); dprintk("No output buffers available\n"); return; } - spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap); - spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out); + spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); if (m2m_dev->m2m_ops->job_ready && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { @@ -374,20 +371,6 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, } EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); -/** - * v4l2_m2m_create_bufs() - create a source or destination buffer, depending - * on the type - */ -int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, - struct v4l2_create_buffers *create) -{ - struct vb2_queue *vq; - - vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type); - return vb2_create_bufs(vq, create); -} -EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs); - /** * v4l2_m2m_expbuf() - export a source or destination buffer, depending on * the type @@ -503,10 +486,8 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, if (m2m_ctx->m2m_dev->m2m_ops->unlock) m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv); - if (list_empty(&src_q->done_list)) - poll_wait(file, &src_q->done_wq, wait); - if (list_empty(&dst_q->done_list)) - poll_wait(file, 
&dst_q->done_wq, wait); + poll_wait(file, &src_q->done_wq, wait); + poll_wait(file, &dst_q->done_wq, wait); if (m2m_ctx->m2m_dev->m2m_ops->lock) m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv); diff --git a/trunk/drivers/media/v4l2-core/videobuf2-core.c b/trunk/drivers/media/v4l2-core/videobuf2-core.c index e3bdc3be91e1..7d833eefaf4e 100644 --- a/trunk/drivers/media/v4l2-core/videobuf2-core.c +++ b/trunk/drivers/media/v4l2-core/videobuf2-core.c @@ -2014,8 +2014,7 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) if (list_empty(&q->queued_list)) return res | POLLERR; - if (list_empty(&q->done_list)) - poll_wait(file, &q->done_wq, wait); + poll_wait(file, &q->done_wq, wait); /* * Take first buffer available for dequeuing. diff --git a/trunk/drivers/memory/emif.c b/trunk/drivers/memory/emif.c index 04644e7b42b1..cadf1cc19aaf 100644 --- a/trunk/drivers/memory/emif.c +++ b/trunk/drivers/memory/emif.c @@ -1560,6 +1560,12 @@ static int __init_or_module emif_probe(struct platform_device *pdev) platform_set_drvdata(pdev, emif); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(emif->dev, "%s: error getting memory resource\n", + __func__); + goto error; + } + emif->base = devm_ioremap_resource(emif->dev, res); if (IS_ERR(emif->base)) goto error; diff --git a/trunk/drivers/mfd/Kconfig b/trunk/drivers/mfd/Kconfig index d54e985748b7..d9aed1593e5d 100644 --- a/trunk/drivers/mfd/Kconfig +++ b/trunk/drivers/mfd/Kconfig @@ -579,7 +579,7 @@ config AB8500_CORE config AB8500_DEBUG bool "Enable debug info via debugfs" - depends on AB8500_GPADC && DEBUG_FS + depends on AB8500_CORE && DEBUG_FS default y if DEBUG_FS help Select this option if you want debug information using the debug @@ -818,7 +818,6 @@ config MFD_TPS65910 config MFD_TPS65912 bool "TI TPS65912 Power Management chip" depends on GPIOLIB - select MFD_CORE help If you say yes here you get support for the TPS65912 series of PM chips. diff --git a/trunk/drivers/mfd/ab8500-core.c b/trunk/drivers/mfd/ab8500-core.c index 258b367e3989..8e8a016effe9 100644 --- a/trunk/drivers/mfd/ab8500-core.c +++ b/trunk/drivers/mfd/ab8500-core.c @@ -867,15 +867,6 @@ static struct resource ab8500_chargalg_resources[] = {}; #ifdef CONFIG_DEBUG_FS static struct resource ab8500_debug_resources[] = { - { - .name = "IRQ_AB8500", - /* - * Number will be filled in. NOTE: this is deliberately - * not flagged as an IRQ in ordet to avoid remapping using - * the irqdomain in the MFD core, so that this IRQ passes - * unremapped to the debug code. 
- */ - }, { .name = "IRQ_FIRST", .start = AB8500_INT_MAIN_EXT_CH_NOT_OK, @@ -1060,7 +1051,6 @@ static struct mfd_cell ab8500_devs[] = { }, { .name = "ab8500-gpadc", - .of_compatible = "stericsson,ab8500-gpadc", .num_resources = ARRAY_SIZE(ab8500_gpadc_resources), .resources = ab8500_gpadc_resources, }, @@ -1107,7 +1097,7 @@ static struct mfd_cell ab8500_devs[] = { .of_compatible = "stericsson,ab8500-denc", }, { - .name = "pinctrl-ab8500", + .name = "ab8500-gpio", .of_compatible = "stericsson,ab8500-gpio", }, { @@ -1218,7 +1208,6 @@ static struct mfd_cell ab8505_devs[] = { }, { .name = "ab8500-gpadc", - .of_compatible = "stericsson,ab8500-gpadc", .num_resources = ARRAY_SIZE(ab8505_gpadc_resources), .resources = ab8505_gpadc_resources, }, @@ -1245,7 +1234,7 @@ static struct mfd_cell ab8505_devs[] = { .name = "ab8500-leds", }, { - .name = "pinctrl-ab8505", + .name = "ab8500-gpio", }, { .name = "ab8500-usb", @@ -1282,7 +1271,6 @@ static struct mfd_cell ab8540_devs[] = { }, { .name = "ab8500-gpadc", - .of_compatible = "stericsson,ab8500-gpadc", .num_resources = ARRAY_SIZE(ab8505_gpadc_resources), .resources = ab8505_gpadc_resources, }, @@ -1314,7 +1302,7 @@ static struct mfd_cell ab8540_devs[] = { .resources = ab8500_temp_resources, }, { - .name = "pinctrl-ab8540", + .name = "ab8500-gpio", }, { .name = "ab8540-usb", @@ -1724,12 +1712,6 @@ static int ab8500_probe(struct platform_device *pdev) if (ret) return ret; -#if CONFIG_DEBUG_FS - /* Pass to debugfs */ - ab8500_debug_resources[0].start = ab8500->irq; - ab8500_debug_resources[0].end = ab8500->irq; -#endif - if (is_ab9540(ab8500)) ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs, ARRAY_SIZE(ab9540_devs), NULL, diff --git a/trunk/drivers/mfd/ab8500-debugfs.c b/trunk/drivers/mfd/ab8500-debugfs.c index 37b7ce4c7c3b..b88bbbc15f1e 100644 --- a/trunk/drivers/mfd/ab8500-debugfs.c +++ b/trunk/drivers/mfd/ab8500-debugfs.c @@ -91,10 +91,12 @@ #include #endif +/* TODO: this file should not reference IRQ_DB8500_AB8500! 
*/ +#include + static u32 debug_bank; static u32 debug_address; -static int irq_ab8500; static int irq_first; static int irq_last; static u32 *irq_count; @@ -1587,7 +1589,7 @@ void ab8500_debug_register_interrupt(int line) { if (line < num_interrupt_lines) { num_interrupts[line]++; - if (suspend_test_wake_cause_interrupt_is_mine(irq_ab8500)) + if (suspend_test_wake_cause_interrupt_is_mine(IRQ_DB8500_AB8500)) num_wake_interrupts[line]++; } } @@ -2939,7 +2941,6 @@ static int ab8500_debug_probe(struct platform_device *plf) struct dentry *file; int ret = -ENOMEM; struct ab8500 *ab8500; - struct resource *res; debug_bank = AB8500_MISC; debug_address = AB8500_REV_REG & 0x00FF; @@ -2958,15 +2959,6 @@ static int ab8500_debug_probe(struct platform_device *plf) if (!event_name) goto out_freedev_attr; - res = platform_get_resource_byname(plf, 0, "IRQ_AB8500"); - if (!res) { - dev_err(&plf->dev, "AB8500 irq not found, err %d\n", - irq_first); - ret = -ENXIO; - goto out_freeevent_name; - } - irq_ab8500 = res->start; - irq_first = platform_get_irq_byname(plf, "IRQ_FIRST"); if (irq_first < 0) { dev_err(&plf->dev, "First irq not found, err %d\n", diff --git a/trunk/drivers/mfd/ab8500-gpadc.c b/trunk/drivers/mfd/ab8500-gpadc.c index 13f7866de46e..5e65b28a5d09 100644 --- a/trunk/drivers/mfd/ab8500-gpadc.c +++ b/trunk/drivers/mfd/ab8500-gpadc.c @@ -907,17 +907,14 @@ static int ab8500_gpadc_suspend(struct device *dev) static int ab8500_gpadc_resume(struct device *dev) { struct ab8500_gpadc *gpadc = dev_get_drvdata(dev); - int ret; - ret = regulator_enable(gpadc->regu); - if (ret) - dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret); + regulator_enable(gpadc->regu); pm_runtime_mark_last_busy(gpadc->dev); pm_runtime_put_autosuspend(gpadc->dev); mutex_unlock(&gpadc->ab8500_gpadc_lock); - return ret; + return 0; } static int ab8500_gpadc_probe(struct platform_device *pdev) diff --git a/trunk/drivers/mfd/ab8500-sysctrl.c b/trunk/drivers/mfd/ab8500-sysctrl.c index 8e0dae59844d..fbca1ced49fa 100644 --- a/trunk/drivers/mfd/ab8500-sysctrl.c +++ b/trunk/drivers/mfd/ab8500-sysctrl.c @@ -23,7 +23,7 @@ static struct device *sysctrl_dev; -static void ab8500_power_off(void) +void ab8500_power_off(void) { sigset_t old; sigset_t all; @@ -104,7 +104,7 @@ void ab8500_restart(char mode, const char *cmd) plat = dev_get_platdata(sysctrl_dev->parent); pdata = plat->sysctrl; - if (pdata && pdata->reboot_reason_code) + if (pdata->reboot_reason_code) reason = pdata->reboot_reason_code(cmd); else pr_warn("[%s] No reboot reason set. 
Default reason %d\n", @@ -188,15 +188,14 @@ static int ab8500_sysctrl_probe(struct platform_device *pdev) plat = dev_get_platdata(pdev->dev.parent); - if (!plat) + if (!(plat && plat->sysctrl)) return -EINVAL; - sysctrl_dev = &pdev->dev; - - if (!pm_power_off) + if (plat->pm_power_off) pm_power_off = ab8500_power_off; pdata = plat->sysctrl; + if (pdata) { int last, ret, i, j; @@ -227,10 +226,6 @@ static int ab8500_sysctrl_probe(struct platform_device *pdev) static int ab8500_sysctrl_remove(struct platform_device *pdev) { sysctrl_dev = NULL; - - if (pm_power_off == ab8500_power_off) - pm_power_off = NULL; - return 0; } diff --git a/trunk/drivers/mfd/abx500-core.c b/trunk/drivers/mfd/abx500-core.c index 3714acb61458..9818afba2515 100644 --- a/trunk/drivers/mfd/abx500-core.c +++ b/trunk/drivers/mfd/abx500-core.c @@ -156,7 +156,7 @@ EXPORT_SYMBOL(abx500_startup_irq_enabled); void abx500_dump_all_banks(void) { struct abx500_ops *ops; - struct device dummy_child = {NULL}; + struct device dummy_child = {0}; struct abx500_device_entry *dev_entry; list_for_each_entry(dev_entry, &abx500_list, list) { diff --git a/trunk/drivers/mfd/cros_ec_spi.c b/trunk/drivers/mfd/cros_ec_spi.c index 367ccb58ecb1..19193cf1e7a1 100644 --- a/trunk/drivers/mfd/cros_ec_spi.c +++ b/trunk/drivers/mfd/cros_ec_spi.c @@ -120,7 +120,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev, for (end = ptr + EC_MSG_PREAMBLE_COUNT; ptr != end; ptr++) { if (*ptr == EC_MSG_HEADER) { - dev_dbg(ec_dev->dev, "msg found at %zd\n", + dev_dbg(ec_dev->dev, "msg found at %ld\n", ptr - ec_dev->din); break; } @@ -154,7 +154,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev, * maximum-supported transfer size. */ todo = min(need_len, 256); - dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%zd\n", + dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%ld\n", todo, need_len, ptr - ec_dev->din); memset(&trans, '\0', sizeof(trans)); @@ -178,7 +178,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev, need_len -= todo; } - dev_dbg(ec_dev->dev, "loop done, ptr=%zd\n", ptr - ec_dev->din); + dev_dbg(ec_dev->dev, "loop done, ptr=%ld\n", ptr - ec_dev->din); return 0; } diff --git a/trunk/drivers/mfd/db8500-prcmu.c b/trunk/drivers/mfd/db8500-prcmu.c index 66f80973596b..319b8abe742b 100644 --- a/trunk/drivers/mfd/db8500-prcmu.c +++ b/trunk/drivers/mfd/db8500-prcmu.c @@ -1613,8 +1613,6 @@ static unsigned long dsiclk_rate(u8 n) if (divsel == PRCM_DSI_PLLOUT_SEL_OFF) divsel = dsiclk[n].divsel; - else - dsiclk[n].divsel = divsel; switch (divsel) { case PRCM_DSI_PLLOUT_SEL_PHI_4: @@ -3097,7 +3095,6 @@ static struct mfd_cell db8500_prcmu_devs[] = { .num_resources = ARRAY_SIZE(db8500_thsens_resources), .resources = db8500_thsens_resources, .platform_data = &db8500_thsens_data, - .pdata_size = sizeof(db8500_thsens_data), }, }; diff --git a/trunk/drivers/mfd/intel_msic.c b/trunk/drivers/mfd/intel_msic.c index d8d5137f9717..5be3b5e13855 100644 --- a/trunk/drivers/mfd/intel_msic.c +++ b/trunk/drivers/mfd/intel_msic.c @@ -414,6 +414,11 @@ static int intel_msic_probe(struct platform_device *pdev) * the clients via intel_msic_irq_read(). 
*/ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "failed to get SRAM iomem resource\n"); + return -ENODEV; + } + msic->irq_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(msic->irq_base)) return PTR_ERR(msic->irq_base); diff --git a/trunk/drivers/mfd/si476x-cmd.c b/trunk/drivers/mfd/si476x-cmd.c index 6f1ef63086c9..de48b4e88450 100644 --- a/trunk/drivers/mfd/si476x-cmd.c +++ b/trunk/drivers/mfd/si476x-cmd.c @@ -29,8 +29,6 @@ #include -#include - #define msb(x) ((u8)((u16) x >> 8)) #define lsb(x) ((u8)((u16) x & 0x00FF)) @@ -152,7 +150,7 @@ enum si476x_acf_status_report_bits { SI476X_ACF_SOFTMUTE_INT = (1 << 0), SI476X_ACF_SMUTE = (1 << 0), - SI476X_ACF_SMATTN = 0x1f, + SI476X_ACF_SMATTN = 0b11111, SI476X_ACF_PILOT = (1 << 7), SI476X_ACF_STBLEND = ~SI476X_ACF_PILOT, }; @@ -485,7 +483,7 @@ int si476x_core_cmd_get_property(struct si476x_core *core, u16 property) if (err < 0) return err; else - return get_unaligned_be16(resp + 2); + return be16_to_cpup((__be16 *)(resp + 2)); } EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property); @@ -774,18 +772,18 @@ int si476x_core_cmd_am_rsq_status(struct si476x_core *core, if (!report) return err; - report->snrhint = 0x08 & resp[1]; - report->snrlint = 0x04 & resp[1]; - report->rssihint = 0x02 & resp[1]; - report->rssilint = 0x01 & resp[1]; + report->snrhint = 0b00001000 & resp[1]; + report->snrlint = 0b00000100 & resp[1]; + report->rssihint = 0b00000010 & resp[1]; + report->rssilint = 0b00000001 & resp[1]; - report->bltf = 0x80 & resp[2]; - report->snr_ready = 0x20 & resp[2]; - report->rssiready = 0x08 & resp[2]; - report->afcrl = 0x02 & resp[2]; - report->valid = 0x01 & resp[2]; + report->bltf = 0b10000000 & resp[2]; + report->snr_ready = 0b00100000 & resp[2]; + report->rssiready = 0b00001000 & resp[2]; + report->afcrl = 0b00000010 & resp[2]; + report->valid = 0b00000001 & resp[2]; - report->readfreq = get_unaligned_be16(resp + 3); + report->readfreq = be16_to_cpup((__be16 *)(resp + 3)); report->freqoff = resp[5]; report->rssi = resp[6]; report->snr = resp[7]; @@ -933,26 +931,26 @@ int si476x_core_cmd_fm_rds_status(struct si476x_core *core, if (err < 0 || report == NULL) return err; - report->rdstpptyint = 0x10 & resp[1]; - report->rdspiint = 0x08 & resp[1]; - report->rdssyncint = 0x02 & resp[1]; - report->rdsfifoint = 0x01 & resp[1]; + report->rdstpptyint = 0b00010000 & resp[1]; + report->rdspiint = 0b00001000 & resp[1]; + report->rdssyncint = 0b00000010 & resp[1]; + report->rdsfifoint = 0b00000001 & resp[1]; - report->tpptyvalid = 0x10 & resp[2]; - report->pivalid = 0x08 & resp[2]; - report->rdssync = 0x02 & resp[2]; - report->rdsfifolost = 0x01 & resp[2]; + report->tpptyvalid = 0b00010000 & resp[2]; + report->pivalid = 0b00001000 & resp[2]; + report->rdssync = 0b00000010 & resp[2]; + report->rdsfifolost = 0b00000001 & resp[2]; - report->tp = 0x20 & resp[3]; - report->pty = 0x1f & resp[3]; + report->tp = 0b00100000 & resp[3]; + report->pty = 0b00011111 & resp[3]; - report->pi = get_unaligned_be16(resp + 4); + report->pi = be16_to_cpup((__be16 *)(resp + 4)); report->rdsfifoused = resp[6]; - report->ble[V4L2_RDS_BLOCK_A] = 0xc0 & resp[7]; - report->ble[V4L2_RDS_BLOCK_B] = 0x30 & resp[7]; - report->ble[V4L2_RDS_BLOCK_C] = 0x0c & resp[7]; - report->ble[V4L2_RDS_BLOCK_D] = 0x03 & resp[7]; + report->ble[V4L2_RDS_BLOCK_A] = 0b11000000 & resp[7]; + report->ble[V4L2_RDS_BLOCK_B] = 0b00110000 & resp[7]; + report->ble[V4L2_RDS_BLOCK_C] = 0b00001100 & resp[7]; + report->ble[V4L2_RDS_BLOCK_D] = 0b00000011 & resp[7]; 
report->rds[V4L2_RDS_BLOCK_A].block = V4L2_RDS_BLOCK_A; report->rds[V4L2_RDS_BLOCK_A].msb = resp[8]; @@ -993,9 +991,9 @@ int si476x_core_cmd_fm_rds_blockcount(struct si476x_core *core, SI476X_DEFAULT_TIMEOUT); if (!err) { - report->expected = get_unaligned_be16(resp + 2); - report->received = get_unaligned_be16(resp + 4); - report->uncorrectable = get_unaligned_be16(resp + 6); + report->expected = be16_to_cpup((__be16 *)(resp + 2)); + report->received = be16_to_cpup((__be16 *)(resp + 4)); + report->uncorrectable = be16_to_cpup((__be16 *)(resp + 6)); } return err; @@ -1007,7 +1005,7 @@ int si476x_core_cmd_fm_phase_diversity(struct si476x_core *core, { u8 resp[CMD_FM_PHASE_DIVERSITY_NRESP]; const u8 args[CMD_FM_PHASE_DIVERSITY_NARGS] = { - mode & 0x07, + mode & 0b111, }; return si476x_core_send_command(core, CMD_FM_PHASE_DIVERSITY, @@ -1164,7 +1162,7 @@ static int si476x_core_cmd_am_tune_freq_a20(struct si476x_core *core, const int am_freq = tuneargs->freq; u8 resp[CMD_AM_TUNE_FREQ_NRESP]; const u8 args[CMD_AM_TUNE_FREQ_NARGS] = { - (tuneargs->zifsr << 6) | (tuneargs->injside & 0x03), + (tuneargs->zifsr << 6) | (tuneargs->injside & 0b11), msb(am_freq), lsb(am_freq), }; @@ -1199,20 +1197,20 @@ static int si476x_core_cmd_fm_rsq_status_a10(struct si476x_core *core, if (err < 0 || report == NULL) return err; - report->multhint = 0x80 & resp[1]; - report->multlint = 0x40 & resp[1]; - report->snrhint = 0x08 & resp[1]; - report->snrlint = 0x04 & resp[1]; - report->rssihint = 0x02 & resp[1]; - report->rssilint = 0x01 & resp[1]; + report->multhint = 0b10000000 & resp[1]; + report->multlint = 0b01000000 & resp[1]; + report->snrhint = 0b00001000 & resp[1]; + report->snrlint = 0b00000100 & resp[1]; + report->rssihint = 0b00000010 & resp[1]; + report->rssilint = 0b00000001 & resp[1]; - report->bltf = 0x80 & resp[2]; - report->snr_ready = 0x20 & resp[2]; - report->rssiready = 0x08 & resp[2]; - report->afcrl = 0x02 & resp[2]; - report->valid = 0x01 & resp[2]; + report->bltf = 0b10000000 & resp[2]; + report->snr_ready = 0b00100000 & resp[2]; + report->rssiready = 0b00001000 & resp[2]; + report->afcrl = 0b00000010 & resp[2]; + report->valid = 0b00000001 & resp[2]; - report->readfreq = get_unaligned_be16(resp + 3); + report->readfreq = be16_to_cpup((__be16 *)(resp + 3)); report->freqoff = resp[5]; report->rssi = resp[6]; report->snr = resp[7]; @@ -1220,7 +1218,7 @@ static int si476x_core_cmd_fm_rsq_status_a10(struct si476x_core *core, report->hassi = resp[10]; report->mult = resp[11]; report->dev = resp[12]; - report->readantcap = get_unaligned_be16(resp + 13); + report->readantcap = be16_to_cpup((__be16 *)(resp + 13)); report->assi = resp[15]; report->usn = resp[16]; @@ -1253,20 +1251,20 @@ static int si476x_core_cmd_fm_rsq_status_a20(struct si476x_core *core, if (err < 0 || report == NULL) return err; - report->multhint = 0x80 & resp[1]; - report->multlint = 0x40 & resp[1]; - report->snrhint = 0x08 & resp[1]; - report->snrlint = 0x04 & resp[1]; - report->rssihint = 0x02 & resp[1]; - report->rssilint = 0x01 & resp[1]; + report->multhint = 0b10000000 & resp[1]; + report->multlint = 0b01000000 & resp[1]; + report->snrhint = 0b00001000 & resp[1]; + report->snrlint = 0b00000100 & resp[1]; + report->rssihint = 0b00000010 & resp[1]; + report->rssilint = 0b00000001 & resp[1]; - report->bltf = 0x80 & resp[2]; - report->snr_ready = 0x20 & resp[2]; - report->rssiready = 0x08 & resp[2]; - report->afcrl = 0x02 & resp[2]; - report->valid = 0x01 & resp[2]; + report->bltf = 0b10000000 & resp[2]; + report->snr_ready = 
0b00100000 & resp[2]; + report->rssiready = 0b00001000 & resp[2]; + report->afcrl = 0b00000010 & resp[2]; + report->valid = 0b00000001 & resp[2]; - report->readfreq = get_unaligned_be16(resp + 3); + report->readfreq = be16_to_cpup((__be16 *)(resp + 3)); report->freqoff = resp[5]; report->rssi = resp[6]; report->snr = resp[7]; @@ -1274,7 +1272,7 @@ static int si476x_core_cmd_fm_rsq_status_a20(struct si476x_core *core, report->hassi = resp[10]; report->mult = resp[11]; report->dev = resp[12]; - report->readantcap = get_unaligned_be16(resp + 13); + report->readantcap = be16_to_cpup((__be16 *)(resp + 13)); report->assi = resp[15]; report->usn = resp[16]; @@ -1308,21 +1306,21 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core, if (err < 0 || report == NULL) return err; - report->multhint = 0x80 & resp[1]; - report->multlint = 0x40 & resp[1]; - report->snrhint = 0x08 & resp[1]; - report->snrlint = 0x04 & resp[1]; - report->rssihint = 0x02 & resp[1]; - report->rssilint = 0x01 & resp[1]; - - report->bltf = 0x80 & resp[2]; - report->snr_ready = 0x20 & resp[2]; - report->rssiready = 0x08 & resp[2]; - report->injside = 0x04 & resp[2]; - report->afcrl = 0x02 & resp[2]; - report->valid = 0x01 & resp[2]; - - report->readfreq = get_unaligned_be16(resp + 3); + report->multhint = 0b10000000 & resp[1]; + report->multlint = 0b01000000 & resp[1]; + report->snrhint = 0b00001000 & resp[1]; + report->snrlint = 0b00000100 & resp[1]; + report->rssihint = 0b00000010 & resp[1]; + report->rssilint = 0b00000001 & resp[1]; + + report->bltf = 0b10000000 & resp[2]; + report->snr_ready = 0b00100000 & resp[2]; + report->rssiready = 0b00001000 & resp[2]; + report->injside = 0b00000100 & resp[2]; + report->afcrl = 0b00000010 & resp[2]; + report->valid = 0b00000001 & resp[2]; + + report->readfreq = be16_to_cpup((__be16 *)(resp + 3)); report->freqoff = resp[5]; report->rssi = resp[6]; report->snr = resp[7]; @@ -1331,7 +1329,7 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core, report->hassi = resp[10]; report->mult = resp[11]; report->dev = resp[12]; - report->readantcap = get_unaligned_be16(resp + 13); + report->readantcap = be16_to_cpup((__be16 *)(resp + 13)); report->assi = resp[15]; report->usn = resp[16]; @@ -1339,7 +1337,7 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core, report->rdsdev = resp[18]; report->assidev = resp[19]; report->strongdev = resp[20]; - report->rdspi = get_unaligned_be16(resp + 21); + report->rdspi = be16_to_cpup((__be16 *)(resp + 21)); return err; } diff --git a/trunk/drivers/mfd/tps6586x.c b/trunk/drivers/mfd/tps6586x.c index 4b93ed4d5cd6..721b9186a5d1 100644 --- a/trunk/drivers/mfd/tps6586x.c +++ b/trunk/drivers/mfd/tps6586x.c @@ -107,7 +107,7 @@ static struct mfd_cell tps6586x_cell[] = { .name = "tps6586x-gpio", }, { - .name = "tps6586x-regulator", + .name = "tps6586x-pmic", }, { .name = "tps6586x-rtc", diff --git a/trunk/drivers/misc/atmel-ssc.c b/trunk/drivers/misc/atmel-ssc.c index 1abd5ad59925..c09c28f92055 100644 --- a/trunk/drivers/misc/atmel-ssc.c +++ b/trunk/drivers/misc/atmel-ssc.c @@ -154,6 +154,11 @@ static int ssc_probe(struct platform_device *pdev) ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) { + dev_dbg(&pdev->dev, "no mmio resource defined\n"); + return -ENXIO; + } + ssc->regs = devm_ioremap_resource(&pdev->dev, regs); if (IS_ERR(ssc->regs)) return PTR_ERR(ssc->regs); diff --git a/trunk/drivers/misc/dummy-irq.c 
b/trunk/drivers/misc/dummy-irq.c index c37eeedfe215..7014167e2c61 100644 --- a/trunk/drivers/misc/dummy-irq.c +++ b/trunk/drivers/misc/dummy-irq.c @@ -19,7 +19,7 @@ #include #include -static int irq = -1; +static int irq; static irqreturn_t dummy_interrupt(int irq, void *dev_id) { @@ -36,10 +36,6 @@ static irqreturn_t dummy_interrupt(int irq, void *dev_id) static int __init dummy_irq_init(void) { - if (irq < 0) { - printk(KERN_ERR "dummy-irq: no IRQ given. Use irq=N\n"); - return -EIO; - } if (request_irq(irq, &dummy_interrupt, IRQF_SHARED, "dummy_irq", &irq)) { printk(KERN_ERR "dummy-irq: cannot register IRQ %d\n", irq); return -EIO; diff --git a/trunk/drivers/misc/mei/bus.c b/trunk/drivers/misc/mei/bus.c index 9ecd49a7be1b..1e935eacaa7f 100644 --- a/trunk/drivers/misc/mei/bus.c +++ b/trunk/drivers/misc/mei/bus.c @@ -496,8 +496,6 @@ int mei_cl_disable_device(struct mei_cl_device *device) } } - device->event_cb = NULL; - mutex_unlock(&dev->device_lock); if (!device->ops || !device->ops->disable) diff --git a/trunk/drivers/misc/mei/init.c b/trunk/drivers/misc/mei/init.c index f580d30bb784..713d89fedc46 100644 --- a/trunk/drivers/misc/mei/init.c +++ b/trunk/drivers/misc/mei/init.c @@ -197,8 +197,6 @@ void mei_stop(struct mei_device *dev) { dev_dbg(&dev->pdev->dev, "stopping the device.\n"); - flush_scheduled_work(); - mutex_lock(&dev->device_lock); cancel_delayed_work(&dev->timer_work); @@ -212,6 +210,8 @@ void mei_stop(struct mei_device *dev) mutex_unlock(&dev->device_lock); + flush_scheduled_work(); + mei_watchdog_unregister(dev); } EXPORT_SYMBOL_GPL(mei_stop); diff --git a/trunk/drivers/misc/mei/main.c b/trunk/drivers/misc/mei/main.c index 053139f61086..7c44c8dbae42 100644 --- a/trunk/drivers/misc/mei/main.c +++ b/trunk/drivers/misc/mei/main.c @@ -489,16 +489,11 @@ static int mei_ioctl_connect_client(struct file *file, /* find ME client we're trying to connect to */ i = mei_me_cl_by_uuid(dev, &data->in_client_uuid); - if (i < 0 || dev->me_clients[i].props.fixed_address) { - dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n", - &data->in_client_uuid); - rets = -ENODEV; - goto end; + if (i >= 0 && !dev->me_clients[i].props.fixed_address) { + cl->me_client_id = dev->me_clients[i].client_id; + cl->state = MEI_FILE_CONNECTING; } - cl->me_client_id = dev->me_clients[i].client_id; - cl->state = MEI_FILE_CONNECTING; - dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n", cl->me_client_id); dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n", @@ -532,6 +527,11 @@ static int mei_ioctl_connect_client(struct file *file, goto end; } + if (cl->state != MEI_FILE_CONNECTING) { + rets = -ENODEV; + goto end; + } + /* prepare the output buffer */ client = &data->out_client_properties; @@ -543,6 +543,7 @@ static int mei_ioctl_connect_client(struct file *file, rets = mei_cl_connect(cl, file); end: + dev_dbg(&dev->pdev->dev, "free connect cb memory."); return rets; } diff --git a/trunk/drivers/misc/mei/nfc.c b/trunk/drivers/misc/mei/nfc.c index d0c6907dfd92..3adf8a70f26e 100644 --- a/trunk/drivers/misc/mei/nfc.c +++ b/trunk/drivers/misc/mei/nfc.c @@ -142,8 +142,6 @@ static void mei_nfc_free(struct mei_nfc_dev *ndev) mei_cl_unlink(ndev->cl_info); kfree(ndev->cl_info); } - - memset(ndev, 0, sizeof(struct mei_nfc_dev)); } static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev) diff --git a/trunk/drivers/misc/mei/pci-me.c b/trunk/drivers/misc/mei/pci-me.c index 0f268329bd3a..a727464e9c3f 100644 --- a/trunk/drivers/misc/mei/pci-me.c +++ b/trunk/drivers/misc/mei/pci-me.c @@ 
-325,7 +325,6 @@ static int mei_me_pci_resume(struct device *device) mutex_lock(&dev->device_lock); dev->dev_state = MEI_DEV_POWER_UP; - mei_clear_interrupts(dev); mei_reset(dev, 1); mutex_unlock(&dev->device_lock); diff --git a/trunk/drivers/misc/sgi-gru/grufile.c b/trunk/drivers/misc/sgi-gru/grufile.c index 0535d1e0bc78..44d273c5e19d 100644 --- a/trunk/drivers/misc/sgi-gru/grufile.c +++ b/trunk/drivers/misc/sgi-gru/grufile.c @@ -172,7 +172,6 @@ static long gru_get_config_info(unsigned long arg) nodesperblade = 2; else nodesperblade = 1; - memset(&info, 0, sizeof(info)); info.cpus = num_online_cpus(); info.nodes = num_online_nodes(); info.blades = info.nodes / nodesperblade; diff --git a/trunk/drivers/misc/vmw_vmci/Kconfig b/trunk/drivers/misc/vmw_vmci/Kconfig index 39c2ecadb273..ea98f7e9ccd1 100644 --- a/trunk/drivers/misc/vmw_vmci/Kconfig +++ b/trunk/drivers/misc/vmw_vmci/Kconfig @@ -4,7 +4,7 @@ config VMWARE_VMCI tristate "VMware VMCI Driver" - depends on X86 && PCI + depends on X86 && PCI && NET help This is VMware's Virtual Machine Communication Interface. It enables high-speed communication between host and guest in a virtual diff --git a/trunk/drivers/misc/vmw_vmci/vmci_queue_pair.c b/trunk/drivers/misc/vmw_vmci/vmci_queue_pair.c index 8ff2e5ee8fb8..d94245dbd765 100644 --- a/trunk/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/trunk/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include diff --git a/trunk/drivers/mmc/host/atmel-mci.c b/trunk/drivers/mmc/host/atmel-mci.c index aca59d93d5a9..e75774f72606 100644 --- a/trunk/drivers/mmc/host/atmel-mci.c +++ b/trunk/drivers/mmc/host/atmel-mci.c @@ -2230,15 +2230,10 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot, mmc_free_host(slot->mmc); } -static bool atmci_filter(struct dma_chan *chan, void *pdata) +static bool atmci_filter(struct dma_chan *chan, void *slave) { - struct mci_platform_data *sl_pdata = pdata; - struct mci_dma_data *sl; + struct mci_dma_data *sl = slave; - if (!sl_pdata) - return false; - - sl = sl_pdata->dma_slave; if (sl && find_slave_dev(sl) == chan->device->dev) { chan->private = slave_data_ptr(sl); return true; @@ -2250,18 +2245,24 @@ static bool atmci_filter(struct dma_chan *chan, void *pdata) static bool atmci_configure_dma(struct atmel_mci *host) { struct mci_platform_data *pdata; - dma_cap_mask_t mask; if (host == NULL) return false; pdata = host->pdev->dev.platform_data; - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); + if (!pdata) + return false; - host->dma.chan = dma_request_slave_channel_compat(mask, atmci_filter, pdata, - &host->pdev->dev, "rxtx"); + if (pdata->dma_slave && find_slave_dev(pdata->dma_slave)) { + dma_cap_mask_t mask; + + /* Try to grab a DMA channel */ + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + host->dma.chan = + dma_request_channel(mask, atmci_filter, pdata->dma_slave); + } if (!host->dma.chan) { dev_warn(&host->pdev->dev, "no DMA channel available\n"); return false; diff --git a/trunk/drivers/mmc/host/mmci.c b/trunk/drivers/mmc/host/mmci.c index f4f3038c1df0..375c109607ff 100644 --- a/trunk/drivers/mmc/host/mmci.c +++ b/trunk/drivers/mmc/host/mmci.c @@ -1130,7 +1130,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) struct variant_data *variant = host->variant; u32 pwr = 0; unsigned long flags; - int ret; pm_runtime_get_sync(mmc_dev(mmc)); @@ -1162,12 +1161,8 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) break; case MMC_POWER_ON: if 
(!IS_ERR(mmc->supply.vqmmc) && - !regulator_is_enabled(mmc->supply.vqmmc)) { - ret = regulator_enable(mmc->supply.vqmmc); - if (ret < 0) - dev_err(mmc_dev(mmc), - "failed to enable vqmmc regulator\n"); - } + !regulator_is_enabled(mmc->supply.vqmmc)) + regulator_enable(mmc->supply.vqmmc); pwr |= MCI_PWR_ON; break; diff --git a/trunk/drivers/mmc/host/omap_hsmmc.c b/trunk/drivers/mmc/host/omap_hsmmc.c index eccedc7d06a4..6e44025acf01 100644 --- a/trunk/drivers/mmc/host/omap_hsmmc.c +++ b/trunk/drivers/mmc/host/omap_hsmmc.c @@ -161,7 +161,6 @@ struct omap_hsmmc_host { */ struct regulator *vcc; struct regulator *vcc_aux; - int pbias_disable; void __iomem *base; resource_size_t mapbase; spinlock_t irq_lock; /* Prevent races with irq handler */ @@ -256,11 +255,11 @@ static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on, if (!host->vcc) return 0; /* - * With DT, never turn OFF the regulator for MMC1. This is because + * With DT, never turn OFF the regulator. This is because * the pbias cell programming support is still missing when * booting with Device tree */ - if (host->pbias_disable && !vdd) + if (dev->of_node && !vdd) return 0; if (mmc_slot(host).before_set_reg) @@ -1521,10 +1520,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) (ios->vdd == DUAL_VOLT_OCR_BIT) && /* * With pbias cell programming missing, this - * can't be allowed on MMC1 when booting with device + * can't be allowed when booting with device * tree. */ - !host->pbias_disable) { + !host->dev->of_node) { /* * The mmc_select_voltage fn of the core does * not seem to set the power_mode to @@ -1872,10 +1871,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev) omap_hsmmc_context_save(host); - /* This can be removed once we support PBIAS with DT */ - if (host->dev->of_node && host->mapbase == 0x4809c000) - host->pbias_disable = 1; - host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); /* * MMC can still work without debounce clock. 
@@ -1911,41 +1906,33 @@ static int omap_hsmmc_probe(struct platform_device *pdev) omap_hsmmc_conf_bus_power(host); - if (!pdev->dev.of_node) { - res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx"); - if (!res) { - dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n"); - ret = -ENXIO; - goto err_irq; - } - tx_req = res->start; + res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx"); + if (!res) { + dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n"); + ret = -ENXIO; + goto err_irq; + } + tx_req = res->start; - res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); - if (!res) { - dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n"); - ret = -ENXIO; - goto err_irq; - } - rx_req = res->start; + res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); + if (!res) { + dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n"); + ret = -ENXIO; + goto err_irq; } + rx_req = res->start; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - host->rx_chan = - dma_request_slave_channel_compat(mask, omap_dma_filter_fn, - &rx_req, &pdev->dev, "rx"); - + host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req); if (!host->rx_chan) { dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req); ret = -ENXIO; goto err_irq; } - host->tx_chan = - dma_request_slave_channel_compat(mask, omap_dma_filter_fn, - &tx_req, &pdev->dev, "tx"); - + host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req); if (!host->tx_chan) { dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req); ret = -ENXIO; diff --git a/trunk/drivers/mmc/host/sdhci-acpi.c b/trunk/drivers/mmc/host/sdhci-acpi.c index 706d9cb1a49e..7bcf74b1a5cd 100644 --- a/trunk/drivers/mmc/host/sdhci-acpi.c +++ b/trunk/drivers/mmc/host/sdhci-acpi.c @@ -87,12 +87,6 @@ static const struct sdhci_ops sdhci_acpi_ops_dflt = { .enable_dma = sdhci_acpi_enable_dma, }; -static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = { - .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, - .caps2 = MMC_CAP2_HC_ERASE_SZ, - .flags = SDHCI_ACPI_RUNTIME_PM, -}; - static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD, @@ -100,65 +94,21 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { .pm_caps = MMC_PM_KEEP_POWER, }; -static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = { -}; - -struct sdhci_acpi_uid_slot { - const char *hid; - const char *uid; - const struct sdhci_acpi_slot *slot; -}; - -static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = { - { "80860F14" , "1" , &sdhci_acpi_slot_int_emmc }, - { "80860F14" , "3" , &sdhci_acpi_slot_int_sd }, - { "INT33BB" , "2" , &sdhci_acpi_slot_int_sdio }, - { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio }, - { "PNP0D40" }, - { }, -}; - static const struct acpi_device_id sdhci_acpi_ids[] = { - { "80860F14" }, - { "INT33BB" }, - { "INT33C6" }, - { "PNP0D40" }, + { "INT33C6", (kernel_ulong_t)&sdhci_acpi_slot_int_sdio }, + { "PNP0D40" }, { }, }; MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids); -static const struct sdhci_acpi_slot *sdhci_acpi_get_slot_by_ids(const char *hid, - const char *uid) +static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid) { - const struct sdhci_acpi_uid_slot *u; - - for (u = sdhci_acpi_uids; u->hid; u++) { - if (strcmp(u->hid, hid)) - continue; - if (!u->uid) - return u->slot; - if (uid && !strcmp(u->uid, uid)) - return u->slot; - } - return 
NULL; -} + const struct acpi_device_id *id; -static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(acpi_handle handle, - const char *hid) -{ - const struct sdhci_acpi_slot *slot; - struct acpi_device_info *info; - const char *uid = NULL; - acpi_status status; - - status = acpi_get_object_info(handle, &info); - if (!ACPI_FAILURE(status) && (info->valid & ACPI_VALID_UID)) - uid = info->unique_id.string; - - slot = sdhci_acpi_get_slot_by_ids(hid, uid); - - kfree(info); - return slot; + for (id = sdhci_acpi_ids; id->id[0]; id++) + if (!strcmp(id->id, hid)) + return (const struct sdhci_acpi_slot *)id->driver_data; + return NULL; } static int sdhci_acpi_probe(struct platform_device *pdev) @@ -198,7 +148,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev) c = sdhci_priv(host); c->host = host; - c->slot = sdhci_acpi_get_slot(handle, hid); + c->slot = sdhci_acpi_get_slot(hid); c->pdev = pdev; c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM); @@ -252,7 +202,6 @@ static int sdhci_acpi_probe(struct platform_device *pdev) goto err_free; if (c->use_runtime_pm) { - pm_runtime_set_active(dev); pm_suspend_ignore_children(dev, 1); pm_runtime_set_autosuspend_delay(dev, 50); pm_runtime_use_autosuspend(dev); diff --git a/trunk/drivers/mmc/host/sdhci-esdhc-imx.c b/trunk/drivers/mmc/host/sdhci-esdhc-imx.c index d5f0d59e1310..67d6dde2ff19 100644 --- a/trunk/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/trunk/drivers/mmc/host/sdhci-esdhc-imx.c @@ -85,12 +85,6 @@ struct pltfm_imx_data { struct clk *clk_ipg; struct clk *clk_ahb; struct clk *clk_per; - enum { - NO_CMD_PENDING, /* no multiblock command pending*/ - MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */ - WAIT_FOR_INT, /* sent CMD12, waiting for response INT */ - } multiblock_status; - }; static struct platform_device_id imx_esdhc_devtype[] = { @@ -160,8 +154,6 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i static u32 esdhc_readl_le(struct sdhci_host *host, int reg) { - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct pltfm_imx_data *imx_data = pltfm_host->priv; u32 val = readl(host->ioaddr + reg); if (unlikely(reg == SDHCI_CAPABILITIES)) { @@ -183,18 +175,6 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR; val |= SDHCI_INT_ADMA_ERROR; } - - /* - * mask off the interrupt we get in response to the manually - * sent CMD12 - */ - if ((imx_data->multiblock_status == WAIT_FOR_INT) && - ((val & SDHCI_INT_RESPONSE) == SDHCI_INT_RESPONSE)) { - val &= ~SDHCI_INT_RESPONSE; - writel(SDHCI_INT_RESPONSE, host->ioaddr + - SDHCI_INT_STATUS); - imx_data->multiblock_status = NO_CMD_PENDING; - } } return val; @@ -231,15 +211,6 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) v = readl(host->ioaddr + ESDHC_VENDOR_SPEC); v &= ~ESDHC_VENDOR_SPEC_SDIO_QUIRK; writel(v, host->ioaddr + ESDHC_VENDOR_SPEC); - - if (imx_data->multiblock_status == MULTIBLK_IN_PROCESS) - { - /* send a manual CMD12 with RESPTYP=none */ - data = MMC_STOP_TRANSMISSION << 24 | - SDHCI_CMD_ABORTCMD << 16; - writel(data, host->ioaddr + SDHCI_TRANSFER_MODE); - imx_data->multiblock_status = WAIT_FOR_INT; - } } if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { @@ -306,13 +277,11 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) } return; case SDHCI_COMMAND: - if (host->cmd->opcode == MMC_STOP_TRANSMISSION) + if ((host->cmd->opcode == MMC_STOP_TRANSMISSION || + host->cmd->opcode == MMC_SET_BLOCK_COUNT) && + 
(imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) val |= SDHCI_CMD_ABORTCMD; - if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) && - (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) - imx_data->multiblock_status = MULTIBLK_IN_PROCESS; - if (is_imx6q_usdhc(imx_data)) writel(val << 16, host->ioaddr + SDHCI_TRANSFER_MODE); @@ -355,10 +324,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) /* * Do not touch buswidth bits here. This is done in * esdhc_pltfm_bus_width. - * Do not touch the D3CD bit either which is used for the - * SDIO interrupt errata workaround. */ - mask = 0xffff & ~(ESDHC_CTRL_BUSWIDTH_MASK | ESDHC_CTRL_D3CD); + mask = 0xffff & ~ESDHC_CTRL_BUSWIDTH_MASK; esdhc_clrset_le(host, mask, new_val, reg); return; diff --git a/trunk/drivers/mmc/host/sdhci-pci.c b/trunk/drivers/mmc/host/sdhci-pci.c index 701d06d0e1fb..0012d3fdc999 100644 --- a/trunk/drivers/mmc/host/sdhci-pci.c +++ b/trunk/drivers/mmc/host/sdhci-pci.c @@ -33,9 +33,6 @@ */ #define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809 #define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a -#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14 -#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15 -#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16 /* * PCI registers @@ -307,33 +304,6 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = { .probe_slot = pch_hc_probe_slot, }; -static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) -{ - slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE; - slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ; - return 0; -} - -static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) -{ - slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE; - return 0; -} - -static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { - .allow_runtime_pm = true, - .probe_slot = byt_emmc_probe_slot, -}; - -static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { - .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, - .allow_runtime_pm = true, - .probe_slot = byt_sdio_probe_slot, -}; - -static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { -}; - /* O2Micro extra registers */ #define O2_SD_LOCK_WP 0xD3 #define O2_SD_MULTI_VCC3V 0xEE @@ -885,30 +855,6 @@ static const struct pci_device_id pci_ids[] = { .driver_data = (kernel_ulong_t)&sdhci_intel_pch_sdio, }, - { - .vendor = PCI_VENDOR_ID_INTEL, - .device = PCI_DEVICE_ID_INTEL_BYT_EMMC, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc, - }, - - { - .vendor = PCI_VENDOR_ID_INTEL, - .device = PCI_DEVICE_ID_INTEL_BYT_SDIO, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio, - }, - - { - .vendor = PCI_VENDOR_ID_INTEL, - .device = PCI_DEVICE_ID_INTEL_BYT_SD, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd, - }, - { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8120, diff --git a/trunk/drivers/mtd/nand/lpc32xx_mlc.c b/trunk/drivers/mtd/nand/lpc32xx_mlc.c index fd1df5e13ae4..a94facb46e5c 100644 --- a/trunk/drivers/mtd/nand/lpc32xx_mlc.c +++ b/trunk/drivers/mtd/nand/lpc32xx_mlc.c @@ -672,6 +672,11 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) } rc = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (rc == NULL) { + dev_err(&pdev->dev, "No memory resource found for device!\r\n"); + return -ENXIO; + } + host->io_base = devm_ioremap_resource(&pdev->dev, rc); if (IS_ERR(host->io_base)) return PTR_ERR(host->io_base); diff --git a/trunk/drivers/net/bonding/bond_3ad.c 
b/trunk/drivers/net/bonding/bond_3ad.c index 390061d09693..fc58d118d844 100644 --- a/trunk/drivers/net/bonding/bond_3ad.c +++ b/trunk/drivers/net/bonding/bond_3ad.c @@ -2360,15 +2360,14 @@ int bond_3ad_set_carrier(struct bonding *bond) } /** - * __bond_3ad_get_active_agg_info - get information of the active aggregator + * bond_3ad_get_active_agg_info - get information of the active aggregator * @bond: bonding struct to work on * @ad_info: ad_info struct to fill with the bond's info * * Returns: 0 on success * < 0 on error */ -int __bond_3ad_get_active_agg_info(struct bonding *bond, - struct ad_info *ad_info) +int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info) { struct aggregator *aggregator = NULL; struct port *port; @@ -2392,18 +2391,6 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond, return -1; } -/* Wrapper used to hold bond->lock so no slave manipulation can occur */ -int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info) -{ - int ret; - - read_lock(&bond->lock); - ret = __bond_3ad_get_active_agg_info(bond, ad_info); - read_unlock(&bond->lock); - - return ret; -} - int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) { struct slave *slave, *start_at; @@ -2415,8 +2402,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) struct ad_info ad_info; int res = 1; - if (__bond_3ad_get_active_agg_info(bond, &ad_info)) { - pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n", + if (bond_3ad_get_active_agg_info(bond, &ad_info)) { + pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n", dev->name); goto out; } diff --git a/trunk/drivers/net/bonding/bond_3ad.h b/trunk/drivers/net/bonding/bond_3ad.h index 5d91ad0cc041..0cfaa4afdece 100644 --- a/trunk/drivers/net/bonding/bond_3ad.h +++ b/trunk/drivers/net/bonding/bond_3ad.h @@ -273,8 +273,6 @@ void bond_3ad_adapter_speed_changed(struct slave *slave); void bond_3ad_adapter_duplex_changed(struct slave *slave); void bond_3ad_handle_link_change(struct slave *slave, char link); int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); -int __bond_3ad_get_active_agg_info(struct bonding *bond, - struct ad_info *ad_info); int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); diff --git a/trunk/drivers/net/bonding/bond_main.c b/trunk/drivers/net/bonding/bond_main.c index f97569613526..d0aade04e49a 100644 --- a/trunk/drivers/net/bonding/bond_main.c +++ b/trunk/drivers/net/bonding/bond_main.c @@ -764,8 +764,8 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) struct net_device *bond_dev, *vlan_dev, *upper_dev; struct vlan_entry *vlan; - read_lock(&bond->lock); rcu_read_lock(); + read_lock(&bond->lock); bond_dev = bond->dev; @@ -787,19 +787,12 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) if (vlan_dev) __bond_resend_igmp_join_requests(vlan_dev); } - rcu_read_unlock(); - /* We use curr_slave_lock to protect against concurrent access to - * igmp_retrans from multiple running instances of this function and - * bond_change_active_slave - */ - write_lock_bh(&bond->curr_slave_lock); - if (bond->igmp_retrans > 1) { - bond->igmp_retrans--; + if (--bond->igmp_retrans > 0) queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); - } - write_unlock_bh(&bond->curr_slave_lock); + read_unlock(&bond->lock); + rcu_read_unlock(); } static void bond_resend_igmp_join_requests_delayed(struct work_struct 
*work) @@ -1369,7 +1362,6 @@ static netdev_features_t bond_fix_features(struct net_device *dev, slave->dev->features, mask); } - features = netdev_add_tso_features(features, mask); out: read_unlock(&bond->lock); @@ -1964,10 +1956,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) err_undo_flags: bond_compute_features(bond); - /* Enslave of first slave has failed and we need to fix master's mac */ - if (bond->slave_cnt == 0 && - ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr)) - eth_hw_addr_random(bond_dev); return res; } @@ -2413,8 +2401,7 @@ static void bond_miimon_commit(struct bonding *bond) pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", bond->dev->name, slave->dev->name, - slave->speed == SPEED_UNKNOWN ? 0 : slave->speed, - slave->duplex ? "full" : "half"); + slave->speed, slave->duplex ? "full" : "half"); /* notify ad that the link status has changed */ if (bond->params.mode == BOND_MODE_8023AD) @@ -2568,8 +2555,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ { struct sk_buff *skb; - pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n", arp_op, - slave_dev->name, &dest_ip, &src_ip, vlan_id); + pr_debug("arp %d on slave %s: dst %x src %x vid %d\n", arp_op, + slave_dev->name, dest_ip, src_ip, vlan_id); skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip, NULL, slave_dev->dev_addr, NULL); @@ -2601,7 +2588,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) __be32 addr; if (!targets[i]) break; - pr_debug("basa: target %pI4\n", &targets[i]); + pr_debug("basa: target %x\n", targets[i]); if (!bond_vlan_used(bond)) { pr_debug("basa: empty vlan: arp_send\n"); addr = bond_confirm_addr(bond->dev, targets[i], 0); @@ -4483,7 +4470,7 @@ int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl) static int bond_check_params(struct bond_params *params) { - int arp_validate_value, fail_over_mac_value, primary_reselect_value, i; + int arp_validate_value, fail_over_mac_value, primary_reselect_value; /* * Convert string parameters. 
@@ -4663,18 +4650,19 @@ static int bond_check_params(struct bond_params *params) arp_interval = BOND_LINK_ARP_INTERV; } - for (arp_ip_count = 0, i = 0; - (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) { + for (arp_ip_count = 0; + (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[arp_ip_count]; + arp_ip_count++) { /* not complete check, but should be good enough to catch mistakes */ - __be32 ip = in_aton(arp_ip_target[i]); - if (!isdigit(arp_ip_target[i][0]) || ip == 0 || - ip == htonl(INADDR_BROADCAST)) { + __be32 ip = in_aton(arp_ip_target[arp_ip_count]); + if (!isdigit(arp_ip_target[arp_ip_count][0]) || + ip == 0 || ip == htonl(INADDR_BROADCAST)) { pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", - arp_ip_target[i]); + arp_ip_target[arp_ip_count]); arp_interval = 0; } else { - arp_target[arp_ip_count++] = ip; + arp_target[arp_ip_count] = ip; } } @@ -4708,6 +4696,8 @@ static int bond_check_params(struct bond_params *params) if (miimon) { pr_info("MII link monitoring set to %d ms\n", miimon); } else if (arp_interval) { + int i; + pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):", arp_interval, arp_validate_tbl[arp_validate_value].modename, diff --git a/trunk/drivers/net/bonding/bond_procfs.c b/trunk/drivers/net/bonding/bond_procfs.c index 4060d41f0ee7..94d06f1307b8 100644 --- a/trunk/drivers/net/bonding/bond_procfs.c +++ b/trunk/drivers/net/bonding/bond_procfs.c @@ -130,7 +130,7 @@ static void bond_info_show_master(struct seq_file *seq) seq_printf(seq, "Aggregator selection policy (ad_select): %s\n", ad_select_tbl[bond->params.ad_select].modename); - if (__bond_3ad_get_active_agg_info(bond, &ad_info)) { + if (bond_3ad_get_active_agg_info(bond, &ad_info)) { seq_printf(seq, "bond %s has no active aggregator\n", bond->dev->name); } else { diff --git a/trunk/drivers/net/bonding/bond_sysfs.c b/trunk/drivers/net/bonding/bond_sysfs.c index d7434e0a610e..ea7a388f4843 100644 --- a/trunk/drivers/net/bonding/bond_sysfs.c +++ b/trunk/drivers/net/bonding/bond_sysfs.c @@ -316,9 +316,6 @@ static ssize_t bonding_store_mode(struct device *d, int new_value, ret = count; struct bonding *bond = to_bond(d); - if (!rtnl_trylock()) - return restart_syscall(); - if (bond->dev->flags & IFF_UP) { pr_err("unable to update mode of %s because interface is up.\n", bond->dev->name); @@ -355,7 +352,6 @@ static ssize_t bonding_store_mode(struct device *d, bond->dev->name, bond_mode_tbl[new_value].modename, new_value); out: - rtnl_unlock(); return ret; } static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, @@ -1319,6 +1315,7 @@ static ssize_t bonding_show_mii_status(struct device *d, } static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL); + /* * Show current 802.3ad aggregator ID. */ @@ -1332,7 +1329,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d, if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; count = sprintf(buf, "%d\n", - bond_3ad_get_active_agg_info(bond, &ad_info) + (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.aggregator_id); } @@ -1354,7 +1351,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d, if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; count = sprintf(buf, "%d\n", - bond_3ad_get_active_agg_info(bond, &ad_info) + (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 
0 : ad_info.ports); } @@ -1376,7 +1373,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d, if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; count = sprintf(buf, "%d\n", - bond_3ad_get_active_agg_info(bond, &ad_info) + (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.actor_key); } @@ -1398,7 +1395,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d, if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; count = sprintf(buf, "%d\n", - bond_3ad_get_active_agg_info(bond, &ad_info) + (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.partner_key); } diff --git a/trunk/drivers/net/bonding/bonding.h b/trunk/drivers/net/bonding/bonding.h index f989e1529a29..2baec24388b1 100644 --- a/trunk/drivers/net/bonding/bonding.h +++ b/trunk/drivers/net/bonding/bonding.h @@ -225,7 +225,7 @@ struct bonding { rwlock_t curr_slave_lock; u8 send_peer_notif; s8 setup_by_slave; - u8 igmp_retrans; + s8 igmp_retrans; #ifdef CONFIG_PROC_FS struct proc_dir_entry *proc_entry; char proc_file_name[IFNAMSIZ]; diff --git a/trunk/drivers/net/caif/Kconfig b/trunk/drivers/net/caif/Kconfig index 547098086773..7ffc756131a2 100644 --- a/trunk/drivers/net/caif/Kconfig +++ b/trunk/drivers/net/caif/Kconfig @@ -43,7 +43,7 @@ config CAIF_HSI config CAIF_VIRTIO tristate "CAIF virtio transport driver" - depends on CAIF && HAS_DMA + depends on CAIF select VHOST_RING select VIRTIO select GENERIC_ALLOCATOR diff --git a/trunk/drivers/net/can/usb/esd_usb2.c b/trunk/drivers/net/can/usb/esd_usb2.c index 6aa7b3266c80..9b74d1e3ad44 100644 --- a/trunk/drivers/net/can/usb/esd_usb2.c +++ b/trunk/drivers/net/can/usb/esd_usb2.c @@ -612,15 +612,9 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv) { struct esd_usb2 *dev = priv->usb2; struct net_device *netdev = priv->netdev; - struct esd_usb2_msg *msg; + struct esd_usb2_msg msg; int err, i; - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) { - err = -ENOMEM; - goto out; - } - /* * Enable all IDs * The IDADD message takes up to 64 32 bit bitmasks (2048 bits). @@ -634,32 +628,33 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv) * the number of the starting bitmask (0..64) to the filter.option * field followed by only some bitmasks. 
*/ - msg->msg.hdr.cmd = CMD_IDADD; - msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT; - msg->msg.filter.net = priv->index; - msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */ + msg.msg.hdr.cmd = CMD_IDADD; + msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT; + msg.msg.filter.net = priv->index; + msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */ for (i = 0; i < ESD_MAX_ID_SEGMENT; i++) - msg->msg.filter.mask[i] = cpu_to_le32(0xffffffff); + msg.msg.filter.mask[i] = cpu_to_le32(0xffffffff); /* enable 29bit extended IDs */ - msg->msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001); + msg.msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001); - err = esd_usb2_send_msg(dev, msg); + err = esd_usb2_send_msg(dev, &msg); if (err) - goto out; + goto failed; err = esd_usb2_setup_rx_urbs(dev); if (err) - goto out; + goto failed; priv->can.state = CAN_STATE_ERROR_ACTIVE; -out: + return 0; + +failed: if (err == -ENODEV) netif_device_detach(netdev); - if (err) - netdev_err(netdev, "couldn't start device: %d\n", err); - kfree(msg); + netdev_err(netdev, "couldn't start device: %d\n", err); + return err; } @@ -838,30 +833,26 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb, static int esd_usb2_close(struct net_device *netdev) { struct esd_usb2_net_priv *priv = netdev_priv(netdev); - struct esd_usb2_msg *msg; + struct esd_usb2_msg msg; int i; - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) - return -ENOMEM; - /* Disable all IDs (see esd_usb2_start()) */ - msg->msg.hdr.cmd = CMD_IDADD; - msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT; - msg->msg.filter.net = priv->index; - msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */ + msg.msg.hdr.cmd = CMD_IDADD; + msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT; + msg.msg.filter.net = priv->index; + msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */ for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++) - msg->msg.filter.mask[i] = 0; - if (esd_usb2_send_msg(priv->usb2, msg) < 0) + msg.msg.filter.mask[i] = 0; + if (esd_usb2_send_msg(priv->usb2, &msg) < 0) netdev_err(netdev, "sending idadd message failed\n"); /* set CAN controller to reset mode */ - msg->msg.hdr.len = 2; - msg->msg.hdr.cmd = CMD_SETBAUD; - msg->msg.setbaud.net = priv->index; - msg->msg.setbaud.rsvd = 0; - msg->msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE); - if (esd_usb2_send_msg(priv->usb2, msg) < 0) + msg.msg.hdr.len = 2; + msg.msg.hdr.cmd = CMD_SETBAUD; + msg.msg.setbaud.net = priv->index; + msg.msg.setbaud.rsvd = 0; + msg.msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE); + if (esd_usb2_send_msg(priv->usb2, &msg) < 0) netdev_err(netdev, "sending setbaud message failed\n"); priv->can.state = CAN_STATE_STOPPED; @@ -870,8 +861,6 @@ static int esd_usb2_close(struct net_device *netdev) close_candev(netdev); - kfree(msg); - return 0; } @@ -897,8 +886,7 @@ static int esd_usb2_set_bittiming(struct net_device *netdev) { struct esd_usb2_net_priv *priv = netdev_priv(netdev); struct can_bittiming *bt = &priv->can.bittiming; - struct esd_usb2_msg *msg; - int err; + struct esd_usb2_msg msg; u32 canbtr; int sjw_shift; @@ -924,22 +912,15 @@ static int esd_usb2_set_bittiming(struct net_device *netdev) if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) canbtr |= ESD_USB2_3_SAMPLES; - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) - return -ENOMEM; - - msg->msg.hdr.len = 2; - msg->msg.hdr.cmd = CMD_SETBAUD; - msg->msg.setbaud.net = priv->index; - msg->msg.setbaud.rsvd = 0; - msg->msg.setbaud.baud = cpu_to_le32(canbtr); + msg.msg.hdr.len = 2; + 
msg.msg.hdr.cmd = CMD_SETBAUD; + msg.msg.setbaud.net = priv->index; + msg.msg.setbaud.rsvd = 0; + msg.msg.setbaud.baud = cpu_to_le32(canbtr); netdev_info(netdev, "setting BTR=%#x\n", canbtr); - err = esd_usb2_send_msg(priv->usb2, msg); - - kfree(msg); - return err; + return esd_usb2_send_msg(priv->usb2, &msg); } static int esd_usb2_get_berr_counter(const struct net_device *netdev, @@ -1041,7 +1022,7 @@ static int esd_usb2_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct esd_usb2 *dev; - struct esd_usb2_msg *msg; + struct esd_usb2_msg msg; int i, err; dev = kzalloc(sizeof(*dev), GFP_KERNEL); @@ -1056,33 +1037,27 @@ static int esd_usb2_probe(struct usb_interface *intf, usb_set_intfdata(intf, dev); - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) { - err = -ENOMEM; - goto free_msg; - } - /* query number of CAN interfaces (nets) */ - msg->msg.hdr.cmd = CMD_VERSION; - msg->msg.hdr.len = 2; - msg->msg.version.rsvd = 0; - msg->msg.version.flags = 0; - msg->msg.version.drv_version = 0; + msg.msg.hdr.cmd = CMD_VERSION; + msg.msg.hdr.len = 2; + msg.msg.version.rsvd = 0; + msg.msg.version.flags = 0; + msg.msg.version.drv_version = 0; - err = esd_usb2_send_msg(dev, msg); + err = esd_usb2_send_msg(dev, &msg); if (err < 0) { dev_err(&intf->dev, "sending version message failed\n"); - goto free_msg; + goto free_dev; } - err = esd_usb2_wait_msg(dev, msg); + err = esd_usb2_wait_msg(dev, &msg); if (err < 0) { dev_err(&intf->dev, "no version message answer\n"); - goto free_msg; + goto free_dev; } - dev->net_count = (int)msg->msg.version_reply.nets; - dev->version = le32_to_cpu(msg->msg.version_reply.version); + dev->net_count = (int)msg.msg.version_reply.nets; + dev->version = le32_to_cpu(msg.msg.version_reply.version); if (device_create_file(&intf->dev, &dev_attr_firmware)) dev_err(&intf->dev, @@ -1100,10 +1075,10 @@ static int esd_usb2_probe(struct usb_interface *intf, for (i = 0; i < dev->net_count; i++) esd_usb2_probe_one_net(intf, i); -free_msg: - kfree(msg); - if (err) - kfree(dev); + return 0; + +free_dev: + kfree(dev); done: return err; } diff --git a/trunk/drivers/net/can/usb/kvaser_usb.c b/trunk/drivers/net/can/usb/kvaser_usb.c index 3b9546588240..45cb9f3c1324 100644 --- a/trunk/drivers/net/can/usb/kvaser_usb.c +++ b/trunk/drivers/net/can/usb/kvaser_usb.c @@ -136,9 +136,6 @@ #define KVASER_CTRL_MODE_SELFRECEPTION 3 #define KVASER_CTRL_MODE_OFF 4 -/* log message */ -#define KVASER_EXTENDED_FRAME BIT(31) - struct kvaser_msg_simple { u8 tid; u8 channel; @@ -820,13 +817,8 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, priv = dev->nets[channel]; stats = &priv->netdev->stats; - if ((msg->u.rx_can.flag & MSG_FLAG_ERROR_FRAME) && - (msg->id == CMD_LOG_MESSAGE)) { - kvaser_usb_rx_error(dev, msg); - return; - } else if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | - MSG_FLAG_NERR | - MSG_FLAG_OVERRUN)) { + if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR | + MSG_FLAG_OVERRUN)) { kvaser_usb_rx_can_err(priv, msg); return; } else if (msg->u.rx_can.flag & ~MSG_FLAG_REMOTE_FRAME) { @@ -842,41 +834,23 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, return; } - if (msg->id == CMD_LOG_MESSAGE) { - cf->can_id = le32_to_cpu(msg->u.log_message.id); - if (cf->can_id & KVASER_EXTENDED_FRAME) - cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; - else - cf->can_id &= CAN_SFF_MASK; - - cf->can_dlc = get_can_dlc(msg->u.log_message.dlc); - - if (msg->u.log_message.flags & MSG_FLAG_REMOTE_FRAME) - cf->can_id |= CAN_RTR_FLAG; - else - memcpy(cf->data, 
&msg->u.log_message.data, - cf->can_dlc); - } else { - cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) | - (msg->u.rx_can.msg[1] & 0x3f); - - if (msg->id == CMD_RX_EXT_MESSAGE) { - cf->can_id <<= 18; - cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) | - ((msg->u.rx_can.msg[3] & 0xff) << 6) | - (msg->u.rx_can.msg[4] & 0x3f); - cf->can_id |= CAN_EFF_FLAG; - } + cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) | + (msg->u.rx_can.msg[1] & 0x3f); + cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]); - cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]); - - if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME) - cf->can_id |= CAN_RTR_FLAG; - else - memcpy(cf->data, &msg->u.rx_can.msg[6], - cf->can_dlc); + if (msg->id == CMD_RX_EXT_MESSAGE) { + cf->can_id <<= 18; + cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) | + ((msg->u.rx_can.msg[3] & 0xff) << 6) | + (msg->u.rx_can.msg[4] & 0x3f); + cf->can_id |= CAN_EFF_FLAG; } + if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME) + cf->can_id |= CAN_RTR_FLAG; + else + memcpy(cf->data, &msg->u.rx_can.msg[6], cf->can_dlc); + netif_rx(skb); stats->rx_packets++; @@ -937,7 +911,6 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev, case CMD_RX_STD_MESSAGE: case CMD_RX_EXT_MESSAGE: - case CMD_LOG_MESSAGE: kvaser_usb_rx_can_msg(dev, msg); break; @@ -946,6 +919,11 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev, kvaser_usb_rx_error(dev, msg); break; + case CMD_LOG_MESSAGE: + if (msg->u.log_message.flags & MSG_FLAG_ERROR_FRAME) + kvaser_usb_rx_error(dev, msg); + break; + case CMD_TX_ACKNOWLEDGE: kvaser_usb_tx_acknowledge(dev, msg); break; diff --git a/trunk/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/trunk/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 8ee9d1556e6e..30d79bfa5b10 100644 --- a/trunk/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/trunk/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -504,24 +504,15 @@ static int pcan_usb_pro_restart_async(struct peak_usb_device *dev, return usb_submit_urb(urb, GFP_ATOMIC); } -static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded) +static void pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded) { - u8 *buffer; - int err; - - buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL); - if (!buffer) - return -ENOMEM; + u8 buffer[16]; buffer[0] = 0; buffer[1] = !!loaded; - err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT, - PCAN_USBPRO_FCT_DRVLD, buffer, - PCAN_USBPRO_FCT_DRVLD_REQ_LEN); - kfree(buffer); - - return err; + pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT, + PCAN_USBPRO_FCT_DRVLD, buffer, sizeof(buffer)); } static inline @@ -860,24 +851,21 @@ static int pcan_usb_pro_stop(struct peak_usb_device *dev) */ static int pcan_usb_pro_init(struct peak_usb_device *dev) { + struct pcan_usb_pro_interface *usb_if; struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); - struct pcan_usb_pro_interface *usb_if = NULL; - struct pcan_usb_pro_fwinfo *fi = NULL; - struct pcan_usb_pro_blinfo *bi = NULL; - int err; /* do this for 1st channel only */ if (!dev->prev_siblings) { + struct pcan_usb_pro_fwinfo fi; + struct pcan_usb_pro_blinfo bi; + int err; + /* allocate netdevices common structure attached to first one */ usb_if = kzalloc(sizeof(struct pcan_usb_pro_interface), GFP_KERNEL); - fi = kmalloc(sizeof(struct pcan_usb_pro_fwinfo), GFP_KERNEL); - bi = kmalloc(sizeof(struct pcan_usb_pro_blinfo), GFP_KERNEL); - if (!usb_if || !fi || !bi) { - err = -ENOMEM; - goto err_out; - } + if (!usb_if) + return -ENOMEM; /* 
number of ts msgs to ignore before taking one into account */ usb_if->cm_ignore_count = 5; @@ -889,34 +877,34 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev) */ err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO, PCAN_USBPRO_INFO_FW, - fi, sizeof(*fi)); + &fi, sizeof(fi)); if (err) { + kfree(usb_if); dev_err(dev->netdev->dev.parent, "unable to read %s firmware info (err %d)\n", pcan_usb_pro.name, err); - goto err_out; + return err; } err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO, PCAN_USBPRO_INFO_BL, - bi, sizeof(*bi)); + &bi, sizeof(bi)); if (err) { + kfree(usb_if); dev_err(dev->netdev->dev.parent, "unable to read %s bootloader info (err %d)\n", pcan_usb_pro.name, err); - goto err_out; + return err; } - /* tell the device the can driver is running */ - err = pcan_usb_pro_drv_loaded(dev, 1); - if (err) - goto err_out; - dev_info(dev->netdev->dev.parent, "PEAK-System %s hwrev %u serial %08X.%08X (%u channels)\n", pcan_usb_pro.name, - bi->hw_rev, bi->serial_num_hi, bi->serial_num_lo, + bi.hw_rev, bi.serial_num_hi, bi.serial_num_lo, pcan_usb_pro.ctrl_count); + + /* tell the device the can driver is running */ + pcan_usb_pro_drv_loaded(dev, 1); } else { usb_if = pcan_usb_pro_dev_if(dev->prev_siblings); } @@ -928,13 +916,6 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev) pcan_usb_pro_set_led(dev, 0, 1); return 0; - - err_out: - kfree(bi); - kfree(fi); - kfree(usb_if); - - return err; } static void pcan_usb_pro_exit(struct peak_usb_device *dev) diff --git a/trunk/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/trunk/drivers/net/can/usb/peak_usb/pcan_usb_pro.h index 32275af547e0..a869918c5620 100644 --- a/trunk/drivers/net/can/usb/peak_usb/pcan_usb_pro.h +++ b/trunk/drivers/net/can/usb/peak_usb/pcan_usb_pro.h @@ -29,7 +29,6 @@ /* Vendor Request value for XXX_FCT */ #define PCAN_USBPRO_FCT_DRVLD 5 /* tell device driver is loaded */ -#define PCAN_USBPRO_FCT_DRVLD_REQ_LEN 16 /* PCAN_USBPRO_INFO_BL vendor request record type */ struct __packed pcan_usb_pro_blinfo { diff --git a/trunk/drivers/net/can/usb/usb_8dev.c b/trunk/drivers/net/can/usb/usb_8dev.c index cbd388eea682..6e15ef08f301 100644 --- a/trunk/drivers/net/can/usb/usb_8dev.c +++ b/trunk/drivers/net/can/usb/usb_8dev.c @@ -977,7 +977,7 @@ static int usb_8dev_probe(struct usb_interface *intf, err = usb_8dev_cmd_version(priv, &version); if (err) { netdev_err(netdev, "can't get firmware version\n"); - goto cleanup_unregister_candev; + goto cleanup_cmd_msg_buffer; } else { netdev_info(netdev, "firmware: %d.%d, hardware: %d.%d\n", @@ -989,9 +989,6 @@ static int usb_8dev_probe(struct usb_interface *intf, return 0; -cleanup_unregister_candev: - unregister_netdev(priv->netdev); - cleanup_cmd_msg_buffer: kfree(priv->cmd_msg_buffer); diff --git a/trunk/drivers/net/ethernet/3com/3c59x.c b/trunk/drivers/net/ethernet/3com/3c59x.c index 072c6f14e8fc..de570a8f8967 100644 --- a/trunk/drivers/net/ethernet/3com/3c59x.c +++ b/trunk/drivers/net/ethernet/3com/3c59x.c @@ -632,6 +632,7 @@ struct vortex_private { pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */ open:1, medialock:1, + must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ large_frames:1, /* accept large frames */ handling_irq:1; /* private in_irq indicator */ /* {get|set}_wol operations are already serialized by rtnl. 
@@ -1011,12 +1012,6 @@ static int vortex_init_one(struct pci_dev *pdev, if (rc < 0) goto out; - rc = pci_request_regions(pdev, DRV_NAME); - if (rc < 0) { - pci_disable_device(pdev); - goto out; - } - unit = vortex_cards_found; if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) { @@ -1032,7 +1027,6 @@ static int vortex_init_one(struct pci_dev *pdev, if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */ ioaddr = pci_iomap(pdev, 0, 0); if (!ioaddr) { - pci_release_regions(pdev); pci_disable_device(pdev); rc = -ENOMEM; goto out; @@ -1042,7 +1036,6 @@ static int vortex_init_one(struct pci_dev *pdev, ent->driver_data, unit); if (rc < 0) { pci_iounmap(pdev, ioaddr); - pci_release_regions(pdev); pci_disable_device(pdev); goto out; } @@ -1185,6 +1178,11 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, /* PCI-only startup logic */ if (pdev) { + /* EISA resources already marked, so only PCI needs to do this here */ + /* Ignore return value, because Cardbus drivers already allocate for us */ + if (request_region(dev->base_addr, vci->io_size, print_name) != NULL) + vp->must_free_region = 1; + /* enable bus-mastering if necessary */ if (vci->flags & PCI_USES_MASTER) pci_set_master(pdev); @@ -1222,7 +1220,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, &vp->rx_ring_dma); retval = -ENOMEM; if (!vp->rx_ring) - goto free_device; + goto free_region; vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE; @@ -1486,7 +1484,9 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); -free_device: +free_region: + if (vp->must_free_region) + release_region(dev->base_addr, vci->io_size); free_netdev(dev); pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); out: @@ -3254,9 +3254,8 @@ static void vortex_remove_one(struct pci_dev *pdev) + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); - - pci_release_regions(pdev); - + if (vp->must_free_region) + release_region(dev->base_addr, vp->io_size); free_netdev(dev); } diff --git a/trunk/drivers/net/ethernet/atheros/Kconfig b/trunk/drivers/net/ethernet/atheros/Kconfig index ad6aa1e98348..36d6abd1cfff 100644 --- a/trunk/drivers/net/ethernet/atheros/Kconfig +++ b/trunk/drivers/net/ethernet/atheros/Kconfig @@ -67,22 +67,4 @@ config ATL1C To compile this driver as a module, choose M here. The module will be called atl1c. -config ALX - tristate "Qualcomm Atheros AR816x/AR817x support" - depends on PCI - select CRC32 - select NET_CORE - select MDIO - help - This driver supports the Qualcomm Atheros L1F ethernet adapter, - i.e. the following chipsets: - - 1969:1091 - AR8161 Gigabit Ethernet - 1969:1090 - AR8162 Fast Ethernet - 1969:10A1 - AR8171 Gigabit Ethernet - 1969:10A0 - AR8172 Fast Ethernet - - To compile this driver as a module, choose M here. The module - will be called alx. 
- endif # NET_VENDOR_ATHEROS diff --git a/trunk/drivers/net/ethernet/atheros/Makefile b/trunk/drivers/net/ethernet/atheros/Makefile index 5cf1c65bbce9..e7e76fb576ff 100644 --- a/trunk/drivers/net/ethernet/atheros/Makefile +++ b/trunk/drivers/net/ethernet/atheros/Makefile @@ -6,4 +6,3 @@ obj-$(CONFIG_ATL1) += atlx/ obj-$(CONFIG_ATL2) += atlx/ obj-$(CONFIG_ATL1E) += atl1e/ obj-$(CONFIG_ATL1C) += atl1c/ -obj-$(CONFIG_ALX) += alx/ diff --git a/trunk/drivers/net/ethernet/atheros/alx/Makefile b/trunk/drivers/net/ethernet/atheros/alx/Makefile deleted file mode 100644 index 5901fa407d52..000000000000 --- a/trunk/drivers/net/ethernet/atheros/alx/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -obj-$(CONFIG_ALX) += alx.o -alx-objs := main.o ethtool.o hw.o -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/trunk/drivers/net/ethernet/atheros/alx/alx.h b/trunk/drivers/net/ethernet/atheros/alx/alx.h deleted file mode 100644 index 50b3ae2b143d..000000000000 --- a/trunk/drivers/net/ethernet/atheros/alx/alx.h +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (c) 2013 Johannes Berg - * - * This file is free software: you may copy, redistribute and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation, either version 2 of the License, or (at your - * option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * This file incorporates work covered by the following copyright and - * permission notice: - * - * Copyright (c) 2012 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#ifndef _ALX_H_ -#define _ALX_H_ - -#include -#include -#include -#include -#include "hw.h" - -#define ALX_WATCHDOG_TIME (5 * HZ) - -struct alx_buffer { - struct sk_buff *skb; - DEFINE_DMA_UNMAP_ADDR(dma); - DEFINE_DMA_UNMAP_LEN(size); -}; - -struct alx_rx_queue { - struct alx_rrd *rrd; - dma_addr_t rrd_dma; - - struct alx_rfd *rfd; - dma_addr_t rfd_dma; - - struct alx_buffer *bufs; - - u16 write_idx, read_idx; - u16 rrd_read_idx; -}; -#define ALX_RX_ALLOC_THRESH 32 - -struct alx_tx_queue { - struct alx_txd *tpd; - dma_addr_t tpd_dma; - struct alx_buffer *bufs; - u16 write_idx, read_idx; -}; - -#define ALX_DEFAULT_TX_WORK 128 - -enum alx_device_quirks { - ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0), -}; - -struct alx_priv { - struct net_device *dev; - - struct alx_hw hw; - - /* all descriptor memory */ - struct { - dma_addr_t dma; - void *virt; - int size; - } descmem; - - /* protect int_mask updates */ - spinlock_t irq_lock; - u32 int_mask; - - int tx_ringsz; - int rx_ringsz; - int rxbuf_size; - - struct napi_struct napi; - struct alx_tx_queue txq; - struct alx_rx_queue rxq; - - struct work_struct link_check_wk; - struct work_struct reset_wk; - - u16 msg_enable; - - bool msi; -}; - -extern const struct ethtool_ops alx_ethtool_ops; -extern const char alx_drv_name[]; - -#endif diff --git a/trunk/drivers/net/ethernet/atheros/alx/ethtool.c b/trunk/drivers/net/ethernet/atheros/alx/ethtool.c deleted file mode 100644 index 6fa2aec2bc81..000000000000 --- a/trunk/drivers/net/ethernet/atheros/alx/ethtool.c +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Copyright (c) 2013 Johannes Berg - * - * This file is free software: you may copy, redistribute and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation, either version 2 of the License, or (at your - * option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * This file incorporates work covered by the following copyright and - * permission notice: - * - * Copyright (c) 2012 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "alx.h" -#include "reg.h" -#include "hw.h" - - -static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) -{ - struct alx_priv *alx = netdev_priv(netdev); - struct alx_hw *hw = &alx->hw; - - ecmd->supported = SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_TP | - SUPPORTED_Pause; - if (alx_hw_giga(hw)) - ecmd->supported |= SUPPORTED_1000baseT_Full; - - ecmd->advertising = ADVERTISED_TP; - if (hw->adv_cfg & ADVERTISED_Autoneg) - ecmd->advertising |= hw->adv_cfg; - - ecmd->port = PORT_TP; - ecmd->phy_address = 0; - if (hw->adv_cfg & ADVERTISED_Autoneg) - ecmd->autoneg = AUTONEG_ENABLE; - else - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->transceiver = XCVR_INTERNAL; - - if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) { - if (hw->flowctrl & ALX_FC_RX) { - ecmd->advertising |= ADVERTISED_Pause; - - if (!(hw->flowctrl & ALX_FC_TX)) - ecmd->advertising |= ADVERTISED_Asym_Pause; - } else if (hw->flowctrl & ALX_FC_TX) { - ecmd->advertising |= ADVERTISED_Asym_Pause; - } - } - - if (hw->link_speed != SPEED_UNKNOWN) { - ethtool_cmd_speed_set(ecmd, - hw->link_speed - hw->link_speed % 10); - ecmd->duplex = hw->link_speed % 10; - } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; - } - - return 0; -} - -static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) -{ - struct alx_priv *alx = netdev_priv(netdev); - struct alx_hw *hw = &alx->hw; - u32 adv_cfg; - - ASSERT_RTNL(); - - if (ecmd->autoneg == AUTONEG_ENABLE) { - if (ecmd->advertising & ADVERTISED_1000baseT_Half) - return -EINVAL; - adv_cfg = ecmd->advertising | ADVERTISED_Autoneg; - } else { - int speed = ethtool_cmd_speed(ecmd); - - switch (speed + ecmd->duplex) { - case SPEED_10 + DUPLEX_HALF: - adv_cfg = ADVERTISED_10baseT_Half; - break; - case SPEED_10 + DUPLEX_FULL: - adv_cfg = ADVERTISED_10baseT_Full; - break; - case SPEED_100 + DUPLEX_HALF: - adv_cfg = ADVERTISED_100baseT_Half; - break; - case SPEED_100 + DUPLEX_FULL: - adv_cfg = ADVERTISED_100baseT_Full; - break; - default: - return -EINVAL; - } - } - - hw->adv_cfg = adv_cfg; - return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl); -} - -static void alx_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct alx_priv *alx = netdev_priv(netdev); - struct alx_hw *hw = &alx->hw; - - if (hw->flowctrl & ALX_FC_ANEG && - hw->adv_cfg & ADVERTISED_Autoneg) - pause->autoneg = AUTONEG_ENABLE; - else - pause->autoneg = AUTONEG_DISABLE; - - if (hw->flowctrl & ALX_FC_TX) - pause->tx_pause = 1; - else - pause->tx_pause = 0; - - if (hw->flowctrl & ALX_FC_RX) - pause->rx_pause = 1; - else - pause->rx_pause = 0; -} - - -static int alx_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct alx_priv *alx = netdev_priv(netdev); - struct alx_hw *hw = &alx->hw; - int err = 0; - bool reconfig_phy = false; - u8 fc = 0; - - if (pause->tx_pause) - fc |= ALX_FC_TX; - if (pause->rx_pause) - fc |= ALX_FC_RX; - if (pause->autoneg) - fc |= ALX_FC_ANEG; - - ASSERT_RTNL(); - - /* restart auto-neg for auto-mode */ - if (hw->adv_cfg & ADVERTISED_Autoneg) { - if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG)) - reconfig_phy = true; - if (fc & hw->flowctrl & ALX_FC_ANEG && - (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX)) - reconfig_phy = true; - } - - if 
(reconfig_phy) { - err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc); - return err; - } - - /* flow control on mac */ - if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX)) - alx_cfg_mac_flowcontrol(hw, fc); - - hw->flowctrl = fc; - - return 0; -} - -static u32 alx_get_msglevel(struct net_device *netdev) -{ - struct alx_priv *alx = netdev_priv(netdev); - - return alx->msg_enable; -} - -static void alx_set_msglevel(struct net_device *netdev, u32 data) -{ - struct alx_priv *alx = netdev_priv(netdev); - - alx->msg_enable = data; -} - -static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) -{ - struct alx_priv *alx = netdev_priv(netdev); - struct alx_hw *hw = &alx->hw; - - wol->supported = WAKE_MAGIC | WAKE_PHY; - wol->wolopts = 0; - - if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC) - wol->wolopts |= WAKE_MAGIC; - if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) - wol->wolopts |= WAKE_PHY; -} - -static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) -{ - struct alx_priv *alx = netdev_priv(netdev); - struct alx_hw *hw = &alx->hw; - - if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | - WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) - return -EOPNOTSUPP; - - hw->sleep_ctrl = 0; - - if (wol->wolopts & WAKE_MAGIC) - hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC; - if (wol->wolopts & WAKE_PHY) - hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY; - - device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl); - - return 0; -} - -static void alx_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct alx_priv *alx = netdev_priv(netdev); - - strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev), - sizeof(drvinfo->bus_info)); -} - -const struct ethtool_ops alx_ethtool_ops = { - .get_settings = alx_get_settings, - .set_settings = alx_set_settings, - .get_pauseparam = alx_get_pauseparam, - .set_pauseparam = alx_set_pauseparam, - .get_drvinfo = alx_get_drvinfo, - .get_msglevel = alx_get_msglevel, - .set_msglevel = alx_set_msglevel, - .get_wol = alx_get_wol, - .set_wol = alx_set_wol, - .get_link = ethtool_op_get_link, -}; diff --git a/trunk/drivers/net/ethernet/atheros/alx/hw.c b/trunk/drivers/net/ethernet/atheros/alx/hw.c deleted file mode 100644 index 220a16ad0e49..000000000000 --- a/trunk/drivers/net/ethernet/atheros/alx/hw.c +++ /dev/null @@ -1,1226 +0,0 @@ -/* - * Copyright (c) 2013 Johannes Berg - * - * This file is free software: you may copy, redistribute and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation, either version 2 of the License, or (at your - * option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * This file incorporates work covered by the following copyright and - * permission notice: - * - * Copyright (c) 2012 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ -#include -#include -#include -#include -#include "reg.h" -#include "hw.h" - -static inline bool alx_is_rev_a(u8 rev) -{ - return rev == ALX_REV_A0 || rev == ALX_REV_A1; -} - -static int alx_wait_mdio_idle(struct alx_hw *hw) -{ - u32 val; - int i; - - for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) { - val = alx_read_mem32(hw, ALX_MDIO); - if (!(val & ALX_MDIO_BUSY)) - return 0; - udelay(10); - } - - return -ETIMEDOUT; -} - -static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev, - u16 reg, u16 *phy_data) -{ - u32 val, clk_sel; - int err; - - *phy_data = 0; - - /* use slow clock when it's in hibernation status */ - clk_sel = hw->link_speed != SPEED_UNKNOWN ? - ALX_MDIO_CLK_SEL_25MD4 : - ALX_MDIO_CLK_SEL_25MD128; - - if (ext) { - val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT | - reg << ALX_MDIO_EXTN_REG_SHIFT; - alx_write_mem32(hw, ALX_MDIO_EXTN, val); - - val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START | - ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ | - clk_sel << ALX_MDIO_CLK_SEL_SHIFT; - } else { - val = ALX_MDIO_SPRES_PRMBL | - clk_sel << ALX_MDIO_CLK_SEL_SHIFT | - reg << ALX_MDIO_REG_SHIFT | - ALX_MDIO_START | ALX_MDIO_OP_READ; - } - alx_write_mem32(hw, ALX_MDIO, val); - - err = alx_wait_mdio_idle(hw); - if (err) - return err; - val = alx_read_mem32(hw, ALX_MDIO); - *phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA); - return 0; -} - -static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev, - u16 reg, u16 phy_data) -{ - u32 val, clk_sel; - - /* use slow clock when it's in hibernation status */ - clk_sel = hw->link_speed != SPEED_UNKNOWN ? 
- ALX_MDIO_CLK_SEL_25MD4 : - ALX_MDIO_CLK_SEL_25MD128; - - if (ext) { - val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT | - reg << ALX_MDIO_EXTN_REG_SHIFT; - alx_write_mem32(hw, ALX_MDIO_EXTN, val); - - val = ALX_MDIO_SPRES_PRMBL | - clk_sel << ALX_MDIO_CLK_SEL_SHIFT | - phy_data << ALX_MDIO_DATA_SHIFT | - ALX_MDIO_START | ALX_MDIO_MODE_EXT; - } else { - val = ALX_MDIO_SPRES_PRMBL | - clk_sel << ALX_MDIO_CLK_SEL_SHIFT | - reg << ALX_MDIO_REG_SHIFT | - phy_data << ALX_MDIO_DATA_SHIFT | - ALX_MDIO_START; - } - alx_write_mem32(hw, ALX_MDIO, val); - - return alx_wait_mdio_idle(hw); -} - -static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data) -{ - return alx_read_phy_core(hw, false, 0, reg, phy_data); -} - -static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data) -{ - return alx_write_phy_core(hw, false, 0, reg, phy_data); -} - -static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata) -{ - return alx_read_phy_core(hw, true, dev, reg, pdata); -} - -static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data) -{ - return alx_write_phy_core(hw, true, dev, reg, data); -} - -static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata) -{ - int err; - - err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg); - if (err) - return err; - - return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata); -} - -static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data) -{ - int err; - - err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg); - if (err) - return err; - - return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data); -} - -int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data) -{ - int err; - - spin_lock(&hw->mdio_lock); - err = __alx_read_phy_reg(hw, reg, phy_data); - spin_unlock(&hw->mdio_lock); - - return err; -} - -int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data) -{ - int err; - - spin_lock(&hw->mdio_lock); - err = __alx_write_phy_reg(hw, reg, phy_data); - spin_unlock(&hw->mdio_lock); - - return err; -} - -int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata) -{ - int err; - - spin_lock(&hw->mdio_lock); - err = __alx_read_phy_ext(hw, dev, reg, pdata); - spin_unlock(&hw->mdio_lock); - - return err; -} - -int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data) -{ - int err; - - spin_lock(&hw->mdio_lock); - err = __alx_write_phy_ext(hw, dev, reg, data); - spin_unlock(&hw->mdio_lock); - - return err; -} - -static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata) -{ - int err; - - spin_lock(&hw->mdio_lock); - err = __alx_read_phy_dbg(hw, reg, pdata); - spin_unlock(&hw->mdio_lock); - - return err; -} - -static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data) -{ - int err; - - spin_lock(&hw->mdio_lock); - err = __alx_write_phy_dbg(hw, reg, data); - spin_unlock(&hw->mdio_lock); - - return err; -} - -static u16 alx_get_phy_config(struct alx_hw *hw) -{ - u32 val; - u16 phy_val; - - val = alx_read_mem32(hw, ALX_PHY_CTRL); - /* phy in reset */ - if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0) - return ALX_DRV_PHY_UNKNOWN; - - val = alx_read_mem32(hw, ALX_DRV); - val = ALX_GET_FIELD(val, ALX_DRV_PHY); - if (ALX_DRV_PHY_UNKNOWN == val) - return ALX_DRV_PHY_UNKNOWN; - - alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val); - if (ALX_PHY_INITED == phy_val) - return val; - - return ALX_DRV_PHY_UNKNOWN; -} - -static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val) -{ - u32 read; - int i; - - for (i = 0; i < ALX_SLD_MAX_TO; i++) { - read = alx_read_mem32(hw, reg); - 
if ((read & wait) == 0) { - if (val) - *val = read; - return true; - } - mdelay(1); - } - - return false; -} - -static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr) -{ - u32 mac0, mac1; - - mac0 = alx_read_mem32(hw, ALX_STAD0); - mac1 = alx_read_mem32(hw, ALX_STAD1); - - /* addr should be big-endian */ - *(__be32 *)(addr + 2) = cpu_to_be32(mac0); - *(__be16 *)addr = cpu_to_be16(mac1); - - return is_valid_ether_addr(addr); -} - -int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr) -{ - u32 val; - - /* try to get it from register first */ - if (alx_read_macaddr(hw, addr)) - return 0; - - /* try to load from efuse */ - if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val)) - return -EIO; - alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START); - if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL)) - return -EIO; - if (alx_read_macaddr(hw, addr)) - return 0; - - /* try to load from flash/eeprom (if present) */ - val = alx_read_mem32(hw, ALX_EFLD); - if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) { - if (!alx_wait_reg(hw, ALX_EFLD, - ALX_EFLD_STAT | ALX_EFLD_START, &val)) - return -EIO; - alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START); - if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL)) - return -EIO; - if (alx_read_macaddr(hw, addr)) - return 0; - } - - return -EIO; -} - -void alx_set_macaddr(struct alx_hw *hw, const u8 *addr) -{ - u32 val; - - /* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */ - val = be32_to_cpu(*(__be32 *)(addr + 2)); - alx_write_mem32(hw, ALX_STAD0, val); - val = be16_to_cpu(*(__be16 *)addr); - alx_write_mem32(hw, ALX_STAD1, val); -} - -static void alx_enable_osc(struct alx_hw *hw) -{ - u32 val; - - /* rising edge */ - val = alx_read_mem32(hw, ALX_MISC); - alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN); - alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); -} - -static void alx_reset_osc(struct alx_hw *hw, u8 rev) -{ - u32 val, val2; - - /* clear Internal OSC settings, switching OSC by hw itself */ - val = alx_read_mem32(hw, ALX_MISC3); - alx_write_mem32(hw, ALX_MISC3, - (val & ~ALX_MISC3_25M_BY_SW) | - ALX_MISC3_25M_NOTO_INTNL); - - /* 25M clk from chipset may be unstable 1s after de-assert of - * PERST, driver need re-calibrate before enter Sleep for WoL - */ - val = alx_read_mem32(hw, ALX_MISC); - if (rev >= ALX_REV_B0) { - /* restore over current protection def-val, - * this val could be reset by MAC-RST - */ - ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF); - /* a 0->1 change will update the internal val of osc */ - val &= ~ALX_MISC_INTNLOSC_OPEN; - alx_write_mem32(hw, ALX_MISC, val); - alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); - /* hw will automatically dis OSC after cab. 
*/ - val2 = alx_read_mem32(hw, ALX_MSIC2); - val2 &= ~ALX_MSIC2_CALB_START; - alx_write_mem32(hw, ALX_MSIC2, val2); - alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START); - } else { - val &= ~ALX_MISC_INTNLOSC_OPEN; - /* disable isolate for rev A devices */ - if (alx_is_rev_a(rev)) - val &= ~ALX_MISC_ISO_EN; - - alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); - alx_write_mem32(hw, ALX_MISC, val); - } - - udelay(20); -} - -static int alx_stop_mac(struct alx_hw *hw) -{ - u32 rxq, txq, val; - u16 i; - - rxq = alx_read_mem32(hw, ALX_RXQ0); - alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN); - txq = alx_read_mem32(hw, ALX_TXQ0); - alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN); - - udelay(40); - - hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN); - alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); - - for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) { - val = alx_read_mem32(hw, ALX_MAC_STS); - if (!(val & ALX_MAC_STS_IDLE)) - return 0; - udelay(10); - } - - return -ETIMEDOUT; -} - -int alx_reset_mac(struct alx_hw *hw) -{ - u32 val, pmctrl; - int i, ret; - u8 rev; - bool a_cr; - - pmctrl = 0; - rev = alx_hw_revision(hw); - a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw); - - /* disable all interrupts, RXQ/TXQ */ - alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF); - alx_write_mem32(hw, ALX_IMR, 0); - alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS); - - ret = alx_stop_mac(hw); - if (ret) - return ret; - - /* mac reset workaroud */ - alx_write_mem32(hw, ALX_RFD_PIDX, 1); - - /* dis l0s/l1 before mac reset */ - if (a_cr) { - pmctrl = alx_read_mem32(hw, ALX_PMCTRL); - if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN)) - alx_write_mem32(hw, ALX_PMCTRL, - pmctrl & ~(ALX_PMCTRL_L1_EN | - ALX_PMCTRL_L0S_EN)); - } - - /* reset whole mac safely */ - val = alx_read_mem32(hw, ALX_MASTER); - alx_write_mem32(hw, ALX_MASTER, - val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS); - - /* make sure it's real idle */ - udelay(10); - for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) { - val = alx_read_mem32(hw, ALX_RFD_PIDX); - if (val == 0) - break; - udelay(10); - } - for (; i < ALX_DMA_MAC_RST_TO; i++) { - val = alx_read_mem32(hw, ALX_MASTER); - if ((val & ALX_MASTER_DMA_MAC_RST) == 0) - break; - udelay(10); - } - if (i == ALX_DMA_MAC_RST_TO) - return -EIO; - udelay(10); - - if (a_cr) { - alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS); - /* restore l0s / l1 */ - if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN)) - alx_write_mem32(hw, ALX_PMCTRL, pmctrl); - } - - alx_reset_osc(hw, rev); - - /* clear Internal OSC settings, switching OSC by hw itself, - * disable isolate for rev A devices - */ - val = alx_read_mem32(hw, ALX_MISC3); - alx_write_mem32(hw, ALX_MISC3, - (val & ~ALX_MISC3_25M_BY_SW) | - ALX_MISC3_25M_NOTO_INTNL); - val = alx_read_mem32(hw, ALX_MISC); - val &= ~ALX_MISC_INTNLOSC_OPEN; - if (alx_is_rev_a(rev)) - val &= ~ALX_MISC_ISO_EN; - alx_write_mem32(hw, ALX_MISC, val); - udelay(20); - - /* driver control speed/duplex, hash-alg */ - alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); - - val = alx_read_mem32(hw, ALX_SERDES); - alx_write_mem32(hw, ALX_SERDES, - val | ALX_SERDES_MACCLK_SLWDWN | - ALX_SERDES_PHYCLK_SLWDWN); - - return 0; -} - -void alx_reset_phy(struct alx_hw *hw) -{ - int i; - u32 val; - u16 phy_val; - - /* (DSP)reset PHY core */ - val = alx_read_mem32(hw, ALX_PHY_CTRL); - val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ | - ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN | - ALX_PHY_CTRL_CLS); - val |= ALX_PHY_CTRL_RST_ANALOG; - - val |= (ALX_PHY_CTRL_HIB_PULSE 
| ALX_PHY_CTRL_HIB_EN); - alx_write_mem32(hw, ALX_PHY_CTRL, val); - udelay(10); - alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT); - - for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++) - udelay(10); - - /* phy power saving & hib */ - alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF); - alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL, - ALX_SYSMODCTRL_IECHOADJ_DEF); - alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS, - ALX_VDRVBIAS_DEF); - - /* EEE advertisement */ - val = alx_read_mem32(hw, ALX_LPI_CTRL); - alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN); - alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0); - - /* phy power saving */ - alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF); - alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF); - alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF); - alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF); - alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val); - alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, - phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN); - /* rtl8139c, 120m issue */ - alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78, - ALX_MIIEXT_NLP78_120M_DEF); - alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10, - ALX_MIIEXT_S3DIG10_DEF); - - if (hw->lnk_patch) { - /* Turn off half amplitude */ - alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3, - &phy_val); - alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3, - phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT); - /* Turn off Green feature */ - alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val); - alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, - phy_val | ALX_GREENCFG2_BP_GREEN); - /* Turn off half Bias */ - alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5, - &phy_val); - alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5, - phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS); - } - - /* set phy interrupt mask */ - alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN); -} - -#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO) - -void alx_reset_pcie(struct alx_hw *hw) -{ - u8 rev = alx_hw_revision(hw); - u32 val; - u16 val16; - - /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. 
*/ - pci_read_config_word(hw->pdev, PCI_COMMAND, &val16); - if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) { - val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE; - pci_write_config_word(hw->pdev, PCI_COMMAND, val16); - } - - /* clear WoL setting/status */ - val = alx_read_mem32(hw, ALX_WOL0); - alx_write_mem32(hw, ALX_WOL0, 0); - - val = alx_read_mem32(hw, ALX_PDLL_TRNS1); - alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN); - - /* mask some pcie error bits */ - val = alx_read_mem32(hw, ALX_UE_SVRT); - val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR); - alx_write_mem32(hw, ALX_UE_SVRT, val); - - /* wol 25M & pclk */ - val = alx_read_mem32(hw, ALX_MASTER); - if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) { - if ((val & ALX_MASTER_WAKEN_25M) == 0 || - (val & ALX_MASTER_PCLKSEL_SRDS) == 0) - alx_write_mem32(hw, ALX_MASTER, - val | ALX_MASTER_PCLKSEL_SRDS | - ALX_MASTER_WAKEN_25M); - } else { - if ((val & ALX_MASTER_WAKEN_25M) == 0 || - (val & ALX_MASTER_PCLKSEL_SRDS) != 0) - alx_write_mem32(hw, ALX_MASTER, - (val & ~ALX_MASTER_PCLKSEL_SRDS) | - ALX_MASTER_WAKEN_25M); - } - - /* ASPM setting */ - alx_enable_aspm(hw, true, true); - - udelay(10); -} - -void alx_start_mac(struct alx_hw *hw) -{ - u32 mac, txq, rxq; - - rxq = alx_read_mem32(hw, ALX_RXQ0); - alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN); - txq = alx_read_mem32(hw, ALX_TXQ0); - alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN); - - mac = hw->rx_ctrl; - if (hw->link_speed % 10 == DUPLEX_FULL) - mac |= ALX_MAC_CTRL_FULLD; - else - mac &= ~ALX_MAC_CTRL_FULLD; - ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, - hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 : - ALX_MAC_CTRL_SPEED_10_100); - mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN; - hw->rx_ctrl = mac; - alx_write_mem32(hw, ALX_MAC_CTRL, mac); -} - -void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc) -{ - if (fc & ALX_FC_RX) - hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN; - else - hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN; - - if (fc & ALX_FC_TX) - hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN; - else - hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN; - - alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); -} - -void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en) -{ - u32 pmctrl; - u8 rev = alx_hw_revision(hw); - - pmctrl = alx_read_mem32(hw, ALX_PMCTRL); - - ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER, - ALX_PMCTRL_LCKDET_TIMER_DEF); - pmctrl |= ALX_PMCTRL_RCVR_WT_1US | - ALX_PMCTRL_L1_CLKSW_EN | - ALX_PMCTRL_L1_SRDSRX_PWD; - ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF); - ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US); - pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN | - ALX_PMCTRL_L1_SRDSPLL_EN | - ALX_PMCTRL_L1_BUFSRX_EN | - ALX_PMCTRL_SADLY_EN | - ALX_PMCTRL_HOTRST_WTEN| - ALX_PMCTRL_L0S_EN | - ALX_PMCTRL_L1_EN | - ALX_PMCTRL_ASPM_FCEN | - ALX_PMCTRL_TXL1_AFTER_L0S | - ALX_PMCTRL_RXL1_AFTER_L0S); - if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) - pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN; - - if (l0s_en) - pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN); - if (l1_en) - pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN); - - alx_write_mem32(hw, ALX_PMCTRL, pmctrl); -} - - -static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg) -{ - u32 cfg = 0; - - if (ethadv_cfg & ADVERTISED_Autoneg) { - cfg |= ALX_DRV_PHY_AUTO; - if (ethadv_cfg & ADVERTISED_10baseT_Half) - cfg |= ALX_DRV_PHY_10; - if (ethadv_cfg & ADVERTISED_10baseT_Full) - cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX; - if (ethadv_cfg 
& ADVERTISED_100baseT_Half) - cfg |= ALX_DRV_PHY_100; - if (ethadv_cfg & ADVERTISED_100baseT_Full) - cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; - if (ethadv_cfg & ADVERTISED_1000baseT_Half) - cfg |= ALX_DRV_PHY_1000; - if (ethadv_cfg & ADVERTISED_1000baseT_Full) - cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; - if (ethadv_cfg & ADVERTISED_Pause) - cfg |= ADVERTISE_PAUSE_CAP; - if (ethadv_cfg & ADVERTISED_Asym_Pause) - cfg |= ADVERTISE_PAUSE_ASYM; - } else { - switch (ethadv_cfg) { - case ADVERTISED_10baseT_Half: - cfg |= ALX_DRV_PHY_10; - break; - case ADVERTISED_100baseT_Half: - cfg |= ALX_DRV_PHY_100; - break; - case ADVERTISED_10baseT_Full: - cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX; - break; - case ADVERTISED_100baseT_Full: - cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; - break; - } - } - - return cfg; -} - -int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl) -{ - u16 adv, giga, cr; - u32 val; - int err = 0; - - alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0); - val = alx_read_mem32(hw, ALX_DRV); - ALX_SET_FIELD(val, ALX_DRV_PHY, 0); - - if (ethadv & ADVERTISED_Autoneg) { - adv = ADVERTISE_CSMA; - adv |= ethtool_adv_to_mii_adv_t(ethadv); - - if (flowctrl & ALX_FC_ANEG) { - if (flowctrl & ALX_FC_RX) { - adv |= ADVERTISED_Pause; - if (!(flowctrl & ALX_FC_TX)) - adv |= ADVERTISED_Asym_Pause; - } else if (flowctrl & ALX_FC_TX) { - adv |= ADVERTISED_Asym_Pause; - } - } - giga = 0; - if (alx_hw_giga(hw)) - giga = ethtool_adv_to_mii_ctrl1000_t(ethadv); - - cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART; - - if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) || - alx_write_phy_reg(hw, MII_CTRL1000, giga) || - alx_write_phy_reg(hw, MII_BMCR, cr)) - err = -EBUSY; - } else { - cr = BMCR_RESET; - if (ethadv == ADVERTISED_100baseT_Half || - ethadv == ADVERTISED_100baseT_Full) - cr |= BMCR_SPEED100; - if (ethadv == ADVERTISED_10baseT_Full || - ethadv == ADVERTISED_100baseT_Full) - cr |= BMCR_FULLDPLX; - - err = alx_write_phy_reg(hw, MII_BMCR, cr); - } - - if (!err) { - alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED); - val |= ethadv_to_hw_cfg(hw, ethadv); - } - - alx_write_mem32(hw, ALX_DRV, val); - - return err; -} - - -void alx_post_phy_link(struct alx_hw *hw) -{ - u16 phy_val, len, agc; - u8 revid = alx_hw_revision(hw); - bool adj_th = revid == ALX_REV_B0; - int speed; - - if (hw->link_speed == SPEED_UNKNOWN) - speed = SPEED_UNKNOWN; - else - speed = hw->link_speed - hw->link_speed % 10; - - if (revid != ALX_REV_B0 && !alx_is_rev_a(revid)) - return; - - /* 1000BT/AZ, wrong cable length */ - if (speed != SPEED_UNKNOWN) { - alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6, - &phy_val); - len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN); - alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val); - agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA); - - if ((speed == SPEED_1000 && - (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G || - (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) || - (speed == SPEED_100 && - (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M || - (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) { - alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT, - ALX_AZ_ANADECT_LONG); - alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, - &phy_val); - alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, - phy_val | ALX_AFE_10BT_100M_TH); - } else { - alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT, - ALX_AZ_ANADECT_DEF); - alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, - ALX_MIIEXT_AFE, &phy_val); - alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, - phy_val & ~ALX_AFE_10BT_100M_TH); - } - - /* threshold adjust */ - if 
(adj_th && hw->lnk_patch) { - if (speed == SPEED_100) { - alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB, - ALX_MSE16DB_UP); - } else if (speed == SPEED_1000) { - /* - * Giga link threshold, raise the tolerance of - * noise 50% - */ - alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, - &phy_val); - ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH, - ALX_MSE20DB_TH_HI); - alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, - phy_val); - } - } - } else { - alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, - &phy_val); - alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, - phy_val & ~ALX_AFE_10BT_100M_TH); - - if (adj_th && hw->lnk_patch) { - alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB, - ALX_MSE16DB_DOWN); - alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val); - ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH, - ALX_MSE20DB_TH_DEF); - alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val); - } - } -} - - -/* NOTE: - * 1. phy link must be established before calling this function - * 2. wol option (pattern,magic,link,etc.) is configed before call it. - */ -int alx_pre_suspend(struct alx_hw *hw, int speed) -{ - u32 master, mac, phy, val; - int err = 0; - - master = alx_read_mem32(hw, ALX_MASTER); - master &= ~ALX_MASTER_PCLKSEL_SRDS; - mac = hw->rx_ctrl; - /* 10/100 half */ - ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100); - mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN); - - phy = alx_read_mem32(hw, ALX_PHY_CTRL); - phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS); - phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE | - ALX_PHY_CTRL_HIB_EN; - - /* without any activity */ - if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) { - err = alx_write_phy_reg(hw, ALX_MII_IER, 0); - if (err) - return err; - phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN; - } else { - if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS)) - mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN; - if (hw->sleep_ctrl & ALX_SLEEP_CIFS) - mac |= ALX_MAC_CTRL_TX_EN; - if (speed % 10 == DUPLEX_FULL) - mac |= ALX_MAC_CTRL_FULLD; - if (speed >= SPEED_1000) - ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, - ALX_MAC_CTRL_SPEED_1000); - phy |= ALX_PHY_CTRL_DSPRST_OUT; - err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, - ALX_MIIEXT_S3DIG10, - ALX_MIIEXT_S3DIG10_SL); - if (err) - return err; - } - - alx_enable_osc(hw); - hw->rx_ctrl = mac; - alx_write_mem32(hw, ALX_MASTER, master); - alx_write_mem32(hw, ALX_MAC_CTRL, mac); - alx_write_mem32(hw, ALX_PHY_CTRL, phy); - - /* set val of PDLL D3PLLOFF */ - val = alx_read_mem32(hw, ALX_PDLL_TRNS1); - val |= ALX_PDLL_TRNS1_D3PLLOFF_EN; - alx_write_mem32(hw, ALX_PDLL_TRNS1, val); - - return 0; -} - -bool alx_phy_configured(struct alx_hw *hw) -{ - u32 cfg, hw_cfg; - - cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg); - cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY); - hw_cfg = alx_get_phy_config(hw); - - if (hw_cfg == ALX_DRV_PHY_UNKNOWN) - return false; - - return cfg == hw_cfg; -} - -int alx_get_phy_link(struct alx_hw *hw, int *speed) -{ - struct pci_dev *pdev = hw->pdev; - u16 bmsr, giga; - int err; - - err = alx_read_phy_reg(hw, MII_BMSR, &bmsr); - if (err) - return err; - - err = alx_read_phy_reg(hw, MII_BMSR, &bmsr); - if (err) - return err; - - if (!(bmsr & BMSR_LSTATUS)) { - *speed = SPEED_UNKNOWN; - return 0; - } - - /* speed/duplex result is saved in PHY Specific Status Register */ - err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga); - if (err) - return err; - - if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED)) - goto wrong_speed; - - switch (giga & ALX_GIGA_PSSR_SPEED) { - case ALX_GIGA_PSSR_1000MBS: - *speed 
= SPEED_1000; - break; - case ALX_GIGA_PSSR_100MBS: - *speed = SPEED_100; - break; - case ALX_GIGA_PSSR_10MBS: - *speed = SPEED_10; - break; - default: - goto wrong_speed; - } - - *speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF; - return 1; - -wrong_speed: - dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga); - return -EINVAL; -} - -int alx_clear_phy_intr(struct alx_hw *hw) -{ - u16 isr; - - /* clear interrupt status by reading it */ - return alx_read_phy_reg(hw, ALX_MII_ISR, &isr); -} - -int alx_config_wol(struct alx_hw *hw) -{ - u32 wol = 0; - int err = 0; - - /* turn on magic packet event */ - if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC) - wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN; - - /* turn on link up event */ - if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) { - wol |= ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK; - /* only link up can wake up */ - err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP); - } - alx_write_mem32(hw, ALX_WOL0, wol); - - return err; -} - -void alx_disable_rss(struct alx_hw *hw) -{ - u32 ctrl = alx_read_mem32(hw, ALX_RXQ0); - - ctrl &= ~ALX_RXQ0_RSS_HASH_EN; - alx_write_mem32(hw, ALX_RXQ0, ctrl); -} - -void alx_configure_basic(struct alx_hw *hw) -{ - u32 val, raw_mtu, max_payload; - u16 val16; - u8 chip_rev = alx_hw_revision(hw); - - alx_set_macaddr(hw, hw->mac_addr); - - alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL); - - /* idle timeout to switch clk_125M */ - if (chip_rev >= ALX_REV_B0) - alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER, - ALX_IDLE_DECISN_TIMER_DEF); - - alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL); - - val = alx_read_mem32(hw, ALX_MASTER); - val |= ALX_MASTER_IRQMOD2_EN | - ALX_MASTER_IRQMOD1_EN | - ALX_MASTER_SYSALVTIMER_EN; - alx_write_mem32(hw, ALX_MASTER, val); - alx_write_mem32(hw, ALX_IRQ_MODU_TIMER, - (hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT); - /* intr re-trig timeout */ - alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO); - /* tpd threshold to trig int */ - alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd); - alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt); - - raw_mtu = hw->mtu + ETH_HLEN; - alx_write_mem32(hw, ALX_MTU, raw_mtu + 8); - if (raw_mtu > ALX_MTU_JUMBO_TH) - hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE; - - if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH) - val = (raw_mtu + 8 + 7) >> 3; - else - val = ALX_TXQ1_JUMBO_TSO_TH >> 3; - alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN); - - max_payload = pcie_get_readrq(hw->pdev) >> 8; - /* - * if BIOS had changed the default dma read max length, - * restore it to default value - */ - if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN) - pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN); - - val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT | - ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN | - ALX_TXQ0_SUPT_IPOPT | - ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT; - alx_write_mem32(hw, ALX_TXQ0, val); - val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT | - ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT | - ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT | - ALX_HQTPD_BURST_EN; - alx_write_mem32(hw, ALX_HQTPD, val); - - /* rxq, flow control */ - val = alx_read_mem32(hw, ALX_SRAM5); - val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3; - if (val > ALX_SRAM_RXF_LEN_8K) { - val16 = ALX_MTU_STD_ALGN >> 3; - val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3; - } else { - val16 = ALX_MTU_STD_ALGN >> 3; - val = (val - ALX_MTU_STD_ALGN) >> 3; - } - alx_write_mem32(hw, ALX_RXQ2, - val16 << 
ALX_RXQ2_RXF_XOFF_THRESH_SHIFT | - val << ALX_RXQ2_RXF_XON_THRESH_SHIFT); - val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT | - ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT | - ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT | - ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN | - ALX_RXQ0_IPV6_PARSE_EN; - - if (alx_hw_giga(hw)) - ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH, - ALX_RXQ0_ASPM_THRESH_100M); - - alx_write_mem32(hw, ALX_RXQ0, val); - - val = alx_read_mem32(hw, ALX_DMA); - val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT | - ALX_DMA_RREQ_PRI_DATA | - max_payload << ALX_DMA_RREQ_BLEN_SHIFT | - ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT | - ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT | - (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT; - alx_write_mem32(hw, ALX_DMA, val); - - /* default multi-tx-q weights */ - val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT | - 4 << ALX_WRR_PRI0_SHIFT | - 4 << ALX_WRR_PRI1_SHIFT | - 4 << ALX_WRR_PRI2_SHIFT | - 4 << ALX_WRR_PRI3_SHIFT; - alx_write_mem32(hw, ALX_WRR, val); -} - -static inline u32 alx_speed_to_ethadv(int speed) -{ - switch (speed) { - case SPEED_1000 + DUPLEX_FULL: - return ADVERTISED_1000baseT_Full; - case SPEED_100 + DUPLEX_FULL: - return ADVERTISED_100baseT_Full; - case SPEED_100 + DUPLEX_HALF: - return ADVERTISED_10baseT_Half; - case SPEED_10 + DUPLEX_FULL: - return ADVERTISED_10baseT_Full; - case SPEED_10 + DUPLEX_HALF: - return ADVERTISED_10baseT_Half; - default: - return 0; - } -} - -int alx_select_powersaving_speed(struct alx_hw *hw, int *speed) -{ - int i, err, spd; - u16 lpa; - - err = alx_get_phy_link(hw, &spd); - if (err < 0) - return err; - - if (spd == SPEED_UNKNOWN) - return 0; - - err = alx_read_phy_reg(hw, MII_LPA, &lpa); - if (err) - return err; - - if (!(lpa & LPA_LPACK)) { - *speed = spd; - return 0; - } - - if (lpa & LPA_10FULL) - *speed = SPEED_10 + DUPLEX_FULL; - else if (lpa & LPA_10HALF) - *speed = SPEED_10 + DUPLEX_HALF; - else if (lpa & LPA_100FULL) - *speed = SPEED_100 + DUPLEX_FULL; - else - *speed = SPEED_100 + DUPLEX_HALF; - - if (*speed != spd) { - err = alx_write_phy_reg(hw, ALX_MII_IER, 0); - if (err) - return err; - err = alx_setup_speed_duplex(hw, - alx_speed_to_ethadv(*speed) | - ADVERTISED_Autoneg, - ALX_FC_ANEG | ALX_FC_RX | - ALX_FC_TX); - if (err) - return err; - - /* wait for linkup */ - for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) { - int speed2; - - msleep(100); - - err = alx_get_phy_link(hw, &speed2); - if (err < 0) - return err; - if (speed2 != SPEED_UNKNOWN) - break; - } - if (i == ALX_MAX_SETUP_LNK_CYCLE) - return -ETIMEDOUT; - } - - return 0; -} - -bool alx_get_phy_info(struct alx_hw *hw) -{ - u16 devs1, devs2; - - if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) || - alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1])) - return false; - - /* since we haven't PMA/PMD status2 register, we can't - * use mdio45_probe function for prtad and mmds. - * use fixed MMD3 to get mmds. 
- */ - if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) || - alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2)) - return false; - hw->mdio.mmds = devs1 | devs2 << 16; - - return true; -} diff --git a/trunk/drivers/net/ethernet/atheros/alx/hw.h b/trunk/drivers/net/ethernet/atheros/alx/hw.h deleted file mode 100644 index 65e723d2172a..000000000000 --- a/trunk/drivers/net/ethernet/atheros/alx/hw.h +++ /dev/null @@ -1,499 +0,0 @@ -/* - * Copyright (c) 2013 Johannes Berg - * - * This file is free software: you may copy, redistribute and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation, either version 2 of the License, or (at your - * option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * This file incorporates work covered by the following copyright and - * permission notice: - * - * Copyright (c) 2012 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#ifndef ALX_HW_H_ -#define ALX_HW_H_ -#include -#include -#include -#include "reg.h" - -/* Transmit Packet Descriptor, contains 4 32-bit words. - * - * 31 16 0 - * +----------------+----------------+ - * | vlan-tag | buf length | - * +----------------+----------------+ - * | Word 1 | - * +----------------+----------------+ - * | Word 2: buf addr lo | - * +----------------+----------------+ - * | Word 3: buf addr hi | - * +----------------+----------------+ - * - * Word 2 and 3 combine to form a 64-bit buffer address - * - * Word 1 has three forms, depending on the state of bit 8/12/13: - * if bit8 =='1', the definition is just for custom checksum offload. - * if bit8 == '0' && bit12 == '1' && bit13 == '1', the *FIRST* descriptor - * for the skb is special for LSO V2, Word 2 become total skb length , - * Word 3 is meaningless. - * other condition, the definition is for general skb or ip/tcp/udp - * checksum or LSO(TSO) offload. 
- * - * Here is the depiction: - * - * 0-+ 0-+ - * 1 | 1 | - * 2 | 2 | - * 3 | Payload offset 3 | L4 header offset - * 4 | (7:0) 4 | (7:0) - * 5 | 5 | - * 6 | 6 | - * 7-+ 7-+ - * 8 Custom csum enable = 1 8 Custom csum enable = 0 - * 9 General IPv4 checksum 9 General IPv4 checksum - * 10 General TCP checksum 10 General TCP checksum - * 11 General UDP checksum 11 General UDP checksum - * 12 Large Send Segment enable 12 Large Send Segment enable - * 13 Large Send Segment type 13 Large Send Segment type - * 14 VLAN tagged 14 VLAN tagged - * 15 Insert VLAN tag 15 Insert VLAN tag - * 16 IPv4 packet 16 IPv4 packet - * 17 Ethernet frame type 17 Ethernet frame type - * 18-+ 18-+ - * 19 | 19 | - * 20 | 20 | - * 21 | Custom csum offset 21 | - * 22 | (25:18) 22 | - * 23 | 23 | MSS (30:18) - * 24 | 24 | - * 25-+ 25 | - * 26-+ 26 | - * 27 | 27 | - * 28 | Reserved 28 | - * 29 | 29 | - * 30-+ 30-+ - * 31 End of packet 31 End of packet - */ -struct alx_txd { - __le16 len; - __le16 vlan_tag; - __le32 word1; - union { - __le64 addr; - struct { - __le32 pkt_len; - __le32 resvd; - } l; - } adrl; -} __packed; - -/* tpd word 1 */ -#define TPD_CXSUMSTART_MASK 0x00FF -#define TPD_CXSUMSTART_SHIFT 0 -#define TPD_L4HDROFFSET_MASK 0x00FF -#define TPD_L4HDROFFSET_SHIFT 0 -#define TPD_CXSUM_EN_MASK 0x0001 -#define TPD_CXSUM_EN_SHIFT 8 -#define TPD_IP_XSUM_MASK 0x0001 -#define TPD_IP_XSUM_SHIFT 9 -#define TPD_TCP_XSUM_MASK 0x0001 -#define TPD_TCP_XSUM_SHIFT 10 -#define TPD_UDP_XSUM_MASK 0x0001 -#define TPD_UDP_XSUM_SHIFT 11 -#define TPD_LSO_EN_MASK 0x0001 -#define TPD_LSO_EN_SHIFT 12 -#define TPD_LSO_V2_MASK 0x0001 -#define TPD_LSO_V2_SHIFT 13 -#define TPD_VLTAGGED_MASK 0x0001 -#define TPD_VLTAGGED_SHIFT 14 -#define TPD_INS_VLTAG_MASK 0x0001 -#define TPD_INS_VLTAG_SHIFT 15 -#define TPD_IPV4_MASK 0x0001 -#define TPD_IPV4_SHIFT 16 -#define TPD_ETHTYPE_MASK 0x0001 -#define TPD_ETHTYPE_SHIFT 17 -#define TPD_CXSUMOFFSET_MASK 0x00FF -#define TPD_CXSUMOFFSET_SHIFT 18 -#define TPD_MSS_MASK 0x1FFF -#define TPD_MSS_SHIFT 18 -#define TPD_EOP_MASK 0x0001 -#define TPD_EOP_SHIFT 31 - -#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK) - -/* Receive Free Descriptor */ -struct alx_rfd { - __le64 addr; /* data buffer address, length is - * declared in register --- every - * buffer has the same size - */ -} __packed; - -/* Receive Return Descriptor, contains 4 32-bit words. 
- * - * 31 16 0 - * +----------------+----------------+ - * | Word 0 | - * +----------------+----------------+ - * | Word 1: RSS Hash value | - * +----------------+----------------+ - * | Word 2 | - * +----------------+----------------+ - * | Word 3 | - * +----------------+----------------+ - * - * Word 0 depiction & Word 2 depiction: - * - * 0--+ 0--+ - * 1 | 1 | - * 2 | 2 | - * 3 | 3 | - * 4 | 4 | - * 5 | 5 | - * 6 | 6 | - * 7 | IP payload checksum 7 | VLAN tag - * 8 | (15:0) 8 | (15:0) - * 9 | 9 | - * 10 | 10 | - * 11 | 11 | - * 12 | 12 | - * 13 | 13 | - * 14 | 14 | - * 15-+ 15-+ - * 16-+ 16-+ - * 17 | Number of RFDs 17 | - * 18 | (19:16) 18 | - * 19-+ 19 | Protocol ID - * 20-+ 20 | (23:16) - * 21 | 21 | - * 22 | 22 | - * 23 | 23-+ - * 24 | 24 | Reserved - * 25 | Start index of RFD-ring 25-+ - * 26 | (31:20) 26 | RSS Q-num (27:25) - * 27 | 27-+ - * 28 | 28-+ - * 29 | 29 | RSS Hash algorithm - * 30 | 30 | (31:28) - * 31-+ 31-+ - * - * Word 3 depiction: - * - * 0--+ - * 1 | - * 2 | - * 3 | - * 4 | - * 5 | - * 6 | - * 7 | Packet length (include FCS) - * 8 | (13:0) - * 9 | - * 10 | - * 11 | - * 12 | - * 13-+ - * 14 L4 Header checksum error - * 15 IPv4 checksum error - * 16 VLAN tagged - * 17-+ - * 18 | Protocol ID (19:17) - * 19-+ - * 20 Receive error summary - * 21 FCS(CRC) error - * 22 Frame alignment error - * 23 Truncated packet - * 24 Runt packet - * 25 Incomplete packet due to insufficient rx-desc - * 26 Broadcast packet - * 27 Multicast packet - * 28 Ethernet type (EII or 802.3) - * 29 FIFO overflow - * 30 Length error (for 802.3, length field mismatch with actual len) - * 31 Updated, indicate to driver that this RRD is refreshed. - */ -struct alx_rrd { - __le32 word0; - __le32 rss_hash; - __le32 word2; - __le32 word3; -} __packed; - -/* rrd word 0 */ -#define RRD_XSUM_MASK 0xFFFF -#define RRD_XSUM_SHIFT 0 -#define RRD_NOR_MASK 0x000F -#define RRD_NOR_SHIFT 16 -#define RRD_SI_MASK 0x0FFF -#define RRD_SI_SHIFT 20 - -/* rrd word 2 */ -#define RRD_VLTAG_MASK 0xFFFF -#define RRD_VLTAG_SHIFT 0 -#define RRD_PID_MASK 0x00FF -#define RRD_PID_SHIFT 16 -/* non-ip packet */ -#define RRD_PID_NONIP 0 -/* ipv4(only) */ -#define RRD_PID_IPV4 1 -/* tcp/ipv6 */ -#define RRD_PID_IPV6TCP 2 -/* tcp/ipv4 */ -#define RRD_PID_IPV4TCP 3 -/* udp/ipv6 */ -#define RRD_PID_IPV6UDP 4 -/* udp/ipv4 */ -#define RRD_PID_IPV4UDP 5 -/* ipv6(only) */ -#define RRD_PID_IPV6 6 -/* LLDP packet */ -#define RRD_PID_LLDP 7 -/* 1588 packet */ -#define RRD_PID_1588 8 -#define RRD_RSSQ_MASK 0x0007 -#define RRD_RSSQ_SHIFT 25 -#define RRD_RSSALG_MASK 0x000F -#define RRD_RSSALG_SHIFT 28 -#define RRD_RSSALG_TCPV6 0x1 -#define RRD_RSSALG_IPV6 0x2 -#define RRD_RSSALG_TCPV4 0x4 -#define RRD_RSSALG_IPV4 0x8 - -/* rrd word 3 */ -#define RRD_PKTLEN_MASK 0x3FFF -#define RRD_PKTLEN_SHIFT 0 -#define RRD_ERR_L4_MASK 0x0001 -#define RRD_ERR_L4_SHIFT 14 -#define RRD_ERR_IPV4_MASK 0x0001 -#define RRD_ERR_IPV4_SHIFT 15 -#define RRD_VLTAGGED_MASK 0x0001 -#define RRD_VLTAGGED_SHIFT 16 -#define RRD_OLD_PID_MASK 0x0007 -#define RRD_OLD_PID_SHIFT 17 -#define RRD_ERR_RES_MASK 0x0001 -#define RRD_ERR_RES_SHIFT 20 -#define RRD_ERR_FCS_MASK 0x0001 -#define RRD_ERR_FCS_SHIFT 21 -#define RRD_ERR_FAE_MASK 0x0001 -#define RRD_ERR_FAE_SHIFT 22 -#define RRD_ERR_TRUNC_MASK 0x0001 -#define RRD_ERR_TRUNC_SHIFT 23 -#define RRD_ERR_RUNT_MASK 0x0001 -#define RRD_ERR_RUNT_SHIFT 24 -#define RRD_ERR_ICMP_MASK 0x0001 -#define RRD_ERR_ICMP_SHIFT 25 -#define RRD_BCAST_MASK 0x0001 -#define RRD_BCAST_SHIFT 26 -#define RRD_MCAST_MASK 0x0001 -#define RRD_MCAST_SHIFT 27 
-#define RRD_ETHTYPE_MASK 0x0001 -#define RRD_ETHTYPE_SHIFT 28 -#define RRD_ERR_FIFOV_MASK 0x0001 -#define RRD_ERR_FIFOV_SHIFT 29 -#define RRD_ERR_LEN_MASK 0x0001 -#define RRD_ERR_LEN_SHIFT 30 -#define RRD_UPDATED_MASK 0x0001 -#define RRD_UPDATED_SHIFT 31 - - -#define ALX_MAX_SETUP_LNK_CYCLE 50 - -/* for FlowControl */ -#define ALX_FC_RX 0x01 -#define ALX_FC_TX 0x02 -#define ALX_FC_ANEG 0x04 - -/* for sleep control */ -#define ALX_SLEEP_WOL_PHY 0x00000001 -#define ALX_SLEEP_WOL_MAGIC 0x00000002 -#define ALX_SLEEP_CIFS 0x00000004 -#define ALX_SLEEP_ACTIVE (ALX_SLEEP_WOL_PHY | \ - ALX_SLEEP_WOL_MAGIC | \ - ALX_SLEEP_CIFS) - -/* for RSS hash type */ -#define ALX_RSS_HASH_TYPE_IPV4 0x1 -#define ALX_RSS_HASH_TYPE_IPV4_TCP 0x2 -#define ALX_RSS_HASH_TYPE_IPV6 0x4 -#define ALX_RSS_HASH_TYPE_IPV6_TCP 0x8 -#define ALX_RSS_HASH_TYPE_ALL (ALX_RSS_HASH_TYPE_IPV4 | \ - ALX_RSS_HASH_TYPE_IPV4_TCP | \ - ALX_RSS_HASH_TYPE_IPV6 | \ - ALX_RSS_HASH_TYPE_IPV6_TCP) -#define ALX_DEF_RXBUF_SIZE 1536 -#define ALX_MAX_JUMBO_PKT_SIZE (9*1024) -#define ALX_MAX_TSO_PKT_SIZE (7*1024) -#define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE -#define ALX_MIN_FRAME_SIZE 68 -#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) - -#define ALX_MAX_RX_QUEUES 8 -#define ALX_MAX_TX_QUEUES 4 -#define ALX_MAX_HANDLED_INTRS 5 - -#define ALX_ISR_MISC (ALX_ISR_PCIE_LNKDOWN | \ - ALX_ISR_DMAW | \ - ALX_ISR_DMAR | \ - ALX_ISR_SMB | \ - ALX_ISR_MANU | \ - ALX_ISR_TIMER) - -#define ALX_ISR_FATAL (ALX_ISR_PCIE_LNKDOWN | \ - ALX_ISR_DMAW | ALX_ISR_DMAR) - -#define ALX_ISR_ALERT (ALX_ISR_RXF_OV | \ - ALX_ISR_TXF_UR | \ - ALX_ISR_RFD_UR) - -#define ALX_ISR_ALL_QUEUES (ALX_ISR_TX_Q0 | \ - ALX_ISR_TX_Q1 | \ - ALX_ISR_TX_Q2 | \ - ALX_ISR_TX_Q3 | \ - ALX_ISR_RX_Q0 | \ - ALX_ISR_RX_Q1 | \ - ALX_ISR_RX_Q2 | \ - ALX_ISR_RX_Q3 | \ - ALX_ISR_RX_Q4 | \ - ALX_ISR_RX_Q5 | \ - ALX_ISR_RX_Q6 | \ - ALX_ISR_RX_Q7) - -/* maximum interrupt vectors for msix */ -#define ALX_MAX_MSIX_INTRS 16 - -#define ALX_GET_FIELD(_data, _field) \ - (((_data) >> _field ## _SHIFT) & _field ## _MASK) - -#define ALX_SET_FIELD(_data, _field, _value) do { \ - (_data) &= ~(_field ## _MASK << _field ## _SHIFT); \ - (_data) |= ((_value) & _field ## _MASK) << _field ## _SHIFT;\ - } while (0) - -struct alx_hw { - struct pci_dev *pdev; - u8 __iomem *hw_addr; - - /* current & permanent mac addr */ - u8 mac_addr[ETH_ALEN]; - u8 perm_addr[ETH_ALEN]; - - u16 mtu; - u16 imt; - u8 dma_chnl; - u8 max_dma_chnl; - /* tpd threshold to trig INT */ - u32 ith_tpd; - u32 rx_ctrl; - u32 mc_hash[2]; - - u32 smb_timer; - /* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */ - int link_speed; - - /* auto-neg advertisement or force mode config */ - u32 adv_cfg; - u8 flowctrl; - - u32 sleep_ctrl; - - spinlock_t mdio_lock; - struct mdio_if_info mdio; - u16 phy_id[2]; - - /* PHY link patch flag */ - bool lnk_patch; -}; - -static inline int alx_hw_revision(struct alx_hw *hw) -{ - return hw->pdev->revision >> ALX_PCI_REVID_SHIFT; -} - -static inline bool alx_hw_with_cr(struct alx_hw *hw) -{ - return hw->pdev->revision & 1; -} - -static inline bool alx_hw_giga(struct alx_hw *hw) -{ - return hw->pdev->device & 1; -} - -static inline void alx_write_mem8(struct alx_hw *hw, u32 reg, u8 val) -{ - writeb(val, hw->hw_addr + reg); -} - -static inline void alx_write_mem16(struct alx_hw *hw, u32 reg, u16 val) -{ - writew(val, hw->hw_addr + reg); -} - -static inline u16 alx_read_mem16(struct alx_hw *hw, u32 reg) -{ - return readw(hw->hw_addr + reg); -} - -static inline void alx_write_mem32(struct alx_hw *hw, 
u32 reg, u32 val) -{ - writel(val, hw->hw_addr + reg); -} - -static inline u32 alx_read_mem32(struct alx_hw *hw, u32 reg) -{ - return readl(hw->hw_addr + reg); -} - -static inline void alx_post_write(struct alx_hw *hw) -{ - readl(hw->hw_addr); -} - -int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr); -void alx_reset_phy(struct alx_hw *hw); -void alx_reset_pcie(struct alx_hw *hw); -void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en); -int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl); -void alx_post_phy_link(struct alx_hw *hw); -int alx_pre_suspend(struct alx_hw *hw, int speed); -int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data); -int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data); -int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata); -int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data); -int alx_get_phy_link(struct alx_hw *hw, int *speed); -int alx_clear_phy_intr(struct alx_hw *hw); -int alx_config_wol(struct alx_hw *hw); -void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc); -void alx_start_mac(struct alx_hw *hw); -int alx_reset_mac(struct alx_hw *hw); -void alx_set_macaddr(struct alx_hw *hw, const u8 *addr); -bool alx_phy_configured(struct alx_hw *hw); -void alx_configure_basic(struct alx_hw *hw); -void alx_disable_rss(struct alx_hw *hw); -int alx_select_powersaving_speed(struct alx_hw *hw, int *speed); -bool alx_get_phy_info(struct alx_hw *hw); - -#endif diff --git a/trunk/drivers/net/ethernet/atheros/alx/main.c b/trunk/drivers/net/ethernet/atheros/alx/main.c deleted file mode 100644 index 418de8b13165..000000000000 --- a/trunk/drivers/net/ethernet/atheros/alx/main.c +++ /dev/null @@ -1,1625 +0,0 @@ -/* - * Copyright (c) 2013 Johannes Berg - * - * This file is free software: you may copy, redistribute and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation, either version 2 of the License, or (at your - * option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * This file incorporates work covered by the following copyright and - * permission notice: - * - * Copyright (c) 2012 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "alx.h" -#include "hw.h" -#include "reg.h" - -const char alx_drv_name[] = "alx"; - - -static void alx_free_txbuf(struct alx_priv *alx, int entry) -{ - struct alx_buffer *txb = &alx->txq.bufs[entry]; - - if (dma_unmap_len(txb, size)) { - dma_unmap_single(&alx->hw.pdev->dev, - dma_unmap_addr(txb, dma), - dma_unmap_len(txb, size), - DMA_TO_DEVICE); - dma_unmap_len_set(txb, size, 0); - } - - if (txb->skb) { - dev_kfree_skb_any(txb->skb); - txb->skb = NULL; - } -} - -static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) -{ - struct alx_rx_queue *rxq = &alx->rxq; - struct sk_buff *skb; - struct alx_buffer *cur_buf; - dma_addr_t dma; - u16 cur, next, count = 0; - - next = cur = rxq->write_idx; - if (++next == alx->rx_ringsz) - next = 0; - cur_buf = &rxq->bufs[cur]; - - while (!cur_buf->skb && next != rxq->read_idx) { - struct alx_rfd *rfd = &rxq->rfd[cur]; - - skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp); - if (!skb) - break; - dma = dma_map_single(&alx->hw.pdev->dev, - skb->data, alx->rxbuf_size, - DMA_FROM_DEVICE); - if (dma_mapping_error(&alx->hw.pdev->dev, dma)) { - dev_kfree_skb(skb); - break; - } - - /* Unfortunately, RX descriptor buffers must be 4-byte - * aligned, so we can't use IP alignment. - */ - if (WARN_ON(dma & 3)) { - dev_kfree_skb(skb); - break; - } - - cur_buf->skb = skb; - dma_unmap_len_set(cur_buf, size, alx->rxbuf_size); - dma_unmap_addr_set(cur_buf, dma, dma); - rfd->addr = cpu_to_le64(dma); - - cur = next; - if (++next == alx->rx_ringsz) - next = 0; - cur_buf = &rxq->bufs[cur]; - count++; - } - - if (count) { - /* flush all updates before updating hardware */ - wmb(); - rxq->write_idx = cur; - alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); - } - - return count; -} - -static inline int alx_tpd_avail(struct alx_priv *alx) -{ - struct alx_tx_queue *txq = &alx->txq; - - if (txq->write_idx >= txq->read_idx) - return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1; - return txq->read_idx - txq->write_idx - 1; -} - -static bool alx_clean_tx_irq(struct alx_priv *alx) -{ - struct alx_tx_queue *txq = &alx->txq; - u16 hw_read_idx, sw_read_idx; - unsigned int total_bytes = 0, total_packets = 0; - int budget = ALX_DEFAULT_TX_WORK; - - sw_read_idx = txq->read_idx; - hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX); - - if (sw_read_idx != hw_read_idx) { - while (sw_read_idx != hw_read_idx && budget > 0) { - struct sk_buff *skb; - - skb = txq->bufs[sw_read_idx].skb; - if (skb) { - total_bytes += skb->len; - total_packets++; - budget--; - } - - alx_free_txbuf(alx, sw_read_idx); - - if (++sw_read_idx == alx->tx_ringsz) - sw_read_idx = 0; - } - txq->read_idx = sw_read_idx; - - netdev_completed_queue(alx->dev, total_packets, total_bytes); - } - - if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) && - alx_tpd_avail(alx) > alx->tx_ringsz/4) - netif_wake_queue(alx->dev); - - return sw_read_idx == hw_read_idx; -} - -static void alx_schedule_link_check(struct alx_priv *alx) -{ - schedule_work(&alx->link_check_wk); -} - -static void alx_schedule_reset(struct alx_priv *alx) -{ - schedule_work(&alx->reset_wk); -} - -static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) -{ - struct alx_rx_queue *rxq = &alx->rxq; - struct alx_rrd *rrd; - struct alx_buffer *rxb; - struct sk_buff *skb; - u16 length, rfd_cleaned = 0; - - while (budget > 0) { - rrd = &rxq->rrd[rxq->rrd_read_idx]; - if (!(rrd->word3 & 
cpu_to_le32(1 << RRD_UPDATED_SHIFT))) - break; - rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT); - - if (ALX_GET_FIELD(le32_to_cpu(rrd->word0), - RRD_SI) != rxq->read_idx || - ALX_GET_FIELD(le32_to_cpu(rrd->word0), - RRD_NOR) != 1) { - alx_schedule_reset(alx); - return 0; - } - - rxb = &rxq->bufs[rxq->read_idx]; - dma_unmap_single(&alx->hw.pdev->dev, - dma_unmap_addr(rxb, dma), - dma_unmap_len(rxb, size), - DMA_FROM_DEVICE); - dma_unmap_len_set(rxb, size, 0); - skb = rxb->skb; - rxb->skb = NULL; - - if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) || - rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) { - rrd->word3 = 0; - dev_kfree_skb_any(skb); - goto next_pkt; - } - - length = ALX_GET_FIELD(le32_to_cpu(rrd->word3), - RRD_PKTLEN) - ETH_FCS_LEN; - skb_put(skb, length); - skb->protocol = eth_type_trans(skb, alx->dev); - - skb_checksum_none_assert(skb); - if (alx->dev->features & NETIF_F_RXCSUM && - !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) | - cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) { - switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2), - RRD_PID)) { - case RRD_PID_IPV6UDP: - case RRD_PID_IPV4UDP: - case RRD_PID_IPV4TCP: - case RRD_PID_IPV6TCP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - } - } - - napi_gro_receive(&alx->napi, skb); - budget--; - -next_pkt: - if (++rxq->read_idx == alx->rx_ringsz) - rxq->read_idx = 0; - if (++rxq->rrd_read_idx == alx->rx_ringsz) - rxq->rrd_read_idx = 0; - - if (++rfd_cleaned > ALX_RX_ALLOC_THRESH) - rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC); - } - - if (rfd_cleaned) - alx_refill_rx_ring(alx, GFP_ATOMIC); - - return budget > 0; -} - -static int alx_poll(struct napi_struct *napi, int budget) -{ - struct alx_priv *alx = container_of(napi, struct alx_priv, napi); - struct alx_hw *hw = &alx->hw; - bool complete = true; - unsigned long flags; - - complete = alx_clean_tx_irq(alx) && - alx_clean_rx_irq(alx, budget); - - if (!complete) - return 1; - - napi_complete(&alx->napi); - - /* enable interrupt */ - spin_lock_irqsave(&alx->irq_lock, flags); - alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0; - alx_write_mem32(hw, ALX_IMR, alx->int_mask); - spin_unlock_irqrestore(&alx->irq_lock, flags); - - alx_post_write(hw); - - return 0; -} - -static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr) -{ - struct alx_hw *hw = &alx->hw; - bool write_int_mask = false; - - spin_lock(&alx->irq_lock); - - /* ACK interrupt */ - alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS); - intr &= alx->int_mask; - - if (intr & ALX_ISR_FATAL) { - netif_warn(alx, hw, alx->dev, - "fatal interrupt 0x%x, resetting\n", intr); - alx_schedule_reset(alx); - goto out; - } - - if (intr & ALX_ISR_ALERT) - netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr); - - if (intr & ALX_ISR_PHY) { - /* suppress PHY interrupt, because the source - * is from PHY internal. only the internal status - * is cleared, the interrupt status could be cleared. 
- */ - alx->int_mask &= ~ALX_ISR_PHY; - write_int_mask = true; - alx_schedule_link_check(alx); - } - - if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) { - napi_schedule(&alx->napi); - /* mask rx/tx interrupt, enable them when napi complete */ - alx->int_mask &= ~ALX_ISR_ALL_QUEUES; - write_int_mask = true; - } - - if (write_int_mask) - alx_write_mem32(hw, ALX_IMR, alx->int_mask); - - alx_write_mem32(hw, ALX_ISR, 0); - - out: - spin_unlock(&alx->irq_lock); - return IRQ_HANDLED; -} - -static irqreturn_t alx_intr_msi(int irq, void *data) -{ - struct alx_priv *alx = data; - - return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR)); -} - -static irqreturn_t alx_intr_legacy(int irq, void *data) -{ - struct alx_priv *alx = data; - struct alx_hw *hw = &alx->hw; - u32 intr; - - intr = alx_read_mem32(hw, ALX_ISR); - - if (intr & ALX_ISR_DIS || !(intr & alx->int_mask)) - return IRQ_NONE; - - return alx_intr_handle(alx, intr); -} - -static void alx_init_ring_ptrs(struct alx_priv *alx) -{ - struct alx_hw *hw = &alx->hw; - u32 addr_hi = ((u64)alx->descmem.dma) >> 32; - - alx->rxq.read_idx = 0; - alx->rxq.write_idx = 0; - alx->rxq.rrd_read_idx = 0; - alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi); - alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma); - alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz); - alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma); - alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz); - alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size); - - alx->txq.read_idx = 0; - alx->txq.write_idx = 0; - alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi); - alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma); - alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz); - - /* load these pointers into the chip */ - alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR); -} - -static void alx_free_txring_buf(struct alx_priv *alx) -{ - struct alx_tx_queue *txq = &alx->txq; - int i; - - if (!txq->bufs) - return; - - for (i = 0; i < alx->tx_ringsz; i++) - alx_free_txbuf(alx, i); - - memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer)); - memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd)); - txq->write_idx = 0; - txq->read_idx = 0; - - netdev_reset_queue(alx->dev); -} - -static void alx_free_rxring_buf(struct alx_priv *alx) -{ - struct alx_rx_queue *rxq = &alx->rxq; - struct alx_buffer *cur_buf; - u16 i; - - if (rxq == NULL) - return; - - for (i = 0; i < alx->rx_ringsz; i++) { - cur_buf = rxq->bufs + i; - if (cur_buf->skb) { - dma_unmap_single(&alx->hw.pdev->dev, - dma_unmap_addr(cur_buf, dma), - dma_unmap_len(cur_buf, size), - DMA_FROM_DEVICE); - dev_kfree_skb(cur_buf->skb); - cur_buf->skb = NULL; - dma_unmap_len_set(cur_buf, size, 0); - dma_unmap_addr_set(cur_buf, dma, 0); - } - } - - rxq->write_idx = 0; - rxq->read_idx = 0; - rxq->rrd_read_idx = 0; -} - -static void alx_free_buffers(struct alx_priv *alx) -{ - alx_free_txring_buf(alx); - alx_free_rxring_buf(alx); -} - -static int alx_reinit_rings(struct alx_priv *alx) -{ - alx_free_buffers(alx); - - alx_init_ring_ptrs(alx); - - if (!alx_refill_rx_ring(alx, GFP_KERNEL)) - return -ENOMEM; - - return 0; -} - -static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash) -{ - u32 crc32, bit, reg; - - crc32 = ether_crc(ETH_ALEN, addr); - reg = (crc32 >> 31) & 0x1; - bit = (crc32 >> 26) & 0x1F; - - mc_hash[reg] |= BIT(bit); -} - -static void __alx_set_rx_mode(struct net_device *netdev) -{ - struct alx_priv *alx = netdev_priv(netdev); - struct alx_hw *hw = &alx->hw; - struct netdev_hw_addr *ha; - u32 
mc_hash[2] = {}; - - if (!(netdev->flags & IFF_ALLMULTI)) { - netdev_for_each_mc_addr(ha, netdev) - alx_add_mc_addr(hw, ha->addr, mc_hash); - - alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]); - alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]); - } - - hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN); - if (netdev->flags & IFF_PROMISC) - hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN; - if (netdev->flags & IFF_ALLMULTI) - hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN; - - alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); -} - -static void alx_set_rx_mode(struct net_device *netdev) -{ - __alx_set_rx_mode(netdev); -} - -static int alx_set_mac_address(struct net_device *netdev, void *data) -{ - struct alx_priv *alx = netdev_priv(netdev); - struct alx_hw *hw = &alx->hw; - struct sockaddr *addr = data; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - if (netdev->addr_assign_type & NET_ADDR_RANDOM) - netdev->addr_assign_type ^= NET_ADDR_RANDOM; - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); - alx_set_macaddr(hw, hw->mac_addr); - - return 0; -} - -static int alx_alloc_descriptors(struct alx_priv *alx) -{ - alx->txq.bufs = kcalloc(alx->tx_ringsz, - sizeof(struct alx_buffer), - GFP_KERNEL); - if (!alx->txq.bufs) - return -ENOMEM; - - alx->rxq.bufs = kcalloc(alx->rx_ringsz, - sizeof(struct alx_buffer), - GFP_KERNEL); - if (!alx->rxq.bufs) - goto out_free; - - /* physical tx/rx ring descriptors - * - * Allocate them as a single chunk because they must not cross a - * 4G boundary (hardware has a single register for high 32 bits - * of addresses only) - */ - alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz + - sizeof(struct alx_rrd) * alx->rx_ringsz + - sizeof(struct alx_rfd) * alx->rx_ringsz; - alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev, - alx->descmem.size, - &alx->descmem.dma, - GFP_KERNEL); - if (!alx->descmem.virt) - goto out_free; - - alx->txq.tpd = (void *)alx->descmem.virt; - alx->txq.tpd_dma = alx->descmem.dma; - - /* alignment requirement for next block */ - BUILD_BUG_ON(sizeof(struct alx_txd) % 8); - - alx->rxq.rrd = - (void *)((u8 *)alx->descmem.virt + - sizeof(struct alx_txd) * alx->tx_ringsz); - alx->rxq.rrd_dma = alx->descmem.dma + - sizeof(struct alx_txd) * alx->tx_ringsz; - - /* alignment requirement for next block */ - BUILD_BUG_ON(sizeof(struct alx_rrd) % 8); - - alx->rxq.rfd = - (void *)((u8 *)alx->descmem.virt + - sizeof(struct alx_txd) * alx->tx_ringsz + - sizeof(struct alx_rrd) * alx->rx_ringsz); - alx->rxq.rfd_dma = alx->descmem.dma + - sizeof(struct alx_txd) * alx->tx_ringsz + - sizeof(struct alx_rrd) * alx->rx_ringsz; - - return 0; -out_free: - kfree(alx->txq.bufs); - kfree(alx->rxq.bufs); - return -ENOMEM; -} - -static int alx_alloc_rings(struct alx_priv *alx) -{ - int err; - - err = alx_alloc_descriptors(alx); - if (err) - return err; - - alx->int_mask &= ~ALX_ISR_ALL_QUEUES; - alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0; - alx->tx_ringsz = alx->tx_ringsz; - - netif_napi_add(alx->dev, &alx->napi, alx_poll, 64); - - alx_reinit_rings(alx); - return 0; -} - -static void alx_free_rings(struct alx_priv *alx) -{ - netif_napi_del(&alx->napi); - alx_free_buffers(alx); - - kfree(alx->txq.bufs); - kfree(alx->rxq.bufs); - - dma_free_coherent(&alx->hw.pdev->dev, - alx->descmem.size, - alx->descmem.virt, - alx->descmem.dma); -} - -static void alx_config_vector_mapping(struct alx_priv *alx) -{ - struct alx_hw *hw = &alx->hw; - - alx_write_mem32(hw, 
ALX_MSI_MAP_TBL1, 0); - alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0); - alx_write_mem32(hw, ALX_MSI_ID_MAP, 0); -} - -static void alx_irq_enable(struct alx_priv *alx) -{ - struct alx_hw *hw = &alx->hw; - - /* level-1 interrupt switch */ - alx_write_mem32(hw, ALX_ISR, 0); - alx_write_mem32(hw, ALX_IMR, alx->int_mask); - alx_post_write(hw); -} - -static void alx_irq_disable(struct alx_priv *alx) -{ - struct alx_hw *hw = &alx->hw; - - alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS); - alx_write_mem32(hw, ALX_IMR, 0); - alx_post_write(hw); - - synchronize_irq(alx->hw.pdev->irq); -} - -static int alx_request_irq(struct alx_priv *alx) -{ - struct pci_dev *pdev = alx->hw.pdev; - struct alx_hw *hw = &alx->hw; - int err; - u32 msi_ctrl; - - msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT; - - if (!pci_enable_msi(alx->hw.pdev)) { - alx->msi = true; - - alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, - msi_ctrl | ALX_MSI_MASK_SEL_LINE); - err = request_irq(pdev->irq, alx_intr_msi, 0, - alx->dev->name, alx); - if (!err) - goto out; - /* fall back to legacy interrupt */ - pci_disable_msi(alx->hw.pdev); - } - - alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0); - err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED, - alx->dev->name, alx); -out: - if (!err) - alx_config_vector_mapping(alx); - return err; -} - -static void alx_free_irq(struct alx_priv *alx) -{ - struct pci_dev *pdev = alx->hw.pdev; - - free_irq(pdev->irq, alx); - - if (alx->msi) { - pci_disable_msi(alx->hw.pdev); - alx->msi = false; - } -} - -static int alx_identify_hw(struct alx_priv *alx) -{ - struct alx_hw *hw = &alx->hw; - int rev = alx_hw_revision(hw); - - if (rev > ALX_REV_C0) - return -EINVAL; - - hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2; - - return 0; -} - -static int alx_init_sw(struct alx_priv *alx) -{ - struct pci_dev *pdev = alx->hw.pdev; - struct alx_hw *hw = &alx->hw; - int err; - - err = alx_identify_hw(alx); - if (err) { - dev_err(&pdev->dev, "unrecognized chip, aborting\n"); - return err; - } - - alx->hw.lnk_patch = - pdev->device == ALX_DEV_ID_AR8161 && - pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC && - pdev->subsystem_device == 0x0091 && - pdev->revision == 0; - - hw->smb_timer = 400; - hw->mtu = alx->dev->mtu; - alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8); - alx->tx_ringsz = 256; - alx->rx_ringsz = 512; - hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY; - hw->imt = 200; - alx->int_mask = ALX_ISR_MISC; - hw->dma_chnl = hw->max_dma_chnl; - hw->ith_tpd = alx->tx_ringsz / 3; - hw->link_speed = SPEED_UNKNOWN; - hw->adv_cfg = ADVERTISED_Autoneg | - ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_1000baseT_Full; - hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX; - - hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN | - ALX_MAC_CTRL_MHASH_ALG_HI5B | - ALX_MAC_CTRL_BRD_EN | - ALX_MAC_CTRL_PCRCE | - ALX_MAC_CTRL_CRCE | - ALX_MAC_CTRL_RXFC_EN | - ALX_MAC_CTRL_TXFC_EN | - 7 << ALX_MAC_CTRL_PRMBLEN_SHIFT; - - return err; -} - - -static netdev_features_t alx_fix_features(struct net_device *netdev, - netdev_features_t features) -{ - if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE) - features &= ~(NETIF_F_TSO | NETIF_F_TSO6); - - return features; -} - -static void alx_netif_stop(struct alx_priv *alx) -{ - alx->dev->trans_start = jiffies; - if (netif_carrier_ok(alx->dev)) { - netif_carrier_off(alx->dev); - netif_tx_disable(alx->dev); - napi_disable(&alx->napi); - } -} - -static void alx_halt(struct alx_priv *alx) -{ - struct alx_hw *hw = &alx->hw; - - 
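	/* Descriptive note on the teardown sequence below: it runs top-down,
	 * quiescing the net stack and NAPI first (alx_netif_stop), marking the
	 * link speed as unknown, resetting the MAC, turning off L0s/L1 ASPM,
	 * masking and synchronizing interrupts, and only then freeing the
	 * RX/TX buffers once nothing can still touch them.
	 */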
alx_netif_stop(alx); - hw->link_speed = SPEED_UNKNOWN; - - alx_reset_mac(hw); - - /* disable l0s/l1 */ - alx_enable_aspm(hw, false, false); - alx_irq_disable(alx); - alx_free_buffers(alx); -} - -static void alx_configure(struct alx_priv *alx) -{ - struct alx_hw *hw = &alx->hw; - - alx_configure_basic(hw); - alx_disable_rss(hw); - __alx_set_rx_mode(alx->dev); - - alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); -} - -static void alx_activate(struct alx_priv *alx) -{ - /* hardware setting lost, restore it */ - alx_reinit_rings(alx); - alx_configure(alx); - - /* clear old interrupts */ - alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS); - - alx_irq_enable(alx); - - alx_schedule_link_check(alx); -} - -static void alx_reinit(struct alx_priv *alx) -{ - ASSERT_RTNL(); - - alx_halt(alx); - alx_activate(alx); -} - -static int alx_change_mtu(struct net_device *netdev, int mtu) -{ - struct alx_priv *alx = netdev_priv(netdev); - int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - - if ((max_frame < ALX_MIN_FRAME_SIZE) || - (max_frame > ALX_MAX_FRAME_SIZE)) - return -EINVAL; - - if (netdev->mtu == mtu) - return 0; - - netdev->mtu = mtu; - alx->hw.mtu = mtu; - alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ? - ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE; - netdev_update_features(netdev); - if (netif_running(netdev)) - alx_reinit(alx); - return 0; -} - -static void alx_netif_start(struct alx_priv *alx) -{ - netif_tx_wake_all_queues(alx->dev); - napi_enable(&alx->napi); - netif_carrier_on(alx->dev); -} - -static int __alx_open(struct alx_priv *alx, bool resume) -{ - int err; - - if (!resume) - netif_carrier_off(alx->dev); - - err = alx_alloc_rings(alx); - if (err) - return err; - - alx_configure(alx); - - err = alx_request_irq(alx); - if (err) - goto out_free_rings; - - /* clear old interrupts */ - alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS); - - alx_irq_enable(alx); - - if (!resume) - netif_tx_start_all_queues(alx->dev); - - alx_schedule_link_check(alx); - return 0; - -out_free_rings: - alx_free_rings(alx); - return err; -} - -static void __alx_stop(struct alx_priv *alx) -{ - alx_halt(alx); - alx_free_irq(alx); - alx_free_rings(alx); -} - -static const char *alx_speed_desc(u16 speed) -{ - switch (speed) { - case SPEED_1000 + DUPLEX_FULL: - return "1 Gbps Full"; - case SPEED_100 + DUPLEX_FULL: - return "100 Mbps Full"; - case SPEED_100 + DUPLEX_HALF: - return "100 Mbps Half"; - case SPEED_10 + DUPLEX_FULL: - return "10 Mbps Full"; - case SPEED_10 + DUPLEX_HALF: - return "10 Mbps Half"; - default: - return "Unknown speed"; - } -} - -static void alx_check_link(struct alx_priv *alx) -{ - struct alx_hw *hw = &alx->hw; - unsigned long flags; - int speed, old_speed; - int err; - - /* clear PHY internal interrupt status, otherwise the main - * interrupt status will be asserted forever - */ - alx_clear_phy_intr(hw); - - err = alx_get_phy_link(hw, &speed); - if (err < 0) - goto reset; - - spin_lock_irqsave(&alx->irq_lock, flags); - alx->int_mask |= ALX_ISR_PHY; - alx_write_mem32(hw, ALX_IMR, alx->int_mask); - spin_unlock_irqrestore(&alx->irq_lock, flags); - - old_speed = hw->link_speed; - - if (old_speed == speed) - return; - hw->link_speed = speed; - - if (speed != SPEED_UNKNOWN) { - netif_info(alx, link, alx->dev, - "NIC Up: %s\n", alx_speed_desc(speed)); - alx_post_phy_link(hw); - alx_enable_aspm(hw, true, true); - alx_start_mac(hw); - - if (old_speed == SPEED_UNKNOWN) - alx_netif_start(alx); - } else { - /* link is now down */ - alx_netif_stop(alx); - netif_info(alx, link, alx->dev, "Link 
Down\n"); - err = alx_reset_mac(hw); - if (err) - goto reset; - alx_irq_disable(alx); - - /* MAC reset causes all HW settings to be lost, restore all */ - err = alx_reinit_rings(alx); - if (err) - goto reset; - alx_configure(alx); - alx_enable_aspm(hw, false, true); - alx_post_phy_link(hw); - alx_irq_enable(alx); - } - - return; - -reset: - alx_schedule_reset(alx); -} - -static int alx_open(struct net_device *netdev) -{ - return __alx_open(netdev_priv(netdev), false); -} - -static int alx_stop(struct net_device *netdev) -{ - __alx_stop(netdev_priv(netdev)); - return 0; -} - -static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en) -{ - struct alx_priv *alx = pci_get_drvdata(pdev); - struct net_device *netdev = alx->dev; - struct alx_hw *hw = &alx->hw; - int err, speed; - - netif_device_detach(netdev); - - if (netif_running(netdev)) - __alx_stop(alx); - -#ifdef CONFIG_PM_SLEEP - err = pci_save_state(pdev); - if (err) - return err; -#endif - - err = alx_select_powersaving_speed(hw, &speed); - if (err) - return err; - err = alx_clear_phy_intr(hw); - if (err) - return err; - err = alx_pre_suspend(hw, speed); - if (err) - return err; - err = alx_config_wol(hw); - if (err) - return err; - - *wol_en = false; - if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) { - netif_info(alx, wol, netdev, - "wol: ctrl=%X, speed=%X\n", - hw->sleep_ctrl, speed); - device_set_wakeup_enable(&pdev->dev, true); - *wol_en = true; - } - - pci_disable_device(pdev); - - return 0; -} - -static void alx_shutdown(struct pci_dev *pdev) -{ - int err; - bool wol_en; - - err = __alx_shutdown(pdev, &wol_en); - if (!err) { - pci_wake_from_d3(pdev, wol_en); - pci_set_power_state(pdev, PCI_D3hot); - } else { - dev_err(&pdev->dev, "shutdown fail %d\n", err); - } -} - -static void alx_link_check(struct work_struct *work) -{ - struct alx_priv *alx; - - alx = container_of(work, struct alx_priv, link_check_wk); - - rtnl_lock(); - alx_check_link(alx); - rtnl_unlock(); -} - -static void alx_reset(struct work_struct *work) -{ - struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk); - - rtnl_lock(); - alx_reinit(alx); - rtnl_unlock(); -} - -static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first) -{ - u8 cso, css; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return 0; - - cso = skb_checksum_start_offset(skb); - if (cso & 1) - return -EINVAL; - - css = cso + skb->csum_offset; - first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT); - first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT); - first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT); - - return 0; -} - -static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb) -{ - struct alx_tx_queue *txq = &alx->txq; - struct alx_txd *tpd, *first_tpd; - dma_addr_t dma; - int maplen, f, first_idx = txq->write_idx; - - first_tpd = &txq->tpd[txq->write_idx]; - tpd = first_tpd; - - maplen = skb_headlen(skb); - dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen, - DMA_TO_DEVICE); - if (dma_mapping_error(&alx->hw.pdev->dev, dma)) - goto err_dma; - - dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen); - dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma); - - tpd->adrl.addr = cpu_to_le64(dma); - tpd->len = cpu_to_le16(maplen); - - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { - struct skb_frag_struct *frag; - - frag = &skb_shinfo(skb)->frags[f]; - - if (++txq->write_idx == alx->tx_ringsz) - txq->write_idx = 0; - tpd = &txq->tpd[txq->write_idx]; - - tpd->word1 = first_tpd->word1; - - maplen = skb_frag_size(frag); - dma = 
skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0, - maplen, DMA_TO_DEVICE); - if (dma_mapping_error(&alx->hw.pdev->dev, dma)) - goto err_dma; - dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen); - dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma); - - tpd->adrl.addr = cpu_to_le64(dma); - tpd->len = cpu_to_le16(maplen); - } - - /* last TPD, set EOP flag and store skb */ - tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT); - txq->bufs[txq->write_idx].skb = skb; - - if (++txq->write_idx == alx->tx_ringsz) - txq->write_idx = 0; - - return 0; - -err_dma: - f = first_idx; - while (f != txq->write_idx) { - alx_free_txbuf(alx, f); - if (++f == alx->tx_ringsz) - f = 0; - } - return -ENOMEM; -} - -static netdev_tx_t alx_start_xmit(struct sk_buff *skb, - struct net_device *netdev) -{ - struct alx_priv *alx = netdev_priv(netdev); - struct alx_tx_queue *txq = &alx->txq; - struct alx_txd *first; - int tpdreq = skb_shinfo(skb)->nr_frags + 1; - - if (alx_tpd_avail(alx) < tpdreq) { - netif_stop_queue(alx->dev); - goto drop; - } - - first = &txq->tpd[txq->write_idx]; - memset(first, 0, sizeof(*first)); - - if (alx_tx_csum(skb, first)) - goto drop; - - if (alx_map_tx_skb(alx, skb) < 0) - goto drop; - - netdev_sent_queue(alx->dev, skb->len); - - /* flush updates before updating hardware */ - wmb(); - alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx); - - if (alx_tpd_avail(alx) < alx->tx_ringsz/8) - netif_stop_queue(alx->dev); - - return NETDEV_TX_OK; - -drop: - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - -static void alx_tx_timeout(struct net_device *dev) -{ - struct alx_priv *alx = netdev_priv(dev); - - alx_schedule_reset(alx); -} - -static int alx_mdio_read(struct net_device *netdev, - int prtad, int devad, u16 addr) -{ - struct alx_priv *alx = netdev_priv(netdev); - struct alx_hw *hw = &alx->hw; - u16 val; - int err; - - if (prtad != hw->mdio.prtad) - return -EINVAL; - - if (devad == MDIO_DEVAD_NONE) - err = alx_read_phy_reg(hw, addr, &val); - else - err = alx_read_phy_ext(hw, devad, addr, &val); - - if (err) - return err; - return val; -} - -static int alx_mdio_write(struct net_device *netdev, - int prtad, int devad, u16 addr, u16 val) -{ - struct alx_priv *alx = netdev_priv(netdev); - struct alx_hw *hw = &alx->hw; - - if (prtad != hw->mdio.prtad) - return -EINVAL; - - if (devad == MDIO_DEVAD_NONE) - return alx_write_phy_reg(hw, addr, val); - - return alx_write_phy_ext(hw, devad, addr, val); -} - -static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - struct alx_priv *alx = netdev_priv(netdev); - - if (!netif_running(netdev)) - return -EAGAIN; - - return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd); -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void alx_poll_controller(struct net_device *netdev) -{ - struct alx_priv *alx = netdev_priv(netdev); - - if (alx->msi) - alx_intr_msi(0, alx); - else - alx_intr_legacy(0, alx); -} -#endif - -static const struct net_device_ops alx_netdev_ops = { - .ndo_open = alx_open, - .ndo_stop = alx_stop, - .ndo_start_xmit = alx_start_xmit, - .ndo_set_rx_mode = alx_set_rx_mode, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = alx_set_mac_address, - .ndo_change_mtu = alx_change_mtu, - .ndo_do_ioctl = alx_ioctl, - .ndo_tx_timeout = alx_tx_timeout, - .ndo_fix_features = alx_fix_features, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = alx_poll_controller, -#endif -}; - -static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct net_device *netdev; - struct alx_priv 
*alx; - struct alx_hw *hw; - bool phy_configured; - int bars, pm_cap, err; - - err = pci_enable_device_mem(pdev); - if (err) - return err; - - /* The alx chip can DMA to 64-bit addresses, but it uses a single - * shared register for the high 32 bits, so only a single, aligned, - * 4 GB physical address range can be used for descriptors. - */ - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && - !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { - dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n"); - } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "No usable DMA config, aborting\n"); - goto out_pci_disable; - } - } - } - - bars = pci_select_bars(pdev, IORESOURCE_MEM); - err = pci_request_selected_regions(pdev, bars, alx_drv_name); - if (err) { - dev_err(&pdev->dev, - "pci_request_selected_regions failed(bars:%d)\n", bars); - goto out_pci_disable; - } - - pci_enable_pcie_error_reporting(pdev); - pci_set_master(pdev); - - pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); - if (pm_cap == 0) { - dev_err(&pdev->dev, - "Can't find power management capability, aborting\n"); - err = -EIO; - goto out_pci_release; - } - - err = pci_set_power_state(pdev, PCI_D0); - if (err) - goto out_pci_release; - - netdev = alloc_etherdev(sizeof(*alx)); - if (!netdev) { - err = -ENOMEM; - goto out_pci_release; - } - - SET_NETDEV_DEV(netdev, &pdev->dev); - alx = netdev_priv(netdev); - alx->dev = netdev; - alx->hw.pdev = pdev; - alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP | - NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL; - hw = &alx->hw; - pci_set_drvdata(pdev, alx); - - hw->hw_addr = pci_ioremap_bar(pdev, 0); - if (!hw->hw_addr) { - dev_err(&pdev->dev, "cannot map device registers\n"); - err = -EIO; - goto out_free_netdev; - } - - netdev->netdev_ops = &alx_netdev_ops; - SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops); - netdev->irq = pdev->irq; - netdev->watchdog_timeo = ALX_WATCHDOG_TIME; - - if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG) - pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; - - err = alx_init_sw(alx); - if (err) { - dev_err(&pdev->dev, "net device private data init failed\n"); - goto out_unmap; - } - - alx_reset_pcie(hw); - - phy_configured = alx_phy_configured(hw); - - if (!phy_configured) - alx_reset_phy(hw); - - err = alx_reset_mac(hw); - if (err) { - dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err); - goto out_unmap; - } - - /* setup link to put it in a known good starting state */ - if (!phy_configured) { - err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl); - if (err) { - dev_err(&pdev->dev, - "failed to configure PHY speed/duplex (err=%d)\n", - err); - goto out_unmap; - } - } - - netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; - - if (alx_get_perm_macaddr(hw, hw->perm_addr)) { - dev_warn(&pdev->dev, - "Invalid permanent address programmed, using random one\n"); - eth_hw_addr_random(netdev); - memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len); - } - - memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN); - memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN); - memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN); - - hw->mdio.prtad = 0; - hw->mdio.mmds = 0; - hw->mdio.dev = netdev; - hw->mdio.mode_support = MDIO_SUPPORTS_C45 | - MDIO_SUPPORTS_C22 | - MDIO_EMULATE_C22; - hw->mdio.mdio_read = alx_mdio_read; - hw->mdio.mdio_write = alx_mdio_write; - - if (!alx_get_phy_info(hw)) { - dev_err(&pdev->dev, "failed 
to identify PHY\n"); - err = -EIO; - goto out_unmap; - } - - INIT_WORK(&alx->link_check_wk, alx_link_check); - INIT_WORK(&alx->reset_wk, alx_reset); - spin_lock_init(&alx->hw.mdio_lock); - spin_lock_init(&alx->irq_lock); - - netif_carrier_off(netdev); - - err = register_netdev(netdev); - if (err) { - dev_err(&pdev->dev, "register netdevice failed\n"); - goto out_unmap; - } - - device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl); - - netdev_info(netdev, - "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n", - netdev->dev_addr); - - return 0; - -out_unmap: - iounmap(hw->hw_addr); -out_free_netdev: - free_netdev(netdev); -out_pci_release: - pci_release_selected_regions(pdev, bars); -out_pci_disable: - pci_disable_device(pdev); - return err; -} - -static void alx_remove(struct pci_dev *pdev) -{ - struct alx_priv *alx = pci_get_drvdata(pdev); - struct alx_hw *hw = &alx->hw; - - cancel_work_sync(&alx->link_check_wk); - cancel_work_sync(&alx->reset_wk); - - /* restore permanent mac address */ - alx_set_macaddr(hw, hw->perm_addr); - - unregister_netdev(alx->dev); - iounmap(hw->hw_addr); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); - - pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - - free_netdev(alx->dev); -} - -#ifdef CONFIG_PM_SLEEP -static int alx_suspend(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - int err; - bool wol_en; - - err = __alx_shutdown(pdev, &wol_en); - if (err) { - dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err); - return err; - } - - if (wol_en) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } - - return 0; -} - -static int alx_resume(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct alx_priv *alx = pci_get_drvdata(pdev); - struct net_device *netdev = alx->dev; - struct alx_hw *hw = &alx->hw; - int err; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - pci_save_state(pdev); - - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - - hw->link_speed = SPEED_UNKNOWN; - alx->int_mask = ALX_ISR_MISC; - - alx_reset_pcie(hw); - alx_reset_phy(hw); - - err = alx_reset_mac(hw); - if (err) { - netif_err(alx, hw, alx->dev, - "resume:reset_mac fail %d\n", err); - return -EIO; - } - - err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl); - if (err) { - netif_err(alx, hw, alx->dev, - "resume:setup_speed_duplex fail %d\n", err); - return -EIO; - } - - if (netif_running(netdev)) { - err = __alx_open(alx, true); - if (err) - return err; - } - - netif_device_attach(netdev); - - return err; -} -#endif - -static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - struct alx_priv *alx = pci_get_drvdata(pdev); - struct net_device *netdev = alx->dev; - pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET; - - dev_info(&pdev->dev, "pci error detected\n"); - - rtnl_lock(); - - if (netif_running(netdev)) { - netif_device_detach(netdev); - alx_halt(alx); - } - - if (state == pci_channel_io_perm_failure) - rc = PCI_ERS_RESULT_DISCONNECT; - else - pci_disable_device(pdev); - - rtnl_unlock(); - - return rc; -} - -static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev) -{ - struct alx_priv *alx = pci_get_drvdata(pdev); - struct alx_hw *hw = &alx->hw; - pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; - - dev_info(&pdev->dev, "pci error slot reset\n"); - - rtnl_lock(); - - if (pci_enable_device(pdev)) 
{ - dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n"); - goto out; - } - - pci_set_master(pdev); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - - alx_reset_pcie(hw); - if (!alx_reset_mac(hw)) - rc = PCI_ERS_RESULT_RECOVERED; -out: - pci_cleanup_aer_uncorrect_error_status(pdev); - - rtnl_unlock(); - - return rc; -} - -static void alx_pci_error_resume(struct pci_dev *pdev) -{ - struct alx_priv *alx = pci_get_drvdata(pdev); - struct net_device *netdev = alx->dev; - - dev_info(&pdev->dev, "pci error resume\n"); - - rtnl_lock(); - - if (netif_running(netdev)) { - alx_activate(alx); - netif_device_attach(netdev); - } - - rtnl_unlock(); -} - -static const struct pci_error_handlers alx_err_handlers = { - .error_detected = alx_pci_error_detected, - .slot_reset = alx_pci_error_slot_reset, - .resume = alx_pci_error_resume, -}; - -#ifdef CONFIG_PM_SLEEP -static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); -#define ALX_PM_OPS (&alx_pm_ops) -#else -#define ALX_PM_OPS NULL -#endif - -static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = { - { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161), - .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, - { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200), - .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, - { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), - .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, - { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, - { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) }, - {} -}; - -static struct pci_driver alx_driver = { - .name = alx_drv_name, - .id_table = alx_pci_tbl, - .probe = alx_probe, - .remove = alx_remove, - .shutdown = alx_shutdown, - .err_handler = &alx_err_handlers, - .driver.pm = ALX_PM_OPS, -}; - -module_pci_driver(alx_driver); -MODULE_DEVICE_TABLE(pci, alx_pci_tbl); -MODULE_AUTHOR("Johannes Berg "); -MODULE_AUTHOR("Qualcomm Corporation, "); -MODULE_DESCRIPTION( - "Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/net/ethernet/atheros/alx/reg.h b/trunk/drivers/net/ethernet/atheros/alx/reg.h deleted file mode 100644 index e4358c98bc4e..000000000000 --- a/trunk/drivers/net/ethernet/atheros/alx/reg.h +++ /dev/null @@ -1,810 +0,0 @@ -/* - * Copyright (c) 2013 Johannes Berg - * - * This file is free software: you may copy, redistribute and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation, either version 2 of the License, or (at your - * option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * This file incorporates work covered by the following copyright and - * permission notice: - * - * Copyright (c) 2012 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#ifndef ALX_REG_H -#define ALX_REG_H - -#define ALX_DEV_ID_AR8161 0x1091 -#define ALX_DEV_ID_E2200 0xe091 -#define ALX_DEV_ID_AR8162 0x1090 -#define ALX_DEV_ID_AR8171 0x10A1 -#define ALX_DEV_ID_AR8172 0x10A0 - -/* rev definition, - * bit(0): with xD support - * bit(1): with Card Reader function - * bit(7:2): real revision - */ -#define ALX_PCI_REVID_SHIFT 3 -#define ALX_REV_A0 0 -#define ALX_REV_A1 1 -#define ALX_REV_B0 2 -#define ALX_REV_C0 3 - -#define ALX_DEV_CTRL 0x0060 -#define ALX_DEV_CTRL_MAXRRS_MIN 2 - -#define ALX_MSIX_MASK 0x0090 - -#define ALX_UE_SVRT 0x010C -#define ALX_UE_SVRT_FCPROTERR BIT(13) -#define ALX_UE_SVRT_DLPROTERR BIT(4) - -/* eeprom & flash load register */ -#define ALX_EFLD 0x0204 -#define ALX_EFLD_F_EXIST BIT(10) -#define ALX_EFLD_E_EXIST BIT(9) -#define ALX_EFLD_STAT BIT(5) -#define ALX_EFLD_START BIT(0) - -/* eFuse load register */ -#define ALX_SLD 0x0218 -#define ALX_SLD_STAT BIT(12) -#define ALX_SLD_START BIT(11) -#define ALX_SLD_MAX_TO 100 - -#define ALX_PDLL_TRNS1 0x1104 -#define ALX_PDLL_TRNS1_D3PLLOFF_EN BIT(11) - -#define ALX_PMCTRL 0x12F8 -#define ALX_PMCTRL_HOTRST_WTEN BIT(31) -/* bit30: L0s/L1 controlled by MAC based on throughput(setting in 15A0) */ -#define ALX_PMCTRL_ASPM_FCEN BIT(30) -#define ALX_PMCTRL_SADLY_EN BIT(29) -#define ALX_PMCTRL_LCKDET_TIMER_MASK 0xF -#define ALX_PMCTRL_LCKDET_TIMER_SHIFT 24 -#define ALX_PMCTRL_LCKDET_TIMER_DEF 0xC -/* bit[23:20] if pm_request_l1 time > @, then enter L0s not L1 */ -#define ALX_PMCTRL_L1REQ_TO_MASK 0xF -#define ALX_PMCTRL_L1REQ_TO_SHIFT 20 -#define ALX_PMCTRL_L1REG_TO_DEF 0xF -#define ALX_PMCTRL_TXL1_AFTER_L0S BIT(19) -#define ALX_PMCTRL_L1_TIMER_MASK 0x7 -#define ALX_PMCTRL_L1_TIMER_SHIFT 16 -#define ALX_PMCTRL_L1_TIMER_16US 4 -#define ALX_PMCTRL_RCVR_WT_1US BIT(15) -/* bit13: enable pcie clk switch in L1 state */ -#define ALX_PMCTRL_L1_CLKSW_EN BIT(13) -#define ALX_PMCTRL_L0S_EN BIT(12) -#define ALX_PMCTRL_RXL1_AFTER_L0S BIT(11) -#define ALX_PMCTRL_L1_BUFSRX_EN BIT(7) -/* bit6: power down serdes RX */ -#define ALX_PMCTRL_L1_SRDSRX_PWD BIT(6) -#define ALX_PMCTRL_L1_SRDSPLL_EN BIT(5) -#define ALX_PMCTRL_L1_SRDS_EN BIT(4) -#define ALX_PMCTRL_L1_EN BIT(3) - -/*******************************************************/ -/* following registers are mapped only to memory space */ -/*******************************************************/ - -#define ALX_MASTER 0x1400 -/* bit12: 1:alwys select pclk from serdes, not sw to 25M */ -#define ALX_MASTER_PCLKSEL_SRDS BIT(12) -/* bit11: irq moduration for rx */ -#define ALX_MASTER_IRQMOD2_EN BIT(11) -/* bit10: irq moduration for tx/rx */ -#define ALX_MASTER_IRQMOD1_EN BIT(10) -#define ALX_MASTER_SYSALVTIMER_EN BIT(7) -#define ALX_MASTER_OOB_DIS BIT(6) -/* bit5: wakeup without pcie clk */ -#define ALX_MASTER_WAKEN_25M BIT(5) -/* bit0: MAC & DMA reset */ -#define ALX_MASTER_DMA_MAC_RST BIT(0) -#define ALX_DMA_MAC_RST_TO 50 - -#define ALX_IRQ_MODU_TIMER 0x1408 -#define ALX_IRQ_MODU_TIMER1_MASK 0xFFFF -#define ALX_IRQ_MODU_TIMER1_SHIFT 0 - -#define ALX_PHY_CTRL 0x140C -#define ALX_PHY_CTRL_100AB_EN BIT(17) -/* bit14: affect MAC & PHY, go to low power sts */ -#define ALX_PHY_CTRL_POWER_DOWN BIT(14) -/* bit13: 1:pll always ON, 0:can switch in 
lpw */ -#define ALX_PHY_CTRL_PLL_ON BIT(13) -#define ALX_PHY_CTRL_RST_ANALOG BIT(12) -#define ALX_PHY_CTRL_HIB_PULSE BIT(11) -#define ALX_PHY_CTRL_HIB_EN BIT(10) -#define ALX_PHY_CTRL_IDDQ BIT(7) -#define ALX_PHY_CTRL_GATE_25M BIT(5) -#define ALX_PHY_CTRL_LED_MODE BIT(2) -/* bit0: out of dsp RST state */ -#define ALX_PHY_CTRL_DSPRST_OUT BIT(0) -#define ALX_PHY_CTRL_DSPRST_TO 80 -#define ALX_PHY_CTRL_CLS (ALX_PHY_CTRL_LED_MODE | \ - ALX_PHY_CTRL_100AB_EN | \ - ALX_PHY_CTRL_PLL_ON) - -#define ALX_MAC_STS 0x1410 -#define ALX_MAC_STS_TXQ_BUSY BIT(3) -#define ALX_MAC_STS_RXQ_BUSY BIT(2) -#define ALX_MAC_STS_TXMAC_BUSY BIT(1) -#define ALX_MAC_STS_RXMAC_BUSY BIT(0) -#define ALX_MAC_STS_IDLE (ALX_MAC_STS_TXQ_BUSY | \ - ALX_MAC_STS_RXQ_BUSY | \ - ALX_MAC_STS_TXMAC_BUSY | \ - ALX_MAC_STS_RXMAC_BUSY) - -#define ALX_MDIO 0x1414 -#define ALX_MDIO_MODE_EXT BIT(30) -#define ALX_MDIO_BUSY BIT(27) -#define ALX_MDIO_CLK_SEL_MASK 0x7 -#define ALX_MDIO_CLK_SEL_SHIFT 24 -#define ALX_MDIO_CLK_SEL_25MD4 0 -#define ALX_MDIO_CLK_SEL_25MD128 7 -#define ALX_MDIO_START BIT(23) -#define ALX_MDIO_SPRES_PRMBL BIT(22) -/* bit21: 1:read,0:write */ -#define ALX_MDIO_OP_READ BIT(21) -#define ALX_MDIO_REG_MASK 0x1F -#define ALX_MDIO_REG_SHIFT 16 -#define ALX_MDIO_DATA_MASK 0xFFFF -#define ALX_MDIO_DATA_SHIFT 0 -#define ALX_MDIO_MAX_AC_TO 120 - -#define ALX_MDIO_EXTN 0x1448 -#define ALX_MDIO_EXTN_DEVAD_MASK 0x1F -#define ALX_MDIO_EXTN_DEVAD_SHIFT 16 -#define ALX_MDIO_EXTN_REG_MASK 0xFFFF -#define ALX_MDIO_EXTN_REG_SHIFT 0 - -#define ALX_SERDES 0x1424 -#define ALX_SERDES_PHYCLK_SLWDWN BIT(18) -#define ALX_SERDES_MACCLK_SLWDWN BIT(17) - -#define ALX_LPI_CTRL 0x1440 -#define ALX_LPI_CTRL_EN BIT(0) - -/* for B0+, bit[13..] for C0+ */ -#define ALX_HRTBT_EXT_CTRL 0x1AD0 -#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK 0x3F -#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT 24 -#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN BIT(23) -#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED BIT(22) -#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED BIT(21) -#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN BIT(20) -#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN BIT(19) -#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023 BIT(18) -#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6 BIT(17) -#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN BIT(16) -#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN BIT(15) -#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023 BIT(14) -#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6 BIT(13) -#define ALX_HRTBT_EXT_CTRL_NS_EN BIT(12) -#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK 0xFF -#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT 4 -#define ALX_HRTBT_EXT_CTRL_IS_8023 BIT(3) -#define ALX_HRTBT_EXT_CTRL_IS_IPV6 BIT(2) -#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN BIT(1) -#define ALX_HRTBT_EXT_CTRL_ARP_EN BIT(0) - -#define ALX_HRTBT_REM_IPV4_ADDR 0x1AD4 -#define ALX_HRTBT_HOST_IPV4_ADDR 0x1478 -#define ALX_HRTBT_REM_IPV6_ADDR3 0x1AD8 -#define ALX_HRTBT_REM_IPV6_ADDR2 0x1ADC -#define ALX_HRTBT_REM_IPV6_ADDR1 0x1AE0 -#define ALX_HRTBT_REM_IPV6_ADDR0 0x1AE4 - -/* 1B8C ~ 1B94 for C0+ */ -#define ALX_SWOI_ACER_CTRL 0x1B8C -#define ALX_SWOI_ORIG_ACK_NAK_EN BIT(20) -#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK 0XFF -#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT 12 -#define ALX_SWOI_ORIG_ACK_ADDR_MASK 0XFFF -#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT 0 - -#define ALX_SWOI_IOAC_CTRL_2 0x1B90 -#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK 0xFF -#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT 24 -#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK 0xFFF -#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT 12 -#define 
ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK 0xFFF -#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT 0 - -#define ALX_SWOI_IOAC_CTRL_3 0x1B94 -#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK 0xFF -#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT 24 -#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK 0xFFF -#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT 12 -#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK 0xFFF -#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT 0 - -/* for B0 */ -#define ALX_IDLE_DECISN_TIMER 0x1474 -/* 1ms */ -#define ALX_IDLE_DECISN_TIMER_DEF 0x400 - -#define ALX_MAC_CTRL 0x1480 -#define ALX_MAC_CTRL_FAST_PAUSE BIT(31) -#define ALX_MAC_CTRL_WOLSPED_SWEN BIT(30) -/* bit29: 1:legacy(hi5b), 0:marvl(lo5b)*/ -#define ALX_MAC_CTRL_MHASH_ALG_HI5B BIT(29) -#define ALX_MAC_CTRL_BRD_EN BIT(26) -#define ALX_MAC_CTRL_MULTIALL_EN BIT(25) -#define ALX_MAC_CTRL_SPEED_MASK 0x3 -#define ALX_MAC_CTRL_SPEED_SHIFT 20 -#define ALX_MAC_CTRL_SPEED_10_100 1 -#define ALX_MAC_CTRL_SPEED_1000 2 -#define ALX_MAC_CTRL_PROMISC_EN BIT(15) -#define ALX_MAC_CTRL_VLANSTRIP BIT(14) -#define ALX_MAC_CTRL_PRMBLEN_MASK 0xF -#define ALX_MAC_CTRL_PRMBLEN_SHIFT 10 -#define ALX_MAC_CTRL_PCRCE BIT(7) -#define ALX_MAC_CTRL_CRCE BIT(6) -#define ALX_MAC_CTRL_FULLD BIT(5) -#define ALX_MAC_CTRL_RXFC_EN BIT(3) -#define ALX_MAC_CTRL_TXFC_EN BIT(2) -#define ALX_MAC_CTRL_RX_EN BIT(1) -#define ALX_MAC_CTRL_TX_EN BIT(0) - -#define ALX_STAD0 0x1488 -#define ALX_STAD1 0x148C - -#define ALX_HASH_TBL0 0x1490 -#define ALX_HASH_TBL1 0x1494 - -#define ALX_MTU 0x149C -#define ALX_MTU_JUMBO_TH 1514 -#define ALX_MTU_STD_ALGN 1536 - -#define ALX_SRAM5 0x1524 -#define ALX_SRAM_RXF_LEN_MASK 0xFFF -#define ALX_SRAM_RXF_LEN_SHIFT 0 -#define ALX_SRAM_RXF_LEN_8K (8*1024) - -#define ALX_SRAM9 0x1534 -#define ALX_SRAM_LOAD_PTR BIT(0) - -#define ALX_RX_BASE_ADDR_HI 0x1540 - -#define ALX_TX_BASE_ADDR_HI 0x1544 - -#define ALX_RFD_ADDR_LO 0x1550 -#define ALX_RFD_RING_SZ 0x1560 -#define ALX_RFD_BUF_SZ 0x1564 - -#define ALX_RRD_ADDR_LO 0x1568 -#define ALX_RRD_RING_SZ 0x1578 - -/* pri3: highest, pri0: lowest */ -#define ALX_TPD_PRI3_ADDR_LO 0x14E4 -#define ALX_TPD_PRI2_ADDR_LO 0x14E0 -#define ALX_TPD_PRI1_ADDR_LO 0x157C -#define ALX_TPD_PRI0_ADDR_LO 0x1580 - -/* producer index is 16bit */ -#define ALX_TPD_PRI3_PIDX 0x1618 -#define ALX_TPD_PRI2_PIDX 0x161A -#define ALX_TPD_PRI1_PIDX 0x15F0 -#define ALX_TPD_PRI0_PIDX 0x15F2 - -/* consumer index is 16bit */ -#define ALX_TPD_PRI3_CIDX 0x161C -#define ALX_TPD_PRI2_CIDX 0x161E -#define ALX_TPD_PRI1_CIDX 0x15F4 -#define ALX_TPD_PRI0_CIDX 0x15F6 - -#define ALX_TPD_RING_SZ 0x1584 - -#define ALX_TXQ0 0x1590 -#define ALX_TXQ0_TXF_BURST_PREF_MASK 0xFFFF -#define ALX_TXQ0_TXF_BURST_PREF_SHIFT 16 -#define ALX_TXQ_TXF_BURST_PREF_DEF 0x200 -#define ALX_TXQ0_LSO_8023_EN BIT(7) -#define ALX_TXQ0_MODE_ENHANCE BIT(6) -#define ALX_TXQ0_EN BIT(5) -#define ALX_TXQ0_SUPT_IPOPT BIT(4) -#define ALX_TXQ0_TPD_BURSTPREF_MASK 0xF -#define ALX_TXQ0_TPD_BURSTPREF_SHIFT 0 -#define ALX_TXQ_TPD_BURSTPREF_DEF 5 - -#define ALX_TXQ1 0x1594 -/* bit11: drop large packet, len > (rfd buf) */ -#define ALX_TXQ1_ERRLGPKT_DROP_EN BIT(11) -#define ALX_TXQ1_JUMBO_TSO_TH (7*1024) - -#define ALX_RXQ0 0x15A0 -#define ALX_RXQ0_EN BIT(31) -#define ALX_RXQ0_RSS_HASH_EN BIT(29) -#define ALX_RXQ0_RSS_MODE_MASK 0x3 -#define ALX_RXQ0_RSS_MODE_SHIFT 26 -#define ALX_RXQ0_RSS_MODE_DIS 0 -#define ALX_RXQ0_RSS_MODE_MQMI 3 -#define ALX_RXQ0_NUM_RFD_PREF_MASK 0x3F -#define ALX_RXQ0_NUM_RFD_PREF_SHIFT 20 -#define ALX_RXQ0_NUM_RFD_PREF_DEF 8 -#define 
ALX_RXQ0_IDT_TBL_SIZE_MASK 0x1FF -#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT 8 -#define ALX_RXQ0_IDT_TBL_SIZE_DEF 0x100 -#define ALX_RXQ0_IDT_TBL_SIZE_NORMAL 128 -#define ALX_RXQ0_IPV6_PARSE_EN BIT(7) -#define ALX_RXQ0_RSS_HSTYP_MASK 0xF -#define ALX_RXQ0_RSS_HSTYP_SHIFT 2 -#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN BIT(5) -#define ALX_RXQ0_RSS_HSTYP_IPV6_EN BIT(4) -#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN BIT(3) -#define ALX_RXQ0_RSS_HSTYP_IPV4_EN BIT(2) -#define ALX_RXQ0_RSS_HSTYP_ALL (ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN | \ - ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN | \ - ALX_RXQ0_RSS_HSTYP_IPV6_EN | \ - ALX_RXQ0_RSS_HSTYP_IPV4_EN) -#define ALX_RXQ0_ASPM_THRESH_MASK 0x3 -#define ALX_RXQ0_ASPM_THRESH_SHIFT 0 -#define ALX_RXQ0_ASPM_THRESH_100M 3 - -#define ALX_RXQ2 0x15A8 -#define ALX_RXQ2_RXF_XOFF_THRESH_MASK 0xFFF -#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT 16 -#define ALX_RXQ2_RXF_XON_THRESH_MASK 0xFFF -#define ALX_RXQ2_RXF_XON_THRESH_SHIFT 0 -/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) + - * rx-packet(1522) + delay-of-link(64) - * = 3212. - */ -#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD 3212 - -#define ALX_DMA 0x15C0 -#define ALX_DMA_RCHNL_SEL_MASK 0x3 -#define ALX_DMA_RCHNL_SEL_SHIFT 26 -#define ALX_DMA_WDLY_CNT_MASK 0xF -#define ALX_DMA_WDLY_CNT_SHIFT 16 -#define ALX_DMA_WDLY_CNT_DEF 4 -#define ALX_DMA_RDLY_CNT_MASK 0x1F -#define ALX_DMA_RDLY_CNT_SHIFT 11 -#define ALX_DMA_RDLY_CNT_DEF 15 -/* bit10: 0:tpd with pri, 1: data */ -#define ALX_DMA_RREQ_PRI_DATA BIT(10) -#define ALX_DMA_RREQ_BLEN_MASK 0x7 -#define ALX_DMA_RREQ_BLEN_SHIFT 4 -#define ALX_DMA_RORDER_MODE_MASK 0x7 -#define ALX_DMA_RORDER_MODE_SHIFT 0 -#define ALX_DMA_RORDER_MODE_OUT 4 - -#define ALX_WOL0 0x14A0 -#define ALX_WOL0_PME_LINK BIT(5) -#define ALX_WOL0_LINK_EN BIT(4) -#define ALX_WOL0_PME_MAGIC_EN BIT(3) -#define ALX_WOL0_MAGIC_EN BIT(2) - -#define ALX_RFD_PIDX 0x15E0 - -#define ALX_RFD_CIDX 0x15F8 - -/* MIB */ -#define ALX_MIB_BASE 0x1700 -#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0) -#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92) -#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96) -#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192) - -#define ALX_RX_STATS_BIN ALX_MIB_RX_OK -#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR -#define ALX_TX_STATS_BIN ALX_MIB_TX_OK -#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT - -#define ALX_ISR 0x1600 -#define ALX_ISR_DIS BIT(31) -#define ALX_ISR_RX_Q7 BIT(30) -#define ALX_ISR_RX_Q6 BIT(29) -#define ALX_ISR_RX_Q5 BIT(28) -#define ALX_ISR_RX_Q4 BIT(27) -#define ALX_ISR_PCIE_LNKDOWN BIT(26) -#define ALX_ISR_RX_Q3 BIT(19) -#define ALX_ISR_RX_Q2 BIT(18) -#define ALX_ISR_RX_Q1 BIT(17) -#define ALX_ISR_RX_Q0 BIT(16) -#define ALX_ISR_TX_Q0 BIT(15) -#define ALX_ISR_PHY BIT(12) -#define ALX_ISR_DMAW BIT(10) -#define ALX_ISR_DMAR BIT(9) -#define ALX_ISR_TXF_UR BIT(8) -#define ALX_ISR_TX_Q3 BIT(7) -#define ALX_ISR_TX_Q2 BIT(6) -#define ALX_ISR_TX_Q1 BIT(5) -#define ALX_ISR_RFD_UR BIT(4) -#define ALX_ISR_RXF_OV BIT(3) -#define ALX_ISR_MANU BIT(2) -#define ALX_ISR_TIMER BIT(1) -#define ALX_ISR_SMB BIT(0) - -#define ALX_IMR 0x1604 - -/* re-send assert msg if SW no response */ -#define ALX_INT_RETRIG 0x1608 -/* 40ms */ -#define ALX_INT_RETRIG_TO 20000 - -#define ALX_SMB_TIMER 0x15C4 - -#define ALX_TINT_TPD_THRSHLD 0x15C8 - -#define ALX_TINT_TIMER 0x15CC - -#define ALX_CLK_GATE 0x1814 -#define ALX_CLK_GATE_RXMAC BIT(5) -#define ALX_CLK_GATE_TXMAC BIT(4) -#define ALX_CLK_GATE_RXQ BIT(3) -#define ALX_CLK_GATE_TXQ BIT(2) -#define ALX_CLK_GATE_DMAR BIT(1) -#define ALX_CLK_GATE_DMAW BIT(0) -#define ALX_CLK_GATE_ALL (ALX_CLK_GATE_RXMAC | 
\
- ALX_CLK_GATE_TXMAC | \
- ALX_CLK_GATE_RXQ | \
- ALX_CLK_GATE_TXQ | \
- ALX_CLK_GATE_DMAR | \
- ALX_CLK_GATE_DMAW)
-
-/* interop between drivers */
-#define ALX_DRV 0x1804
-#define ALX_DRV_PHY_AUTO BIT(28)
-#define ALX_DRV_PHY_1000 BIT(27)
-#define ALX_DRV_PHY_100 BIT(26)
-#define ALX_DRV_PHY_10 BIT(25)
-#define ALX_DRV_PHY_DUPLEX BIT(24)
-/* bit23: adv Pause */
-#define ALX_DRV_PHY_PAUSE BIT(23)
-/* bit22: adv Asym Pause */
-#define ALX_DRV_PHY_MASK 0xFF
-#define ALX_DRV_PHY_SHIFT 21
-#define ALX_DRV_PHY_UNKNOWN 0
-
-/* flag of phy inited */
-#define ALX_PHY_INITED 0x003F
-
-/* reg 1830 ~ 186C for C0+, 16 bit map patterns and wake packet detection */
-#define ALX_WOL_CTRL2 0x1830
-#define ALX_WOL_CTRL2_DATA_STORE BIT(3)
-#define ALX_WOL_CTRL2_PTRN_EVT BIT(2)
-#define ALX_WOL_CTRL2_PME_PTRN_EN BIT(1)
-#define ALX_WOL_CTRL2_PTRN_EN BIT(0)
-
-#define ALX_WOL_CTRL3 0x1834
-#define ALX_WOL_CTRL3_PTRN_ADDR_MASK 0xFFFFF
-#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT 0
-
-#define ALX_WOL_CTRL4 0x1838
-#define ALX_WOL_CTRL4_PT15_MATCH BIT(31)
-#define ALX_WOL_CTRL4_PT14_MATCH BIT(30)
-#define ALX_WOL_CTRL4_PT13_MATCH BIT(29)
-#define ALX_WOL_CTRL4_PT12_MATCH BIT(28)
-#define ALX_WOL_CTRL4_PT11_MATCH BIT(27)
-#define ALX_WOL_CTRL4_PT10_MATCH BIT(26)
-#define ALX_WOL_CTRL4_PT9_MATCH BIT(25)
-#define ALX_WOL_CTRL4_PT8_MATCH BIT(24)
-#define ALX_WOL_CTRL4_PT7_MATCH BIT(23)
-#define ALX_WOL_CTRL4_PT6_MATCH BIT(22)
-#define ALX_WOL_CTRL4_PT5_MATCH BIT(21)
-#define ALX_WOL_CTRL4_PT4_MATCH BIT(20)
-#define ALX_WOL_CTRL4_PT3_MATCH BIT(19)
-#define ALX_WOL_CTRL4_PT2_MATCH BIT(18)
-#define ALX_WOL_CTRL4_PT1_MATCH BIT(17)
-#define ALX_WOL_CTRL4_PT0_MATCH BIT(16)
-#define ALX_WOL_CTRL4_PT15_EN BIT(15)
-#define ALX_WOL_CTRL4_PT14_EN BIT(14)
-#define ALX_WOL_CTRL4_PT13_EN BIT(13)
-#define ALX_WOL_CTRL4_PT12_EN BIT(12)
-#define ALX_WOL_CTRL4_PT11_EN BIT(11)
-#define ALX_WOL_CTRL4_PT10_EN BIT(10)
-#define ALX_WOL_CTRL4_PT9_EN BIT(9)
-#define ALX_WOL_CTRL4_PT8_EN BIT(8)
-#define ALX_WOL_CTRL4_PT7_EN BIT(7)
-#define ALX_WOL_CTRL4_PT6_EN BIT(6)
-#define ALX_WOL_CTRL4_PT5_EN BIT(5)
-#define ALX_WOL_CTRL4_PT4_EN BIT(4)
-#define ALX_WOL_CTRL4_PT3_EN BIT(3)
-#define ALX_WOL_CTRL4_PT2_EN BIT(2)
-#define ALX_WOL_CTRL4_PT1_EN BIT(1)
-#define ALX_WOL_CTRL4_PT0_EN BIT(0)
-
-#define ALX_WOL_CTRL5 0x183C
-#define ALX_WOL_CTRL5_PT3_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT3_LEN_SHIFT 24
-#define ALX_WOL_CTRL5_PT2_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT2_LEN_SHIFT 16
-#define ALX_WOL_CTRL5_PT1_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT1_LEN_SHIFT 8
-#define ALX_WOL_CTRL5_PT0_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT0_LEN_SHIFT 0
-
-#define ALX_WOL_CTRL6 0x1840
-#define ALX_WOL_CTRL5_PT7_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT7_LEN_SHIFT 24
-#define ALX_WOL_CTRL5_PT6_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT6_LEN_SHIFT 16
-#define ALX_WOL_CTRL5_PT5_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT5_LEN_SHIFT 8
-#define ALX_WOL_CTRL5_PT4_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT4_LEN_SHIFT 0
-
-#define ALX_WOL_CTRL7 0x1844
-#define ALX_WOL_CTRL5_PT11_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT11_LEN_SHIFT 24
-#define ALX_WOL_CTRL5_PT10_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT10_LEN_SHIFT 16
-#define ALX_WOL_CTRL5_PT9_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT9_LEN_SHIFT 8
-#define ALX_WOL_CTRL5_PT8_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT8_LEN_SHIFT 0
-
-#define ALX_WOL_CTRL8 0x1848
-#define ALX_WOL_CTRL5_PT15_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT15_LEN_SHIFT 24
-#define ALX_WOL_CTRL5_PT14_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT14_LEN_SHIFT 16
-#define ALX_WOL_CTRL5_PT13_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT13_LEN_SHIFT 8
-#define ALX_WOL_CTRL5_PT12_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT12_LEN_SHIFT 0
-
-#define ALX_ACER_FIXED_PTN0 0x1850
-#define ALX_ACER_FIXED_PTN0_MASK 0xFFFFFFFF
-#define ALX_ACER_FIXED_PTN0_SHIFT 0
-
-#define ALX_ACER_FIXED_PTN1 0x1854
-#define ALX_ACER_FIXED_PTN1_MASK 0xFFFF
-#define ALX_ACER_FIXED_PTN1_SHIFT 0
-
-#define ALX_ACER_RANDOM_NUM0 0x1858
-#define ALX_ACER_RANDOM_NUM0_MASK 0xFFFFFFFF
-#define ALX_ACER_RANDOM_NUM0_SHIFT 0
-
-#define ALX_ACER_RANDOM_NUM1 0x185C
-#define ALX_ACER_RANDOM_NUM1_MASK 0xFFFFFFFF
-#define ALX_ACER_RANDOM_NUM1_SHIFT 0
-
-#define ALX_ACER_RANDOM_NUM2 0x1860
-#define ALX_ACER_RANDOM_NUM2_MASK 0xFFFFFFFF
-#define ALX_ACER_RANDOM_NUM2_SHIFT 0
-
-#define ALX_ACER_RANDOM_NUM3 0x1864
-#define ALX_ACER_RANDOM_NUM3_MASK 0xFFFFFFFF
-#define ALX_ACER_RANDOM_NUM3_SHIFT 0
-
-#define ALX_ACER_MAGIC 0x1868
-#define ALX_ACER_MAGIC_EN BIT(31)
-#define ALX_ACER_MAGIC_PME_EN BIT(30)
-#define ALX_ACER_MAGIC_MATCH BIT(29)
-#define ALX_ACER_MAGIC_FF_CHECK BIT(10)
-#define ALX_ACER_MAGIC_RAN_LEN_MASK 0x1F
-#define ALX_ACER_MAGIC_RAN_LEN_SHIFT 5
-#define ALX_ACER_MAGIC_FIX_LEN_MASK 0x1F
-#define ALX_ACER_MAGIC_FIX_LEN_SHIFT 0
-
-#define ALX_ACER_TIMER 0x186C
-#define ALX_ACER_TIMER_EN BIT(31)
-#define ALX_ACER_TIMER_PME_EN BIT(30)
-#define ALX_ACER_TIMER_MATCH BIT(29)
-#define ALX_ACER_TIMER_THRES_MASK 0x1FFFF
-#define ALX_ACER_TIMER_THRES_SHIFT 0
-#define ALX_ACER_TIMER_THRES_DEF 1
-
-/* RSS definitions */
-#define ALX_RSS_KEY0 0x14B0
-#define ALX_RSS_KEY1 0x14B4
-#define ALX_RSS_KEY2 0x14B8
-#define ALX_RSS_KEY3 0x14BC
-#define ALX_RSS_KEY4 0x14C0
-#define ALX_RSS_KEY5 0x14C4
-#define ALX_RSS_KEY6 0x14C8
-#define ALX_RSS_KEY7 0x14CC
-#define ALX_RSS_KEY8 0x14D0
-#define ALX_RSS_KEY9 0x14D4
-
-#define ALX_RSS_IDT_TBL0 0x1B00
-
-#define ALX_MSI_MAP_TBL1 0x15D0
-#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT 20
-#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT 16
-#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT 12
-#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT 8
-#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT 4
-#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT 0
-
-#define ALX_MSI_MAP_TBL2 0x15D8
-#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT 20
-#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT 16
-#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT 12
-#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT 8
-#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT 4
-#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT 0
-
-#define ALX_MSI_ID_MAP 0x15D4
-
-#define ALX_MSI_RETRANS_TIMER 0x1920
-/* bit16: 1:line,0:standard */
-#define ALX_MSI_MASK_SEL_LINE BIT(16)
-#define ALX_MSI_RETRANS_TM_MASK 0xFFFF
-#define ALX_MSI_RETRANS_TM_SHIFT 0
-
-/* CR DMA ctrl */
-
-/* TX QoS */
-#define ALX_WRR 0x1938
-#define ALX_WRR_PRI_MASK 0x3
-#define ALX_WRR_PRI_SHIFT 29
-#define ALX_WRR_PRI_RESTRICT_NONE 3
-#define ALX_WRR_PRI3_MASK 0x1F
-#define ALX_WRR_PRI3_SHIFT 24
-#define ALX_WRR_PRI2_MASK 0x1F
-#define ALX_WRR_PRI2_SHIFT 16
-#define ALX_WRR_PRI1_MASK 0x1F
-#define ALX_WRR_PRI1_SHIFT 8
-#define ALX_WRR_PRI0_MASK 0x1F
-#define ALX_WRR_PRI0_SHIFT 0
-
-#define ALX_HQTPD 0x193C
-#define ALX_HQTPD_BURST_EN BIT(31)
-#define ALX_HQTPD_Q3_NUMPREF_MASK 0xF
-#define ALX_HQTPD_Q3_NUMPREF_SHIFT 8
-#define ALX_HQTPD_Q2_NUMPREF_MASK 0xF
-#define ALX_HQTPD_Q2_NUMPREF_SHIFT 4
-#define ALX_HQTPD_Q1_NUMPREF_MASK 0xF
-#define ALX_HQTPD_Q1_NUMPREF_SHIFT 0
-
-#define ALX_MISC 0x19C0
-#define ALX_MISC_PSW_OCP_MASK 0x7
-#define ALX_MISC_PSW_OCP_SHIFT 21
-#define ALX_MISC_PSW_OCP_DEF 0x7
-#define ALX_MISC_ISO_EN BIT(12)
-#define ALX_MISC_INTNLOSC_OPEN BIT(3)
-
-#define ALX_MSIC2 0x19C8
-#define ALX_MSIC2_CALB_START BIT(0)
-
-#define ALX_MISC3 0x19CC
-/* bit1: 1:Software control 25M */
-#define ALX_MISC3_25M_BY_SW BIT(1)
-/* bit0: 25M switch to intnl OSC */
-#define ALX_MISC3_25M_NOTO_INTNL BIT(0)
-
-/* MSIX tbl in memory space */
-#define ALX_MSIX_ENTRY_BASE 0x2000
-
-/********************* PHY regs definition ***************************/
-
-/* PHY Specific Status Register */
-#define ALX_MII_GIGA_PSSR 0x11
-#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800
-#define ALX_GIGA_PSSR_DPLX 0x2000
-#define ALX_GIGA_PSSR_SPEED 0xC000
-#define ALX_GIGA_PSSR_10MBS 0x0000
-#define ALX_GIGA_PSSR_100MBS 0x4000
-#define ALX_GIGA_PSSR_1000MBS 0x8000
-
-/* PHY Interrupt Enable Register */
-#define ALX_MII_IER 0x12
-#define ALX_IER_LINK_UP 0x0400
-#define ALX_IER_LINK_DOWN 0x0800
-
-/* PHY Interrupt Status Register */
-#define ALX_MII_ISR 0x13
-
-#define ALX_MII_DBG_ADDR 0x1D
-#define ALX_MII_DBG_DATA 0x1E
-
-/***************************** debug port *************************************/
-
-#define ALX_MIIDBG_ANACTRL 0x00
-#define ALX_ANACTRL_DEF 0x02EF
-
-#define ALX_MIIDBG_SYSMODCTRL 0x04
-/* en half bias */
-#define ALX_SYSMODCTRL_IECHOADJ_DEF 0xBB8B
-
-#define ALX_MIIDBG_SRDSYSMOD 0x05
-#define ALX_SRDSYSMOD_DEEMP_EN 0x0040
-#define ALX_SRDSYSMOD_DEF 0x2C46
-
-#define ALX_MIIDBG_HIBNEG 0x0B
-#define ALX_HIBNEG_PSHIB_EN 0x8000
-#define ALX_HIBNEG_HIB_PSE 0x1000
-#define ALX_HIBNEG_DEF 0xBC40
-#define ALX_HIBNEG_NOHIB (ALX_HIBNEG_DEF & \
- ~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PSE))
-
-#define ALX_MIIDBG_TST10BTCFG 0x12
-#define ALX_TST10BTCFG_DEF 0x4C04
-
-#define ALX_MIIDBG_AZ_ANADECT 0x15
-#define ALX_AZ_ANADECT_DEF 0x3220
-#define ALX_AZ_ANADECT_LONG 0x3210
-
-#define ALX_MIIDBG_MSE16DB 0x18
-#define ALX_MSE16DB_UP 0x05EA
-#define ALX_MSE16DB_DOWN 0x02EA
-
-#define ALX_MIIDBG_MSE20DB 0x1C
-#define ALX_MSE20DB_TH_MASK 0x7F
-#define ALX_MSE20DB_TH_SHIFT 2
-#define ALX_MSE20DB_TH_DEF 0x2E
-#define ALX_MSE20DB_TH_HI 0x54
-
-#define ALX_MIIDBG_AGC 0x23
-#define ALX_AGC_2_VGA_MASK 0x3FU
-#define ALX_AGC_2_VGA_SHIFT 8
-#define ALX_AGC_LONG1G_LIMT 40
-#define ALX_AGC_LONG100M_LIMT 44
-
-#define ALX_MIIDBG_LEGCYPS 0x29
-#define ALX_LEGCYPS_EN 0x8000
-#define ALX_LEGCYPS_DEF 0x129D
-
-#define ALX_MIIDBG_TST100BTCFG 0x36
-#define ALX_TST100BTCFG_DEF 0xE12C
-
-#define ALX_MIIDBG_GREENCFG 0x3B
-#define ALX_GREENCFG_DEF 0x7078
-
-#define ALX_MIIDBG_GREENCFG2 0x3D
-#define ALX_GREENCFG2_BP_GREEN 0x8000
-#define ALX_GREENCFG2_GATE_DFSE_EN 0x0080
-
-/******* dev 3 *********/
-#define ALX_MIIEXT_PCS 3
-
-#define ALX_MIIEXT_CLDCTRL3 0x8003
-#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000
-
-#define ALX_MIIEXT_CLDCTRL5 0x8005
-#define ALX_CLDCTRL5_BP_VD_HLFBIAS 0x4000
-
-#define ALX_MIIEXT_CLDCTRL6 0x8006
-#define ALX_CLDCTRL6_CAB_LEN_MASK 0xFF
-#define ALX_CLDCTRL6_CAB_LEN_SHIFT 0
-#define ALX_CLDCTRL6_CAB_LEN_SHORT1G 116
-#define ALX_CLDCTRL6_CAB_LEN_SHORT100M 152
-
-#define ALX_MIIEXT_VDRVBIAS 0x8062
-#define ALX_VDRVBIAS_DEF 0x3
-
-/********* dev 7 **********/
-#define ALX_MIIEXT_ANEG 7
-
-#define ALX_MIIEXT_LOCAL_EEEADV 0x3C
-#define ALX_LOCAL_EEEADV_1000BT 0x0004
-#define ALX_LOCAL_EEEADV_100BT 0x0002
-
-#define ALX_MIIEXT_AFE 0x801A
-#define ALX_AFE_10BT_100M_TH 0x0040
-
-#define ALX_MIIEXT_S3DIG10 0x8023
-/* bit0: 1:bypass 10BT rx fifo, 0:original 10BT rx */
-#define ALX_MIIEXT_S3DIG10_SL 0x0001
-#define ALX_MIIEXT_S3DIG10_DEF 0
-
-#define ALX_MIIEXT_NLP78 0x8027
-#define ALX_MIIEXT_NLP78_120M_DEF 0x8A05
-
-#endif
diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 638e55435b04..b8fbe266ab68 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3192,11 +3192,11 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) rc |= XMIT_CSUM_TCP; if (skb_is_gso_v6(skb)) { - rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP); + rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6); if (rc & XMIT_CSUM_ENC) rc |= XMIT_GSO_ENC_V6; } else if (skb_is_gso(skb)) { - rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP); + rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP); if (rc & XMIT_CSUM_ENC) rc |= XMIT_GSO_ENC_V4; } @@ -3313,7 +3313,6 @@ static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, */ static void bnx2x_set_pbd_gso(struct sk_buff *skb, struct eth_tx_parse_bd_e1x *pbd, - struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) { pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); @@ -3327,14 +3326,11 @@ static void bnx2x_set_pbd_gso(struct sk_buff *skb, ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0)); - /* GSO on 57710/57711 needs FW to calculate IP checksum */ - tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; - } else { + } else pbd->tcp_pseudo_csum = bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0)); - } pbd->global_data |= cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); @@ -3483,18 +3479,19 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, { u16 hlen_w = 0; u8 outerip_off, outerip_len = 0; - /* from outer IP to transport */ hlen_w = (skb_inner_transport_header(skb) - skb_network_header(skb)) >> 1; /* transport len */ - hlen_w += inner_tcp_hdrlen(skb) >> 1; + if (xmit_type & XMIT_CSUM_TCP) + hlen_w += inner_tcp_hdrlen(skb) >> 1; + else + hlen_w += sizeof(struct udphdr) >> 1; pbd2->fw_ip_hdr_to_payload_w = hlen_w; - /* outer IP header info */ - if (xmit_type & XMIT_CSUM_V4) { + if (xmit_type & XMIT_CSUM_ENC_V4) { struct iphdr *iph = ip_hdr(skb); pbd2->fw_ip_csum_wo_len_flags_frag = bswab16(csum_fold((~iph->check) - @@ -3817,7 +3814,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, xmit_type); else - bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type); + bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); } /* Set the PBD's parsing_data field if not zero diff --git a/trunk/drivers/net/ethernet/broadcom/tg3.c b/trunk/drivers/net/ethernet/broadcom/tg3.c index a13463e8a2c3..728d42ab2a76 100644 --- a/trunk/drivers/net/ethernet/broadcom/tg3.c +++ b/trunk/drivers/net/ethernet/broadcom/tg3.c @@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 132 +#define TG3_MIN_NUM 131 #define DRV_MODULE_VERSION \ __stringify(TG3_MAJ_NUM) "." 
__stringify(TG3_MIN_NUM) -#define DRV_MODULE_RELDATE "May 21, 2013" +#define DRV_MODULE_RELDATE "April 09, 2013" #define RESET_KIND_SHUTDOWN 0 #define RESET_KIND_INIT 1 @@ -744,9 +744,6 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) status = tg3_ape_read32(tp, gnt + off); if (status == bit) break; - if (pci_channel_offline(tp->pdev)) - break; - udelay(10); } @@ -1638,9 +1635,6 @@ static void tg3_wait_for_event_ack(struct tg3 *tp) for (i = 0; i < delay_cnt; i++) { if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) break; - if (pci_channel_offline(tp->pdev)) - break; - udelay(8); } } @@ -1806,9 +1800,6 @@ static int tg3_poll_fw(struct tg3 *tp) int i; u32 val; - if (tg3_flag(tp, NO_FWARE_REPORTED)) - return 0; - if (tg3_flag(tp, IS_SSB_CORE)) { /* We don't use firmware. */ return 0; @@ -1819,9 +1810,6 @@ static int tg3_poll_fw(struct tg3 *tp) for (i = 0; i < 200; i++) { if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) return 0; - if (pci_channel_offline(tp->pdev)) - return -ENODEV; - udelay(100); } return -ENODEV; @@ -1832,15 +1820,6 @@ static int tg3_poll_fw(struct tg3 *tp) tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) break; - if (pci_channel_offline(tp->pdev)) { - if (!tg3_flag(tp, NO_FWARE_REPORTED)) { - tg3_flag_set(tp, NO_FWARE_REPORTED); - netdev_info(tp->dev, "No firmware running\n"); - } - - break; - } - udelay(10); } @@ -2978,31 +2957,6 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) return 0; } -static bool tg3_phy_power_bug(struct tg3 *tp) -{ - switch (tg3_asic_rev(tp)) { - case ASIC_REV_5700: - case ASIC_REV_5704: - return true; - case ASIC_REV_5780: - if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) - return true; - return false; - case ASIC_REV_5717: - if (!tp->pci_fn) - return true; - return false; - case ASIC_REV_5719: - case ASIC_REV_5720: - if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && - !tp->pci_fn) - return true; - return false; - } - - return false; -} - static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) { u32 val; @@ -3062,7 +3016,12 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) /* The PHY should not be powered down on some chips because * of bugs. */ - if (tg3_phy_power_bug(tp)) + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5704 || + (tg3_asic_rev(tp) == ASIC_REV_5780 && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) || + (tg3_asic_rev(tp) == ASIC_REV_5717 && + !tp->pci_fn)) return; if (tg3_chip_rev(tp) == CHIPREV_5784_AX || @@ -3538,8 +3497,6 @@ static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base) tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) break; - if (pci_channel_offline(tp->pdev)) - return -EBUSY; } return (i == iters) ? 
-EBUSY : 0; @@ -7471,20 +7428,6 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) return (base > 0xffffdcc0) && (base + len + 8 < base); } -/* Test for TSO DMA buffers that cross into regions which are within MSS bytes - * of any 4GB boundaries: 4G, 8G, etc - */ -static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping, - u32 len, u32 mss) -{ - if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) { - u32 base = (u32) mapping & 0xffffffff; - - return ((base + len + (mss & 0x3fff)) < base); - } - return 0; -} - /* Test for DMA addresses > 40-bit */ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, int len) @@ -7521,9 +7464,6 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, if (tg3_4g_overflow_test(map, len)) hwbug = true; - if (tg3_4g_tso_overflow_test(tp, map, len, mss)) - hwbug = true; - if (tg3_40bit_overflow_test(tp, map, len)) hwbug = true; @@ -8609,14 +8549,6 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, boo tw32_f(ofs, val); for (i = 0; i < MAX_WAIT_CNT; i++) { - if (pci_channel_offline(tp->pdev)) { - dev_err(&tp->pdev->dev, - "tg3_stop_block device offline, " - "ofs=%lx enable_bit=%x\n", - ofs, enable_bit); - return -ENODEV; - } - udelay(100); val = tr32(ofs); if ((val & enable_bit) == 0) @@ -8640,13 +8572,6 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent) tg3_disable_ints(tp); - if (pci_channel_offline(tp->pdev)) { - tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE); - tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; - err = -ENODEV; - goto err_no_dev; - } - tp->rx_mode &= ~RX_MODE_ENABLE; tw32_f(MAC_RX_MODE, tp->rx_mode); udelay(10); @@ -8695,7 +8620,6 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent) err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); -err_no_dev: for (i = 0; i < tp->irq_cnt; i++) { struct tg3_napi *tnapi = &tp->napi[i]; if (tnapi->hw_status) @@ -8950,10 +8874,6 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_halt_cpu(tp, RX_CPU_BASE); } - err = tg3_poll_fw(tp); - if (err) - return err; - tw32(GRC_MODE, tp->grc_mode); if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) { @@ -8984,6 +8904,10 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); + err = tg3_poll_fw(tp); + if (err) + return err; + tg3_mdio_start(tp); if (tg3_flag(tp, PCI_EXPRESS) && @@ -9507,14 +9431,6 @@ static void tg3_rss_write_indir_tbl(struct tg3 *tp) } } -static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp) -{ - if (tg3_asic_rev(tp) == ASIC_REV_5719) - return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719; - else - return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720; -} - /* tp->lock is held. 
*/ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) { @@ -10200,17 +10116,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) tw32_f(RDMAC_MODE, rdmac_mode); udelay(40); - if (tg3_asic_rev(tp) == ASIC_REV_5719 || - tg3_asic_rev(tp) == ASIC_REV_5720) { + if (tg3_asic_rev(tp) == ASIC_REV_5719) { for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) break; } if (i < TG3_NUM_RDMA_CHANNELS) { val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); - val |= tg3_lso_rd_dma_workaround_bit(tp); + val |= TG3_LSO_RD_DMA_TX_LENGTH_WA; tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); - tg3_flag_set(tp, 5719_5720_RDMA_BUG); + tg3_flag_set(tp, 5719_RDMA_BUG); } } @@ -10443,13 +10358,6 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) */ static int tg3_init_hw(struct tg3 *tp, bool reset_phy) { - /* Chip may have been just powered on. If so, the boot code may still - * be running initialization. Wait for it to finish to avoid races in - * accessing the hardware. - */ - tg3_enable_register_access(tp); - tg3_poll_fw(tp); - tg3_switch_clocks(tp); tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); @@ -10581,15 +10489,15 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp) TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); - if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) && + if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) && (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { u32 val; val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); - val &= ~tg3_lso_rd_dma_workaround_bit(tp); + val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA; tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); - tg3_flag_clear(tp, 5719_5720_RDMA_BUG); + tg3_flag_clear(tp, 5719_RDMA_BUG); } TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); diff --git a/trunk/drivers/net/ethernet/broadcom/tg3.h b/trunk/drivers/net/ethernet/broadcom/tg3.h index ff6e30eeae35..9b2d3ac2474a 100644 --- a/trunk/drivers/net/ethernet/broadcom/tg3.h +++ b/trunk/drivers/net/ethernet/broadcom/tg3.h @@ -1422,8 +1422,7 @@ #define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910 #define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000 #define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000 -#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5719 0x02000000 -#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5720 0x00200000 +#define TG3_LSO_RD_DMA_TX_LENGTH_WA 0x02000000 /* 0x4914 --> 0x4be0 unused */ #define TG3_NUM_RDMA_CHANNELS 4 @@ -3060,7 +3059,7 @@ enum TG3_FLAGS { TG3_FLAG_APE_HAS_NCSI, TG3_FLAG_TX_TSTAMP_EN, TG3_FLAG_4K_FIFO_LIMIT, - TG3_FLAG_5719_5720_RDMA_BUG, + TG3_FLAG_5719_RDMA_BUG, TG3_FLAG_RESET_TASK_PENDING, TG3_FLAG_PTP_CAPABLE, TG3_FLAG_5705_PLUS, diff --git a/trunk/drivers/net/ethernet/brocade/bna/bnad.c b/trunk/drivers/net/ethernet/brocade/bna/bnad.c index 07f7ef05c3f2..ce4a030d3d0c 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/bnad.c +++ b/trunk/drivers/net/ethernet/brocade/bna/bnad.c @@ -3236,10 +3236,9 @@ bnad_init(struct bnad *bnad, sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id); bnad->work_q = create_singlethread_workqueue(bnad->wq_name); - if (!bnad->work_q) { - iounmap(bnad->bar0); + + if (!bnad->work_q) return -ENOMEM; - } return 0; } diff --git a/trunk/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/trunk/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 94d957d203a6..6e8bc9d88c41 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ 
b/trunk/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -244,7 +244,7 @@ bnad_debugfs_lseek(struct file *file, loff_t offset, int orig) file->f_pos += offset; break; case 2: - file->f_pos = debug->buffer_len + offset; + file->f_pos = debug->buffer_len - offset; break; default: return -EINVAL; diff --git a/trunk/drivers/net/ethernet/cadence/Kconfig b/trunk/drivers/net/ethernet/cadence/Kconfig index 768285ec10f4..1194446f859a 100644 --- a/trunk/drivers/net/ethernet/cadence/Kconfig +++ b/trunk/drivers/net/ethernet/cadence/Kconfig @@ -22,7 +22,7 @@ if NET_CADENCE config ARM_AT91_ETHER tristate "AT91RM9200 Ethernet support" - depends on GENERIC_HARDIRQS && HAS_DMA + depends on GENERIC_HARDIRQS select NET_CORE select MACB ---help--- @@ -31,7 +31,6 @@ config ARM_AT91_ETHER config MACB tristate "Cadence MACB/GEM support" - depends on HAS_DMA select PHYLIB ---help--- The Cadence MACB ethernet interface is found on many Atmel AT32 and diff --git a/trunk/drivers/net/ethernet/cadence/macb.c b/trunk/drivers/net/ethernet/cadence/macb.c index c89aa41dd448..6be513deb17f 100644 --- a/trunk/drivers/net/ethernet/cadence/macb.c +++ b/trunk/drivers/net/ethernet/cadence/macb.c @@ -485,8 +485,7 @@ static void macb_tx_interrupt(struct macb *bp) status = macb_readl(bp, TSR); macb_writel(bp, TSR, status); - if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) - macb_writel(bp, ISR, MACB_BIT(TCOMP)); + macb_writel(bp, ISR, MACB_BIT(TCOMP)); netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", (unsigned long)status); @@ -739,8 +738,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) * now. */ macb_writel(bp, IDR, MACB_RX_INT_FLAGS); - if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) - macb_writel(bp, ISR, MACB_BIT(RCOMP)); + macb_writel(bp, ISR, MACB_BIT(RCOMP)); if (napi_schedule_prep(&bp->napi)) { netdev_vdbg(bp->dev, "scheduling RX softirq\n"); @@ -1064,17 +1062,6 @@ static void macb_configure_dma(struct macb *bp) } } -/* - * Configure peripheral capacities according to integration options used - */ -static void macb_configure_caps(struct macb *bp) -{ - if (macb_is_gem(bp)) { - if (GEM_BF(IRQCOR, gem_readl(bp, DCFG1)) == 0) - bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; - } -} - static void macb_init_hw(struct macb *bp) { u32 config; @@ -1097,7 +1084,6 @@ static void macb_init_hw(struct macb *bp) bp->duplex = DUPLEX_HALF; macb_configure_dma(bp); - macb_configure_caps(bp); /* Initialize TX and RX buffers */ macb_writel(bp, RBQP, bp->rx_ring_dma); diff --git a/trunk/drivers/net/ethernet/cadence/macb.h b/trunk/drivers/net/ethernet/cadence/macb.h index 548c0ecae869..993d70380688 100644 --- a/trunk/drivers/net/ethernet/cadence/macb.h +++ b/trunk/drivers/net/ethernet/cadence/macb.h @@ -300,8 +300,6 @@ #define MACB_REV_SIZE 16 /* Bitfields in DCFG1. 
*/ -#define GEM_IRQCOR_OFFSET 23 -#define GEM_IRQCOR_SIZE 1 #define GEM_DBWDEF_OFFSET 25 #define GEM_DBWDEF_SIZE 3 @@ -325,9 +323,6 @@ #define MACB_MAN_READ 2 #define MACB_MAN_CODE 2 -/* Capability mask bits */ -#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x1 - /* Bit manipulation macros */ #define MACB_BIT(name) \ (1 << MACB_##name##_OFFSET) @@ -579,8 +574,6 @@ struct macb { unsigned int speed; unsigned int duplex; - u32 caps; - phy_interface_t phy_interface; /* AT91RM9200 transmit */ diff --git a/trunk/drivers/net/ethernet/calxeda/Kconfig b/trunk/drivers/net/ethernet/calxeda/Kconfig index 184a063bed5f..aba435c3d4ae 100644 --- a/trunk/drivers/net/ethernet/calxeda/Kconfig +++ b/trunk/drivers/net/ethernet/calxeda/Kconfig @@ -1,6 +1,6 @@ config NET_CALXEDA_XGMAC tristate "Calxeda 1G/10G XGMAC Ethernet driver" - depends on HAS_IOMEM && HAS_DMA + depends on HAS_IOMEM select CRC32 help This is the driver for the XGMAC Ethernet IP block found on Calxeda diff --git a/trunk/drivers/net/ethernet/dec/tulip/interrupt.c b/trunk/drivers/net/ethernet/dec/tulip/interrupt.c index 92306b320840..28a5e425fecf 100644 --- a/trunk/drivers/net/ethernet/dec/tulip/interrupt.c +++ b/trunk/drivers/net/ethernet/dec/tulip/interrupt.c @@ -76,12 +76,6 @@ int tulip_refill_rx(struct net_device *dev) mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); - if (dma_mapping_error(&tp->pdev->dev, mapping)) { - dev_kfree_skb(skb); - tp->rx_buffers[entry].skb = NULL; - break; - } - tp->rx_buffers[entry].mapping = mapping; tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping); diff --git a/trunk/drivers/net/ethernet/emulex/benet/be.h b/trunk/drivers/net/ethernet/emulex/benet/be.h index 0a510684e468..f544b297c9ab 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be.h +++ b/trunk/drivers/net/ethernet/emulex/benet/be.h @@ -262,7 +262,6 @@ struct be_rx_compl_info { u8 ipv6; u8 vtm; u8 pkt_type; - u8 ip_frag; }; struct be_rx_obj { diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c b/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c index 1db2df61b8af..fd7b547698ab 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -562,7 +562,7 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter) resource_error = lancer_provisioning_error(adapter); if (resource_error) - return -EAGAIN; + return -1; status = lancer_wait_ready(adapter); if (!status) { @@ -590,8 +590,8 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter) * when PF provisions resources. */ resource_error = lancer_provisioning_error(adapter); - if (resource_error) - status = -EAGAIN; + if (status == -1 && !resource_error) + adapter->eeh_error = true; return status; } @@ -2976,17 +2976,22 @@ static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count, for (i = 0; i < desc_count; i++) { desc->desc_len = desc->desc_len ? 
: RESOURCE_DESC_SIZE; if (((void *)desc + desc->desc_len) > - (void *)(buf + max_buf_size)) - return NULL; + (void *)(buf + max_buf_size)) { + desc = NULL; + break; + } if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 || desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1) - return desc; + break; desc = (void *)desc + desc->desc_len; } - return NULL; + if (!desc || i == MAX_RESOURCE_DESC) + return NULL; + + return desc; } /* Uses Mbox */ diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_hw.h b/trunk/drivers/net/ethernet/emulex/benet/be_hw.h index 8780183c6d1c..3c1099b47f2a 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/trunk/drivers/net/ethernet/emulex/benet/be_hw.h @@ -356,7 +356,7 @@ struct amap_eth_rx_compl_v0 { u8 ip_version; /* dword 1 */ u8 macdst[6]; /* dword 1 */ u8 vtp; /* dword 1 */ - u8 ip_frag; /* dword 1 */ + u8 rsvd0; /* dword 1 */ u8 fragndx[10]; /* dword 1 */ u8 ct[2]; /* dword 1 */ u8 sw; /* dword 1 */ diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_main.c b/trunk/drivers/net/ethernet/emulex/benet/be_main.c index a0b4be51f0d1..a444110b060f 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_main.c +++ b/trunk/drivers/net/ethernet/emulex/benet/be_main.c @@ -780,18 +780,26 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, if (unlikely(!skb)) return skb; - if (vlan_tx_tag_present(skb)) + if (vlan_tx_tag_present(skb)) { vlan_tag = be_get_tx_vlan_tag(adapter, skb); - else if (qnq_async_evt_rcvd(adapter) && adapter->pvid) - vlan_tag = adapter->pvid; + skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + if (skb) + skb->vlan_tci = 0; + } + + if (qnq_async_evt_rcvd(adapter) && adapter->pvid) { + if (!vlan_tag) + vlan_tag = adapter->pvid; + if (skip_hw_vlan) + *skip_hw_vlan = true; + } if (vlan_tag) { skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); if (unlikely(!skb)) return skb; + skb->vlan_tci = 0; - if (skip_hw_vlan) - *skip_hw_vlan = true; } /* Insert the outer VLAN, if any */ @@ -1599,8 +1607,6 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl, compl); } rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl); - rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, - ip_frag, compl); } static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) @@ -1622,9 +1628,6 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) else be_parse_rx_compl_v0(compl, rxcp); - if (rxcp->ip_frag) - rxcp->l4_csum = 0; - if (rxcp->vlanf) { /* vlanf could be wrongly set in some cards. * ignore if vtm is not set */ @@ -2173,7 +2176,7 @@ static irqreturn_t be_msix(int irq, void *dev) static inline bool do_gro(struct be_rx_compl_info *rxcp) { - return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false; + return (rxcp->tcpf && !rxcp->err) ? 
true : false; } static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, @@ -4098,7 +4101,6 @@ static int be_get_initial_config(struct be_adapter *adapter) static int lancer_recover_func(struct be_adapter *adapter) { - struct device *dev = &adapter->pdev->dev; int status; status = lancer_test_and_set_rdy_state(adapter); @@ -4110,7 +4112,8 @@ static int lancer_recover_func(struct be_adapter *adapter) be_clear(adapter); - be_clear_all_error(adapter); + adapter->hw_error = false; + adapter->fw_timeout = false; status = be_setup(adapter); if (status) @@ -4122,13 +4125,13 @@ static int lancer_recover_func(struct be_adapter *adapter) goto err; } - dev_err(dev, "Error recovery successful\n"); + dev_err(&adapter->pdev->dev, + "Adapter SLIPORT recovery succeeded\n"); return 0; err: - if (status == -EAGAIN) - dev_err(dev, "Waiting for resource provisioning\n"); - else - dev_err(dev, "Error recovery failed\n"); + if (adapter->eeh_error) + dev_err(&adapter->pdev->dev, + "Adapter SLIPORT recovery failed\n"); return status; } @@ -4137,27 +4140,28 @@ static void be_func_recovery_task(struct work_struct *work) { struct be_adapter *adapter = container_of(work, struct be_adapter, func_recovery_work.work); - int status = 0; + int status; be_detect_error(adapter); if (adapter->hw_error && lancer_chip(adapter)) { + if (adapter->eeh_error) + goto out; + rtnl_lock(); netif_device_detach(adapter->netdev); rtnl_unlock(); status = lancer_recover_func(adapter); + if (!status) netif_device_attach(adapter->netdev); } - /* In Lancer, for all errors other than provisioning error (-EAGAIN), - * no need to attempt further recovery. - */ - if (!status || status == -EAGAIN) - schedule_delayed_work(&adapter->func_recovery_work, - msecs_to_jiffies(1000)); +out: + schedule_delayed_work(&adapter->func_recovery_work, + msecs_to_jiffies(1000)); } static void be_worker(struct work_struct *work) @@ -4262,9 +4266,6 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) netdev->features |= NETIF_F_HIGHDMA; } else { status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (!status) - status = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); if (status) { dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); goto free_netdev; @@ -4443,19 +4444,20 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, dev_err(&adapter->pdev->dev, "EEH error detected\n"); - if (!adapter->eeh_error) { - adapter->eeh_error = true; + adapter->eeh_error = true; - cancel_delayed_work_sync(&adapter->func_recovery_work); + cancel_delayed_work_sync(&adapter->func_recovery_work); + rtnl_lock(); + netif_device_detach(netdev); + rtnl_unlock(); + + if (netif_running(netdev)) { rtnl_lock(); - netif_device_detach(netdev); - if (netif_running(netdev)) - be_close(netdev); + be_close(netdev); rtnl_unlock(); - - be_clear(adapter); } + be_clear(adapter); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; @@ -4480,6 +4482,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) int status; dev_info(&adapter->pdev->dev, "EEH reset\n"); + be_clear_all_error(adapter); status = pci_enable_device(pdev); if (status) @@ -4497,7 +4500,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; pci_cleanup_aer_uncorrect_error_status(pdev); - be_clear_all_error(adapter); return PCI_ERS_RESULT_RECOVERED; } diff --git a/trunk/drivers/net/ethernet/freescale/fec_main.c b/trunk/drivers/net/ethernet/freescale/fec_main.c index d48099f03b7f..aff0310a778b 
100644 --- a/trunk/drivers/net/ethernet/freescale/fec_main.c +++ b/trunk/drivers/net/ethernet/freescale/fec_main.c @@ -87,8 +87,6 @@ #define FEC_QUIRK_HAS_GBIT (1 << 3) /* Controller has extend desc buffer */ #define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) -/* Controller has hardware checksum support */ -#define FEC_QUIRK_HAS_CSUM (1 << 5) static struct platform_device_id fec_devtype[] = { { @@ -107,9 +105,9 @@ static struct platform_device_id fec_devtype[] = { }, { .name = "imx6q-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | - FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM, + FEC_QUIRK_HAS_BUFDESC_EX, }, { - .name = "mvf600-fec", + .name = "mvf-fec", .driver_data = FEC_QUIRK_ENET_MAC, }, { /* sentinel */ @@ -122,7 +120,7 @@ enum imx_fec_type { IMX27_FEC, /* runs on i.mx27/35/51 */ IMX28_FEC, IMX6Q_FEC, - MVF600_FEC, + MVF_FEC, }; static const struct of_device_id fec_dt_ids[] = { @@ -130,7 +128,7 @@ static const struct of_device_id fec_dt_ids[] = { { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], }, { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, - { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, + { .compatible = "fsl,mvf-fec", .data = &fec_devtype[MVF_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@ -451,7 +449,7 @@ fec_restart(struct net_device *ndev, int duplex) netif_device_detach(ndev); napi_disable(&fep->napi); netif_stop_queue(ndev); - netif_tx_lock_bh(ndev); + netif_tx_lock(ndev); } /* Whack a reset. We should wait for this. */ @@ -516,7 +514,6 @@ fec_restart(struct net_device *ndev, int duplex) /* Set MII speed */ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); -#if !defined(CONFIG_M5272) /* set RX checksum */ val = readl(fep->hwp + FEC_RACC); if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) @@ -524,7 +521,6 @@ fec_restart(struct net_device *ndev, int duplex) else val &= ~FEC_RACC_OPTIONS; writel(val, fep->hwp + FEC_RACC); -#endif /* * The phy interface and speed need to get configured @@ -577,7 +573,6 @@ fec_restart(struct net_device *ndev, int duplex) #endif } -#if !defined(CONFIG_M5272) /* enable pause frame*/ if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && @@ -595,7 +590,6 @@ fec_restart(struct net_device *ndev, int duplex) } else { rcntl &= ~FEC_ENET_FCE; } -#endif /* !defined(CONFIG_M5272) */ writel(rcntl, fep->hwp + FEC_R_CNTRL); @@ -620,10 +614,10 @@ fec_restart(struct net_device *ndev, int duplex) writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); if (netif_running(ndev)) { - netif_tx_unlock_bh(ndev); - netif_wake_queue(ndev); - napi_enable(&fep->napi); netif_device_attach(ndev); + napi_enable(&fep->napi); + netif_wake_queue(ndev); + netif_tx_unlock(ndev); } } @@ -1042,18 +1036,6 @@ static void fec_get_mac(struct net_device *ndev) iap = &tmpaddr[0]; } - /* - * 5) random mac address - */ - if (!is_valid_ether_addr(iap)) { - /* Report it and use a random ethernet address instead */ - netdev_err(ndev, "Invalid MAC address: %pM\n", iap); - eth_hw_addr_random(ndev); - netdev_info(ndev, "Using random MAC address: %pM\n", - ndev->dev_addr); - return; - } - memcpy(ndev->dev_addr, iap, ETH_ALEN); /* Adjust MAC if using macaddr */ @@ -1209,9 +1191,7 @@ static int fec_enet_mii_probe(struct net_device *ndev) /* mask with MAC supported features */ if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) { phy_dev->supported &= PHY_GBIT_FEATURES; -#if !defined(CONFIG_M5272) phy_dev->supported 
|= SUPPORTED_Pause; -#endif } else phy_dev->supported &= PHY_BASIC_FEATURES; @@ -1396,8 +1376,6 @@ static int fec_enet_get_ts_info(struct net_device *ndev, } } -#if !defined(CONFIG_M5272) - static void fec_enet_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) { @@ -1444,13 +1422,9 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, return 0; } -#endif /* !defined(CONFIG_M5272) */ - static const struct ethtool_ops fec_enet_ethtool_ops = { -#if !defined(CONFIG_M5272) .get_pauseparam = fec_enet_get_pauseparam, .set_pauseparam = fec_enet_set_pauseparam, -#endif .get_settings = fec_enet_get_settings, .set_settings = fec_enet_set_settings, .get_drvinfo = fec_enet_get_drvinfo, @@ -1770,8 +1744,6 @@ static const struct net_device_ops fec_netdev_ops = { static int fec_enet_init(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); struct bufdesc *cbd_base; /* Allocate memory for buffer descriptors. */ @@ -1803,14 +1775,12 @@ static int fec_enet_init(struct net_device *ndev) writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); - if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) { - /* enable hw accelerator */ - ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM - | NETIF_F_RXCSUM); - ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM - | NETIF_F_RXCSUM); - fep->csum_flags |= FLAG_RX_CSUM_ENABLED; - } + /* enable hw accelerator */ + ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_RXCSUM); + ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_RXCSUM); + fep->csum_flags |= FLAG_RX_CSUM_ENABLED; fec_restart(ndev, 0); @@ -1886,12 +1856,10 @@ fec_probe(struct platform_device *pdev) /* setup board info structure */ fep = netdev_priv(ndev); -#if !defined(CONFIG_M5272) /* default enable pause frame auto negotiation */ if (pdev->id_entry && (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT)) fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; -#endif fep->hwp = devm_request_and_ioremap(&pdev->dev, r); fep->pdev = pdev; diff --git a/trunk/drivers/net/ethernet/freescale/gianfar_ptp.c b/trunk/drivers/net/ethernet/freescale/gianfar_ptp.c index 083ea2b4d20a..576e4b858fce 100644 --- a/trunk/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/trunk/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -524,7 +524,6 @@ static int gianfar_ptp_probe(struct platform_device *dev) return 0; no_clock: - iounmap(etsects->regs); no_ioremap: release_resource(etsects->rsrc); no_resource: diff --git a/trunk/drivers/net/ethernet/ibm/emac/core.c b/trunk/drivers/net/ethernet/ibm/emac/core.c index d300a0c0eafc..4989481c19f0 100644 --- a/trunk/drivers/net/ethernet/ibm/emac/core.c +++ b/trunk/drivers/net/ethernet/ibm/emac/core.c @@ -359,26 +359,10 @@ static int emac_reset(struct emac_instance *dev) } #ifdef CONFIG_PPC_DCR_NATIVE - /* - * PPC460EX/GT Embedded Processor Advanced User's Manual - * section 28.10.1 Mode Register 0 (EMACx_MR0) states: - * Note: The PHY must provide a TX Clk in order to perform a soft reset - * of the EMAC. If none is present, select the internal clock - * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). - * After a soft reset, select the external clock. 
- */ - if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { - if (dev->phy_address == 0xffffffff && - dev->phy_map == 0xffffffff) { - /* No PHY: select internal loop clock before reset */ - dcri_clrset(SDR0, SDR0_ETH_CFG, - 0, SDR0_ETH_CFG_ECS << dev->cell_index); - } else { - /* PHY present: select external clock before reset */ - dcri_clrset(SDR0, SDR0_ETH_CFG, - SDR0_ETH_CFG_ECS << dev->cell_index, 0); - } - } + /* Enable internal clock source */ + if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) + dcri_clrset(SDR0, SDR0_ETH_CFG, + 0, SDR0_ETH_CFG_ECS << dev->cell_index); #endif out_be32(&p->mr0, EMAC_MR0_SRST); @@ -386,14 +370,10 @@ static int emac_reset(struct emac_instance *dev) --n; #ifdef CONFIG_PPC_DCR_NATIVE - if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { - if (dev->phy_address == 0xffffffff && - dev->phy_map == 0xffffffff) { - /* No PHY: restore external clock source after reset */ - dcri_clrset(SDR0, SDR0_ETH_CFG, - SDR0_ETH_CFG_ECS << dev->cell_index, 0); - } - } + /* Enable external clock source */ + if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) + dcri_clrset(SDR0, SDR0_ETH_CFG, + SDR0_ETH_CFG_ECS << dev->cell_index, 0); #endif if (n) { diff --git a/trunk/drivers/net/ethernet/icplus/ipg.h b/trunk/drivers/net/ethernet/icplus/ipg.h index abb300a31912..6ce027355fcf 100644 --- a/trunk/drivers/net/ethernet/icplus/ipg.h +++ b/trunk/drivers/net/ethernet/icplus/ipg.h @@ -195,57 +195,57 @@ enum ipg_regs { /* TFD data structure masks. */ /* TFDList, TFC */ -#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFFULL -#define IPG_TFC_FRAMEID 0x000000000000FFFFULL -#define IPG_TFC_WORDALIGN 0x0000000000030000ULL -#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000ULL -#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000ULL -#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000ULL -#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000ULL -#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000ULL -#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000ULL -#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000ULL -#define IPG_TFC_TXINDICATE 0x0000000000400000ULL -#define IPG_TFC_TXDMAINDICATE 0x0000000000800000ULL -#define IPG_TFC_FRAGCOUNT 0x000000000F000000ULL -#define IPG_TFC_VLANTAGINSERT 0x0000000010000000ULL -#define IPG_TFC_TFDDONE 0x0000000080000000ULL -#define IPG_TFC_VID 0x00000FFF00000000ULL -#define IPG_TFC_CFI 0x0000100000000000ULL -#define IPG_TFC_USERPRIORITY 0x0000E00000000000ULL +#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF +#define IPG_TFC_FRAMEID 0x000000000000FFFF +#define IPG_TFC_WORDALIGN 0x0000000000030000 +#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000 +#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000 +#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000 +#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000 +#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000 +#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000 +#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000 +#define IPG_TFC_TXINDICATE 0x0000000000400000 +#define IPG_TFC_TXDMAINDICATE 0x0000000000800000 +#define IPG_TFC_FRAGCOUNT 0x000000000F000000 +#define IPG_TFC_VLANTAGINSERT 0x0000000010000000 +#define IPG_TFC_TFDDONE 0x0000000080000000 +#define IPG_TFC_VID 0x00000FFF00000000 +#define IPG_TFC_CFI 0x0000100000000000 +#define IPG_TFC_USERPRIORITY 0x0000E00000000000 /* TFDList, FragInfo */ -#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL -#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFFULL -#define IPG_TFI_FRAGLEN 0xFFFF000000000000ULL +#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF +#define 
IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF +#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL /* RFD data structure masks. */ /* RFDList, RFS */ -#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFFULL -#define IPG_RFS_RXFRAMELEN 0x000000000000FFFFULL -#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000ULL -#define IPG_RFS_RXRUNTFRAME 0x0000000000020000ULL -#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000ULL -#define IPG_RFS_RXFCSERROR 0x0000000000080000ULL -#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000ULL -#define IPG_RFS_RXLENGTHERROR 0x0000000000200000ULL -#define IPG_RFS_VLANDETECTED 0x0000000000400000ULL -#define IPG_RFS_TCPDETECTED 0x0000000000800000ULL -#define IPG_RFS_TCPERROR 0x0000000001000000ULL -#define IPG_RFS_UDPDETECTED 0x0000000002000000ULL -#define IPG_RFS_UDPERROR 0x0000000004000000ULL -#define IPG_RFS_IPDETECTED 0x0000000008000000ULL -#define IPG_RFS_IPERROR 0x0000000010000000ULL -#define IPG_RFS_FRAMESTART 0x0000000020000000ULL -#define IPG_RFS_FRAMEEND 0x0000000040000000ULL -#define IPG_RFS_RFDDONE 0x0000000080000000ULL -#define IPG_RFS_TCI 0x0000FFFF00000000ULL +#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF +#define IPG_RFS_RXFRAMELEN 0x000000000000FFFF +#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000 +#define IPG_RFS_RXRUNTFRAME 0x0000000000020000 +#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000 +#define IPG_RFS_RXFCSERROR 0x0000000000080000 +#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000 +#define IPG_RFS_RXLENGTHERROR 0x0000000000200000 +#define IPG_RFS_VLANDETECTED 0x0000000000400000 +#define IPG_RFS_TCPDETECTED 0x0000000000800000 +#define IPG_RFS_TCPERROR 0x0000000001000000 +#define IPG_RFS_UDPDETECTED 0x0000000002000000 +#define IPG_RFS_UDPERROR 0x0000000004000000 +#define IPG_RFS_IPDETECTED 0x0000000008000000 +#define IPG_RFS_IPERROR 0x0000000010000000 +#define IPG_RFS_FRAMESTART 0x0000000020000000 +#define IPG_RFS_FRAMEEND 0x0000000040000000 +#define IPG_RFS_RFDDONE 0x0000000080000000 +#define IPG_RFS_TCI 0x0000FFFF00000000 /* RFDList, FragInfo */ -#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL -#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFFULL -#define IPG_RFI_FRAGLEN 0xFFFF000000000000ULL +#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF +#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF +#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL /* I/O Register masks. 
*/ diff --git a/trunk/drivers/net/ethernet/marvell/mv643xx_eth.c b/trunk/drivers/net/ethernet/marvell/mv643xx_eth.c index d1cbfb12c1ca..d0afeea181fb 100644 --- a/trunk/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/trunk/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -867,7 +867,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); int reclaimed; - __netif_tx_lock_bh(nq); + __netif_tx_lock(nq, smp_processor_id()); reclaimed = 0; while (reclaimed < budget && txq->tx_desc_count > 0) { @@ -913,7 +913,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) dev_kfree_skb(skb); } - __netif_tx_unlock_bh(nq); + __netif_tx_unlock(nq); if (reclaimed < budget) mp->work_tx &= ~(1 << txq->index); @@ -1757,7 +1757,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index) memset(rxq->rx_desc_area, 0, size); rxq->rx_desc_area_size = size; - rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb), + rxq->rx_skb = kmalloc_array(rxq->rx_ring_size, sizeof(*rxq->rx_skb), GFP_KERNEL); if (rxq->rx_skb == NULL) goto out_free; @@ -2745,7 +2745,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); - netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT); + netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128); init_timer(&mp->rx_oom); mp->rx_oom.data = (unsigned long)mp; diff --git a/trunk/drivers/net/ethernet/marvell/pxa168_eth.c b/trunk/drivers/net/ethernet/marvell/pxa168_eth.c index 1c8af8ba08d9..339bb323cb0c 100644 --- a/trunk/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/trunk/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1015,7 +1015,7 @@ static int rxq_init(struct net_device *dev) int rx_desc_num = pep->rx_ring_size; /* Allocate RX skb rings */ - pep->rx_skb = kzalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size, + pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size, GFP_KERNEL); if (!pep->rx_skb) return -ENOMEM; @@ -1076,7 +1076,7 @@ static int txq_init(struct net_device *dev) int size = 0, i = 0; int tx_desc_num = pep->tx_ring_size; - pep->tx_skb = kzalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size, + pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size, GFP_KERNEL); if (!pep->tx_skb) return -ENOMEM; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/cmd.c b/trunk/drivers/net/ethernet/mellanox/mlx4/cmd.c index 0e572a527154..1df56cc50ee9 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -222,6 +222,8 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param, * FLR process. 
The only non-zero result in the RESET command * is MLX4_DELAY_RESET_SLAVE*/ if ((MLX4_COMM_CMD_RESET == cmd)) { + mlx4_warn(dev, "Got slave FLRed from Communication" + " channel (ret:0x%x)\n", ret_from_pending); err = MLX4_DELAY_RESET_SLAVE; } else { mlx4_warn(dev, "Communication channel timed out\n"); diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 89c47ea84b50..b35f94700093 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1323,7 +1323,6 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) priv->last_moder_time[ring] = moder_time; cq = &priv->rx_cq[ring]; cq->moder_time = moder_time; - cq->moder_cnt = priv->rx_frames; err = mlx4_en_set_cq_moder(priv, cq); if (err) en_err(priv, "Failed modifying moderation for cq:%d\n", @@ -2119,7 +2118,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, struct mlx4_en_priv *priv; int i; int err; - u64 mac_u64; dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), MAX_TX_RINGS, MAX_RX_RINGS); @@ -2193,17 +2191,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->addr_len = ETH_ALEN; mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); if (!is_valid_ether_addr(dev->dev_addr)) { - if (mlx4_is_slave(priv->mdev->dev)) { - eth_hw_addr_random(dev); - en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); - mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr); - mdev->dev->caps.def_mac[priv->port] = mac_u64; - } else { - en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", - priv->port, dev->dev_addr); - err = -EINVAL; - goto out; - } + en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", + priv->port, dev->dev_addr); + err = -EINVAL; + goto out; } memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac)); diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_resources.c index d3f508697a3d..91f2b2c43c12 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -60,7 +60,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; if (user_prio >= 0) { context->pri_path.sched_queue |= user_prio << 3; - context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP; + context->pri_path.feup = 1 << 6; } context->pri_path.counter_index = 0xff; context->cqn_send = cpu_to_be32(cqn); diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c b/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c index 2c97901c6a6d..b147bdd40768 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -131,9 +131,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [2] = "RSS XOR Hash Function support", [3] = "Device manage flow steering support", [4] = "Automatic MAC reassignment support", - [5] = "Time stamping support", - [6] = "VST (control vlan insertion/stripping) support", - [7] = "FSM (MAC anti-spoofing) support" + [5] = "Time stamping support" }; int i; @@ -840,16 +838,12 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, MLX4_CMD_NATIVE); if (!err && dev->caps.function != slave) { + /* set slave default_mac address */ + MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET); + def_mac += slave << 8; /* if config MAC in DB use it */ if 
(priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac) def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac; - else { - /* set slave default_mac address */ - MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET); - def_mac += slave << 8; - priv->mfunc.master.vf_admin[slave].vport[vhcr->in_modifier].mac = def_mac; - } - MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET); /* get port type - currently only eth is enabled */ diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c index 8a434997a0df..0d32a82458bf 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c @@ -632,9 +632,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) dev->caps.cqe_size = 32; } - dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; - mlx4_warn(dev, "Timestamping is not supported in slave mode.\n"); - slave_adjust_steering_mode(dev, &dev_cap, &hca_param); return 0; @@ -1293,6 +1290,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); u64 dma = (u64) priv->mfunc.vhcr_dma; + int num_of_reset_retries = NUM_OF_RESET_RETRIES; int ret_from_reset = 0; u32 slave_read; u32 cmd_channel_ver; @@ -1306,10 +1304,18 @@ static int mlx4_init_slave(struct mlx4_dev *dev) * NUM_OF_RESET_RETRIES times before leaving.*/ if (ret_from_reset) { if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { - mlx4_warn(dev, "slave is currently in the " - "middle of FLR. Deferring probe.\n"); - mutex_unlock(&priv->cmd.slave_cmd_mutex); - return -EPROBE_DEFER; + msleep(SLEEP_TIME_IN_RESET); + while (ret_from_reset && num_of_reset_retries) { + mlx4_warn(dev, "slave is currently in the" + "middle of FLR. retrying..." + "(try num:%d)\n", + (NUM_OF_RESET_RETRIES - + num_of_reset_retries + 1)); + ret_from_reset = + mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, + 0, MLX4_COMM_TIME); + num_of_reset_retries = num_of_reset_retries - 1; + } } else goto err; } @@ -1520,8 +1526,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) } else { err = mlx4_init_slave(dev); if (err) { - if (err != -EPROBE_DEFER) - mlx4_err(dev, "Failed to initialize slave\n"); + mlx4_err(dev, "Failed to initialize slave\n"); return err; } diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/trunk/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 1157f028a90f..e12e0d2e0ee0 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -372,29 +372,24 @@ static int update_vport_qp_param(struct mlx4_dev *dev, if (MLX4_QP_ST_RC == qp_type) return -EINVAL; - /* force strip vlan by clear vsd */ - qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); - if (0 != vp_oper->state.default_vlan) { - qpc->pri_path.vlan_control = - MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | - MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | - MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; - } else { /* priority tagged */ - qpc->pri_path.vlan_control = - MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | - MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; - } - - qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN; qpc->pri_path.vlan_index = vp_oper->vlan_idx; - qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN; - qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; + qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit*/ + qpc->pri_path.feup |= 1 << 3; /* set fvl bit */ qpc->pri_path.sched_queue &= 0xC7; qpc->pri_path.sched_queue |= 
(vp_oper->state.default_qos) << 3; + mlx4_dbg(dev, "qp %d port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n", + be32_to_cpu(qpc->local_qpn) & 0xffffff, port, + (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan, + vp_oper->vlan_idx, (int)(qpc->pri_path.feup), + (int)(qpc->pri_path.fl)); } if (vp_oper->state.spoofchk) { - qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; + qpc->pri_path.feup |= 1 << 5; /* set fsm bit */; qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; + mlx4_dbg(dev, "spoof qp %d port %d feup 0x%x, myLmc 0x%x mindx %d\n", + be32_to_cpu(qpc->local_qpn) & 0xffffff, port, + (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc, + vp_oper->mac_idx); } return 0; } diff --git a/trunk/drivers/net/ethernet/octeon/octeon_mgmt.c b/trunk/drivers/net/ethernet/octeon/octeon_mgmt.c index 91a8a5d28037..921729f9c85c 100644 --- a/trunk/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/trunk/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -46,25 +46,17 @@ union mgmt_port_ring_entry { u64 d64; struct { -#define RING_ENTRY_CODE_DONE 0xf -#define RING_ENTRY_CODE_MORE 0x10 -#ifdef __BIG_ENDIAN_BITFIELD - u64 reserved_62_63:2; + u64 reserved_62_63:2; /* Length of the buffer/packet in bytes */ - u64 len:14; + u64 len:14; /* For TX, signals that the packet should be timestamped */ - u64 tstamp:1; + u64 tstamp:1; /* The RX error code */ - u64 code:7; + u64 code:7; +#define RING_ENTRY_CODE_DONE 0xf +#define RING_ENTRY_CODE_MORE 0x10 /* Physical address of the buffer */ - u64 addr:40; -#else - u64 addr:40; - u64 code:7; - u64 tstamp:1; - u64 len:14; - u64 reserved_62_63:2; -#endif + u64 addr:40; } s; }; @@ -1149,13 +1141,10 @@ static int octeon_mgmt_open(struct net_device *netdev) /* For compensation state to lock. */ ndelay(1040 * NS_PER_PHY_CLK); - /* Default Interframe Gaps are too small. Recommended - * workaround is. - * - * AGL_GMX_TX_IFG[IFG1]=14 - * AGL_GMX_TX_IFG[IFG2]=10 + /* Some Ethernet switches cannot handle standard + * Interframe Gap, increase to 16 bytes. 
*/ - cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae); + cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88); } octeon_mgmt_rx_fill_ring(netdev); diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index c1b693cb3df3..90c253b145ef 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -429,7 +429,6 @@ struct qlcnic_hardware_context { u16 port_type; u16 board_type; - u16 supported_type; u16 link_speed; u16 link_duplex; @@ -907,11 +906,8 @@ struct qlcnic_ipaddr { #define QLCNIC_FW_HANG 0x4000 #define QLCNIC_FW_LRO_MSS_CAP 0x8000 #define QLCNIC_TX_INTR_SHARED 0x10000 -#define QLCNIC_APP_CHANGED_FLAGS 0x20000 #define QLCNIC_IS_MSI_FAMILY(adapter) \ ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) -#define QLCNIC_IS_TSO_CAPABLE(adapter) \ - ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) #define QLCNIC_DEF_NUM_STS_DESC_RINGS 4 #define QLCNIC_MSIX_TBL_SPACE 8192 @@ -1037,7 +1033,6 @@ struct qlcnic_adapter { spinlock_t rx_mac_learn_lock; u32 file_prd_off; /*File fw product offset*/ u32 fw_version; - u32 offload_flags; const struct firmware *fw; }; @@ -1519,7 +1514,6 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter); void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter); void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter); void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter); -int qlcnic_82xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *); int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); @@ -1546,8 +1540,6 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16); int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter); int qlcnic_read_mac_addr(struct qlcnic_adapter *); int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int); -void qlcnic_set_netdev_features(struct qlcnic_adapter *, - struct qlcnic_esw_func_cfg *); void qlcnic_sriov_vf_schedule_multi(struct net_device *); void qlcnic_vf_add_mc_list(struct net_device *, u16); diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index b4ff1e35a11d..ea790a93ee7c 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -696,14 +696,15 @@ u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter) return 1; } -u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time) +u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter) { u32 data; + unsigned long wait_time = 0; struct qlcnic_hardware_context *ahw = adapter->ahw; /* wait for mailbox completion */ do { data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL); - if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) { + if (++wait_time > QLCNIC_MBX_TIMEOUT) { data = QLCNIC_RCODE_TIMEOUT; break; } @@ -719,8 +720,8 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter, u16 opcode; u8 mbx_err_code; unsigned long flags; + u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd; struct qlcnic_hardware_context *ahw = adapter->ahw; - u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0; opcode = LSW(cmd->req.arg[0]); if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { @@ -753,13 +754,15 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter, /* Signal FW about the impending command */ QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); poll: - 
rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time); + rsp = qlcnic_83xx_mbx_poll(adapter); if (rsp != QLCNIC_RCODE_TIMEOUT) { /* Get the FW response data */ fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); - goto poll; + mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); + if (mbx_val) + goto poll; } mbx_err_code = QLCNIC_MBX_STATUS(fw_data); rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); @@ -1273,13 +1276,11 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, return err; } -static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, - int num_sds_ring) +static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_rds_ring *rds_ring; - u16 adapter_state = adapter->is_up; u8 ring; int ret; @@ -1303,10 +1304,6 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, ret = qlcnic_fw_create_ctx(adapter); if (ret) { qlcnic_detach(adapter); - if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) { - adapter->max_sds_rings = num_sds_ring; - qlcnic_attach(adapter); - } netif_device_attach(netdev); return ret; } @@ -1599,8 +1596,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; - ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST, - max_sds_rings); + ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST); if (ret) goto fail_diag_alloc; @@ -2834,23 +2830,6 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) break; } config = cmd.rsp.arg[3]; - if (QLC_83XX_SFP_PRESENT(config)) { - switch (ahw->module_type) { - case LINKEVENT_MODULE_OPTICAL_UNKNOWN: - case LINKEVENT_MODULE_OPTICAL_SRLR: - case LINKEVENT_MODULE_OPTICAL_LRM: - case LINKEVENT_MODULE_OPTICAL_SFP_1G: - ahw->supported_type = PORT_FIBRE; - break; - case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: - case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: - case LINKEVENT_MODULE_TWINAX: - ahw->supported_type = PORT_TP; - break; - default: - ahw->supported_type = PORT_OTHER; - } - } if (config & 1) err = 1; } @@ -2859,8 +2838,7 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) return config; } -int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, - struct ethtool_cmd *ecmd) +int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter) { u32 config = 0; int status = 0; @@ -2873,54 +2851,6 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config); /* hard code until there is a way to get it from flash */ ahw->board_type = QLCNIC_BRDTYPE_83XX_10G; - - if (netif_running(adapter->netdev) && ahw->has_link_events) { - ethtool_cmd_speed_set(ecmd, ahw->link_speed); - ecmd->duplex = ahw->link_duplex; - ecmd->autoneg = ahw->link_autoneg; - } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; - ecmd->autoneg = AUTONEG_DISABLE; - } - - if (ahw->port_type == QLCNIC_XGBE) { - ecmd->supported = SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_1000baseT_Full; - } else { - ecmd->supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); - ecmd->advertising = (ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full); - } - - switch 
(ahw->supported_type) { - case PORT_FIBRE: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; - break; - case PORT_TP: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; - ecmd->transceiver = XCVR_INTERNAL; - break; - default: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_OTHER; - ecmd->transceiver = XCVR_EXTERNAL; - break; - } - ecmd->phy_address = ahw->physical_port; return status; } @@ -3116,8 +3046,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EIO; - ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST, - max_sds_rings); + ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST); if (ret) goto fail_diag_irq; diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index f5db67fc9f55..1f1d85e6f2af 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -603,7 +603,7 @@ int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *); void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); -int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *); +int qlcnic_83xx_get_settings(struct qlcnic_adapter *); int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *); void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *, struct ethtool_pauseparam *); @@ -620,7 +620,7 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *); int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *); int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *); u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *); -u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *); +u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *); void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *); void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *); #endif diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 5e7fb1dfb97b..ab1d8d99cbd5 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -382,6 +382,8 @@ static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter) clear_bit(__QLCNIC_RESETTING, &adapter->state); dev_err(&adapter->pdev->dev, "%s:\n", __func__); + adapter->netdev->trans_start = jiffies; + return 0; } @@ -433,6 +435,10 @@ static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter) } done: netif_device_attach(netdev); + if (netif_running(netdev)) { + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } } static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter, @@ -636,21 +642,15 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter) { - struct qlcnic_hardware_context *ahw = adapter->ahw; - qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1); + clear_bit(__QLCNIC_RESETTING, &adapter->state); set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); 
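/*
 * Editorial aside, not driver code: the qlcnic hunks around here drop the
 * mapping from the detected SFP module type to the ethtool_cmd port fields.
 * The sketch below only illustrates that general pattern with hypothetical
 * names (example_fill_port_type, the caller-supplied "port" value); the
 * PORT_*/XCVR_*/SUPPORTED_* constants are the real <linux/ethtool.h> ones.
 */
#include <linux/ethtool.h>

static void example_fill_port_type(struct ethtool_cmd *ecmd, u8 port)
{
	/* Keep .port, .transceiver and the supported/advertised masks
	 * consistent for whatever media type was detected. */
	switch (port) {
	case PORT_FIBRE:
		ecmd->supported   |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port         = PORT_FIBRE;
		ecmd->transceiver  = XCVR_EXTERNAL;
		break;
	case PORT_TP:
		ecmd->supported   |= SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->port         = PORT_TP;
		ecmd->transceiver  = XCVR_INTERNAL;
		break;
	default:
		ecmd->supported   |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port         = PORT_OTHER;
		ecmd->transceiver  = XCVR_EXTERNAL;
		break;
	}
}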
set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); - - ahw->idc.quiesce_req = 0; - ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; - ahw->idc.err_code = 0; - ahw->idc.collect_dump = 0; - ahw->reset_context = 0; - adapter->tx_timeo_cnt = 0; - - clear_bit(__QLCNIC_RESETTING, &adapter->state); + adapter->ahw->idc.quiesce_req = 0; + adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; + adapter->ahw->idc.err_code = 0; + adapter->ahw->idc.collect_dump = 0; } /** @@ -851,7 +851,6 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) /* Check for soft reset request */ if (ahw->reset_context && !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) { - adapter->ahw->reset_context = 0; qlcnic_83xx_idc_tx_soft_reset(adapter); return ret; } @@ -915,7 +914,6 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter) static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) { dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__); - clear_bit(__QLCNIC_RESETTING, &adapter->state); adapter->ahw->idc.err_code = -EIO; return 0; diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index 6acf82b9f018..43562c256379 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -642,7 +642,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) qlcnic_83xx_config_intrpt(adapter, 0); } /* Allow dma queues to drain after context reset */ - mdelay(20); + msleep(20); } } diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index f67652de5a63..08efb4635007 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -131,13 +131,12 @@ static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = { "ctx_lro_pkt_cnt", "ctx_ip_csum_error", "ctx_rx_pkts_wo_ctx", - "ctx_rx_pkts_drop_wo_sds_on_card", - "ctx_rx_pkts_drop_wo_sds_on_host", + "ctx_rx_pkts_dropped_wo_sts", "ctx_rx_osized_pkts", "ctx_rx_pkts_dropped_wo_rds", "ctx_rx_unexpected_mcast_pkts", "ctx_invalid_mac_address", - "ctx_rx_rds_ring_prim_attempted", + "ctx_rx_rds_ring_prim_attemoted", "ctx_rx_rds_ring_prim_success", "ctx_num_lro_flows_added", "ctx_num_lro_flows_removed", @@ -252,18 +251,6 @@ static int qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct qlcnic_adapter *adapter = netdev_priv(dev); - - if (qlcnic_82xx_check(adapter)) - return qlcnic_82xx_get_settings(adapter, ecmd); - else if (qlcnic_83xx_check(adapter)) - return qlcnic_83xx_get_settings(adapter, ecmd); - - return -EIO; -} - -int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, - struct ethtool_cmd *ecmd) -{ struct qlcnic_hardware_context *ahw = adapter->ahw; u32 speed, reg; int check_sfp_module = 0; @@ -289,7 +276,10 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, } else if (adapter->ahw->port_type == QLCNIC_XGBE) { u32 val = 0; - val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); + if (qlcnic_83xx_check(adapter)) + qlcnic_83xx_get_settings(adapter); + else + val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); if (val == QLCNIC_PORT_MODE_802_3_AP) { ecmd->supported = SUPPORTED_1000baseT_Full; @@ -299,13 +289,16 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, ecmd->advertising = ADVERTISED_10000baseT_Full; } - if (netif_running(adapter->netdev) && ahw->has_link_events) { 
- reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn)); - speed = P3P_LINK_SPEED_VAL(pcifn, reg); - ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; - ethtool_cmd_speed_set(ecmd, ahw->link_speed); - ecmd->autoneg = ahw->link_autoneg; - ecmd->duplex = ahw->link_duplex; + if (netif_running(dev) && adapter->ahw->has_link_events) { + if (qlcnic_82xx_check(adapter)) { + reg = QLCRD32(adapter, + P3P_LINK_SPEED_REG(pcifn)); + speed = P3P_LINK_SPEED_VAL(pcifn, reg); + ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; + } + ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed); + ecmd->autoneg = adapter->ahw->link_autoneg; + ecmd->duplex = adapter->ahw->link_duplex; goto skip; } @@ -347,8 +340,8 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: ecmd->advertising |= ADVERTISED_TP; ecmd->supported |= SUPPORTED_TP; - check_sfp_module = netif_running(adapter->netdev) && - ahw->has_link_events; + check_sfp_module = netif_running(dev) && + adapter->ahw->has_link_events; case QLCNIC_BRDTYPE_P3P_10G_XFP: ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_FIBRE; @@ -362,8 +355,8 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); ecmd->port = PORT_FIBRE; - check_sfp_module = netif_running(adapter->netdev) && - ahw->has_link_events; + check_sfp_module = netif_running(dev) && + adapter->ahw->has_link_events; } else { ecmd->autoneg = AUTONEG_ENABLE; ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); @@ -372,6 +365,13 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, ecmd->port = PORT_TP; } break; + case QLCNIC_BRDTYPE_83XX_10G: + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); + ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); + ecmd->port = PORT_FIBRE; + check_sfp_module = netif_running(dev) && ahw->has_link_events; + break; default: dev_err(&adapter->pdev->dev, "Unsupported board model %d\n", adapter->ahw->board_type); diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 106a12f2a02f..6a6512ba9f38 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -973,57 +973,16 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu) return rc; } -static netdev_features_t qlcnic_process_flags(struct qlcnic_adapter *adapter, - netdev_features_t features) -{ - u32 offload_flags = adapter->offload_flags; - - if (offload_flags & BIT_0) { - features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM; - adapter->rx_csum = 1; - if (QLCNIC_IS_TSO_CAPABLE(adapter)) { - if (!(offload_flags & BIT_1)) - features &= ~NETIF_F_TSO; - else - features |= NETIF_F_TSO; - - if (!(offload_flags & BIT_2)) - features &= ~NETIF_F_TSO6; - else - features |= NETIF_F_TSO6; - } - } else { - features &= ~(NETIF_F_RXCSUM | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM); - - if (QLCNIC_IS_TSO_CAPABLE(adapter)) - features &= ~(NETIF_F_TSO | NETIF_F_TSO6); - adapter->rx_csum = 0; - } - - return features; -} netdev_features_t qlcnic_fix_features(struct net_device *netdev, netdev_features_t features) { struct qlcnic_adapter *adapter = netdev_priv(netdev); - netdev_features_t changed; - if (qlcnic_82xx_check(adapter) && - (adapter->flags & QLCNIC_ESWITCH_ENABLED)) { - if (adapter->flags & QLCNIC_APP_CHANGED_FLAGS) { - features = qlcnic_process_flags(adapter, features); - } else { - changed = features ^ 
netdev->features; - features ^= changed & (NETIF_F_RXCSUM | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6); - } + if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) && + qlcnic_82xx_check(adapter)) { + netdev_features_t changed = features ^ netdev->features; + features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); } if (!(features & NETIF_F_RXCSUM)) diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index b6818f4356b9..95b1b5732838 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -134,7 +134,7 @@ struct qlcnic_mailbox_metadata { #define QLCNIC_SET_OWNER 1 #define QLCNIC_CLR_OWNER 0 -#define QLCNIC_MBX_TIMEOUT 5000 +#define QLCNIC_MBX_TIMEOUT 10000 #define QLCNIC_MBX_RSP_OK 1 #define QLCNIC_MBX_PORT_RSP_OK 0x1a diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index aeb26a850679..264d5a4f8153 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -37,24 +37,24 @@ MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)"); int qlcnic_use_msi = 1; -MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)"); +MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); module_param_named(use_msi, qlcnic_use_msi, int, 0444); int qlcnic_use_msi_x = 1; -MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)"); +MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444); int qlcnic_auto_fw_reset = 1; -MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)"); +MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644); int qlcnic_load_fw_file; -MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)"); +MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444); int qlcnic_config_npars; module_param(qlcnic_config_npars, int, 0444); -MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)"); +MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void qlcnic_remove(struct pci_dev *pdev); @@ -84,9 +84,14 @@ static int qlcnic_start_firmware(struct qlcnic_adapter *); static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter); static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *); static int qlcnicvf_start_firmware(struct qlcnic_adapter *); +static void qlcnic_set_netdev_features(struct qlcnic_adapter *, + struct qlcnic_esw_func_cfg *); static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16); static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16); +#define QLCNIC_IS_TSO_CAPABLE(adapter) \ + ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) + static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; @@ -303,23 +308,6 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) return 0; } -static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter) -{ - struct 
qlcnic_mac_list_s *cur; - struct list_head *head; - - list_for_each(head, &adapter->mac_list) { - cur = list_entry(head, struct qlcnic_mac_list_s, list); - if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) { - qlcnic_sre_macaddr_change(adapter, cur->mac_addr, - 0, QLCNIC_MAC_DEL); - list_del(&cur->list); - kfree(cur); - return; - } - } -} - static int qlcnic_set_mac(struct net_device *netdev, void *p) { struct qlcnic_adapter *adapter = netdev_priv(netdev); @@ -334,15 +322,11 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; - if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN)) - return 0; - if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { netif_device_detach(netdev); qlcnic_napi_disable(adapter); } - qlcnic_delete_adapter_mac(adapter); memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); qlcnic_set_multi(adapter->netdev); @@ -1069,6 +1053,8 @@ void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter, if (!esw_cfg->promisc_mode) adapter->flags |= QLCNIC_PROMISC_DISABLED; + + qlcnic_set_netdev_features(adapter, esw_cfg); } int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter) @@ -1083,23 +1069,51 @@ int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter) return -EIO; qlcnic_set_vlan_config(adapter, &esw_cfg); qlcnic_set_eswitch_port_features(adapter, &esw_cfg); - qlcnic_set_netdev_features(adapter, &esw_cfg); return 0; } -void qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, - struct qlcnic_esw_func_cfg *esw_cfg) +static void +qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) { struct net_device *netdev = adapter->netdev; + unsigned long features, vlan_features; if (qlcnic_83xx_check(adapter)) return; - adapter->offload_flags = esw_cfg->offload_flags; - adapter->flags |= QLCNIC_APP_CHANGED_FLAGS; - netdev_update_features(netdev); - adapter->flags &= ~QLCNIC_APP_CHANGED_FLAGS; + features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_IPV6_CSUM | NETIF_F_GRO); + vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM); + + if (QLCNIC_IS_TSO_CAPABLE(adapter)) { + features |= (NETIF_F_TSO | NETIF_F_TSO6); + vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6); + } + + if (netdev->features & NETIF_F_LRO) + features |= NETIF_F_LRO; + + if (esw_cfg->offload_flags & BIT_0) { + netdev->features |= features; + adapter->rx_csum = 1; + if (!(esw_cfg->offload_flags & BIT_1)) { + netdev->features &= ~NETIF_F_TSO; + features &= ~NETIF_F_TSO; + } + if (!(esw_cfg->offload_flags & BIT_2)) { + netdev->features &= ~NETIF_F_TSO6; + features &= ~NETIF_F_TSO6; + } + } else { + netdev->features &= ~features; + features &= ~features; + adapter->rx_csum = 0; + } + + netdev->vlan_features = (features & vlan_features); } static int @@ -1981,10 +1995,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_enable_pcie_error_reporting(pdev); ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL); - if (!ahw) { - err = -ENOMEM; + if (!ahw) goto err_out_free_res; - } switch (ent->device) { case PCI_DEVICE_ID_QLOGIC_QLE824X: @@ -2020,7 +2032,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic"); if (adapter->qlcnic_wq == NULL) { - err = -ENOMEM; dev_err(&pdev->dev, "Failed to create workqueue\n"); goto err_out_free_netdev; } @@ -2101,10 +2112,6 @@ 
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_disable_msi; } - err = qlcnic_get_act_pci_func(adapter); - if (err) - goto err_out_disable_mbx_intr; - err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac); if (err) goto err_out_disable_mbx_intr; @@ -2134,6 +2141,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) break; } + if (qlcnic_get_act_pci_func(adapter)) + goto err_out_disable_mbx_intr; + if (adapter->drv_mac_learn) qlcnic_alloc_lb_filters_mem(adapter); @@ -2471,17 +2481,12 @@ static void qlcnic_tx_timeout(struct net_device *netdev) if (test_bit(__QLCNIC_RESETTING, &adapter->state)) return; - if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) { - netdev_info(netdev, "Tx timeout, reset the adapter.\n"); - if (qlcnic_82xx_check(adapter)) - adapter->need_fw_reset = 1; - else if (qlcnic_83xx_check(adapter)) - qlcnic_83xx_idc_request_reset(adapter, - QLCNIC_FORCE_FW_DUMP_KEY); - } else { - netdev_info(netdev, "Tx timeout, reset adapter context.\n"); + dev_err(&netdev->dev, "transmit timeout, resetting.\n"); + + if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) + adapter->need_fw_reset = 1; + else adapter->ahw->reset_context = 1; - } } static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) @@ -3118,8 +3123,10 @@ qlcnic_check_health(struct qlcnic_adapter *adapter) if (adapter->need_fw_reset) goto detach; - if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) + if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) { qlcnic_reset_hw_context(adapter); + adapter->netdev->trans_start = jiffies; + } return 0; } diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 196b2d100407..44d547d78b84 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -280,9 +280,9 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter) static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, u32 *pay, u8 pci_func, u8 size) { - u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0; struct qlcnic_hardware_context *ahw = adapter->ahw; unsigned long flags; + u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val; u16 opcode; u8 mbx_err_code; int i, j; @@ -330,13 +330,15 @@ static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, * assume something is wrong. 
*/ poll: - rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time); + rsp = qlcnic_83xx_mbx_poll(adapter); if (rsp != QLCNIC_RCODE_TIMEOUT) { /* Get the FW response data */ fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); - goto poll; + mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); + if (mbx_val) + goto poll; } mbx_err_code = QLCNIC_MBX_STATUS(fw_data); rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); @@ -1734,6 +1736,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter) if (!qlcnic_sriov_vf_reinit_driver(adapter)) { qlcnic_sriov_vf_attach(adapter); + adapter->netdev->trans_start = jiffies; adapter->tx_timeo_cnt = 0; adapter->reset_ctx_cnt = 0; adapter->fw_fail_cnt = 0; diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 1a66ccded235..c81be2da119b 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -1133,6 +1133,9 @@ static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf, if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id) return -EINVAL; + if (!(cmd->req.arg[1] & BIT_8)) + return -EINVAL; + return 0; } diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index e7a2fe21b649..4e22e794a186 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -544,9 +544,6 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, switch (esw_cfg[i].op_mode) { case QLCNIC_PORT_DEFAULTS: qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]); - rtnl_lock(); - qlcnic_set_netdev_features(adapter, &esw_cfg[i]); - rtnl_unlock(); break; case QLCNIC_ADD_VLAN: qlcnic_set_vlan_config(adapter, &esw_cfg[i]); diff --git a/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c index f87cc216045b..87463bc701a6 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1106,7 +1106,6 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, if (pci_dma_mapping_error(qdev->pdev, map)) { __free_pages(rx_ring->pg_chunk.page, qdev->lbq_buf_order); - rx_ring->pg_chunk.page = NULL; netif_err(qdev, drv, qdev->ndev, "PCI mapping failed.\n"); return -ENOMEM; @@ -2778,12 +2777,6 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring curr_idx = 0; } - if (rx_ring->pg_chunk.page) { - pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map, - ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); - put_page(rx_ring->pg_chunk.page); - rx_ring->pg_chunk.page = NULL; - } } static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) @@ -4717,7 +4710,6 @@ static int qlge_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "net device registration failed.\n"); ql_release_all(pdev); pci_disable_device(pdev); - free_netdev(ndev); return err; } /* Start up the timer to trigger EEH if diff --git a/trunk/drivers/net/ethernet/realtek/8139cp.c b/trunk/drivers/net/ethernet/realtek/8139cp.c index 03523459c406..7d1fb9ad1296 100644 --- a/trunk/drivers/net/ethernet/realtek/8139cp.c +++ b/trunk/drivers/net/ethernet/realtek/8139cp.c @@ -1136,7 +1136,6 @@ static void cp_clean_rings (struct cp_private *cp) cp->dev->stats.tx_dropped++; } } - netdev_reset_queue(cp->dev); 
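/*
 * Editorial aside, not the qlge driver's code: the qlge hunks above remove
 * the unmap/put_page of a cached rx page chunk on ring teardown.  The
 * sketch below shows the generic cleanup pattern with made-up names
 * (struct example_chunk, example_free_page_chunk); only pci_unmap_page(),
 * put_page() and PCI_DMA_FROMDEVICE are real kernel interfaces here.
 */
#include <linux/pci.h>
#include <linux/mm.h>

struct example_chunk {
	struct page	*page;	/* cached, partially consumed rx page */
	dma_addr_t	map;	/* its streaming DMA mapping */
};

static void example_free_page_chunk(struct pci_dev *pdev,
				    struct example_chunk *chunk,
				    size_t block_size)
{
	if (!chunk->page)
		return;

	/* Tear down the DMA mapping before dropping the page reference,
	 * and clear the pointer so the chunk cannot be freed twice. */
	pci_unmap_page(pdev, chunk->map, block_size, PCI_DMA_FROMDEVICE);
	put_page(chunk->page);
	chunk->page = NULL;
}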
memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); diff --git a/trunk/drivers/net/ethernet/realtek/r8169.c b/trunk/drivers/net/ethernet/realtek/r8169.c index 393f961a013c..79c520b64fdd 100644 --- a/trunk/drivers/net/ethernet/realtek/r8169.c +++ b/trunk/drivers/net/ethernet/realtek/r8169.c @@ -5856,20 +5856,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, return -EIO; } -static bool rtl_skb_pad(struct sk_buff *skb) -{ - if (skb_padto(skb, ETH_ZLEN)) - return false; - skb_put(skb, ETH_ZLEN - skb->len); - return true; -} - -static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb) -{ - return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34; -} - -static inline bool rtl8169_tso_csum(struct rtl8169_private *tp, +static inline void rtl8169_tso_csum(struct rtl8169_private *tp, struct sk_buff *skb, u32 *opts) { const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version; @@ -5882,20 +5869,13 @@ static inline bool rtl8169_tso_csum(struct rtl8169_private *tp, } else if (skb->ip_summed == CHECKSUM_PARTIAL) { const struct iphdr *ip = ip_hdr(skb); - if (unlikely(rtl_test_hw_pad_bug(tp, skb))) - return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb); - if (ip->protocol == IPPROTO_TCP) opts[offset] |= info->checksum.tcp; else if (ip->protocol == IPPROTO_UDP) opts[offset] |= info->checksum.udp; else WARN_ON_ONCE(1); - } else { - if (unlikely(rtl_test_hw_pad_bug(tp, skb))) - return rtl_skb_pad(skb); } - return true; } static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, @@ -5916,15 +5896,17 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, goto err_stop_0; } + /* 8168evl does not automatically pad to minimum length. 
*/ + if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 && + skb->len < ETH_ZLEN)) { + if (skb_padto(skb, ETH_ZLEN)) + goto err_update_stats; + skb_put(skb, ETH_ZLEN - skb->len); + } + if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) goto err_stop_0; - opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb)); - opts[0] = DescOwn; - - if (!rtl8169_tso_csum(tp, skb, opts)) - goto err_update_stats; - len = skb_headlen(skb); mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(d, mapping))) { @@ -5936,6 +5918,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, tp->tx_skb[entry].len = len; txd->addr = cpu_to_le64(mapping); + opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb)); + opts[0] = DescOwn; + + rtl8169_tso_csum(tp, skb, opts); + frags = rtl8169_xmit_frags(tp, skb, opts); if (frags < 0) goto err_dma_1; diff --git a/trunk/drivers/net/ethernet/renesas/sh_eth.c b/trunk/drivers/net/ethernet/renesas/sh_eth.c index e29fe8dbd226..33dc6f2418f2 100644 --- a/trunk/drivers/net/ethernet/renesas/sh_eth.c +++ b/trunk/drivers/net/ethernet/renesas/sh_eth.c @@ -380,9 +380,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesipr_value = 0x01ff009f, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, - .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | + EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, .apr = 1, @@ -428,9 +427,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, - .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | + EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, .apr = 1, @@ -480,9 +478,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .rmcr_value = 0x00000001, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, - .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | + EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, .apr = 1, @@ -595,9 +592,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tx_check = EESR_TC1 | EESR_FTC, - .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | - EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ + EESR_ECI, .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ EESR_TFE, .fdr_value = 0x0000072f, @@ -677,9 +674,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tx_check = EESR_TC1 | EESR_FTC, - .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | - EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ + EESR_ECI, .tx_error_check = 
EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ EESR_TFE, @@ -814,9 +811,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tx_check = EESR_TC1 | EESR_FTC, - .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | - EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ + EESR_ECI, .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ EESR_TFE, @@ -900,8 +897,8 @@ static int sh_eth_check_reset(struct net_device *ndev) mdelay(1); cnt--; } - if (cnt <= 0) { - pr_err("Device reset failed\n"); + if (cnt < 0) { + pr_err("Device reset fail\n"); ret = -ETIMEDOUT; } return ret; @@ -1404,23 +1401,16 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status) desc_status = edmac_to_cpu(mdp, rxdesc->status); pkt_len = rxdesc->frame_length; +#if defined(CONFIG_ARCH_R8A7740) + desc_status >>= 16; +#endif + if (--boguscnt < 0) break; if (!(desc_status & RDFEND)) ndev->stats.rx_length_errors++; -#if defined(CONFIG_ARCH_R8A7740) - /* - * In case of almost all GETHER/ETHERs, the Receive Frame State - * (RFS) bits in the Receive Descriptor 0 are from bit 9 to - * bit 0. However, in case of the R8A7740's GETHER, the RFS - * bits are from bit 25 to bit 16. So, the driver needs right - * shifting by 16. - */ - desc_status >>= 16; -#endif - if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | RD_RFS5 | RD_RFS6 | RD_RFS10)) { ndev->stats.rx_errors++; @@ -1552,12 +1542,11 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) ignore_link: if (intr_status & EESR_TWB) { - /* Unused write back interrupt */ - if (intr_status & EESR_TABT) { /* Transmit Abort int */ + /* Write buck end. 
unused write back interrupt */ + if (intr_status & EESR_TABT) /* Transmit Abort int */ ndev->stats.tx_aborted_errors++; if (netif_msg_tx_err(mdp)) dev_err(&ndev->dev, "Transmit Abort\n"); - } } if (intr_status & EESR_RABT) { @@ -2756,6 +2745,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev) if (mdp->cd->tsu) { struct resource *rtsu; rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!rtsu) { + dev_err(&pdev->dev, "Not found TSU resource\n"); + ret = -ENODEV; + goto out_release; + } mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); if (IS_ERR(mdp->tsu_addr)) { ret = PTR_ERR(mdp->tsu_addr); diff --git a/trunk/drivers/net/ethernet/renesas/sh_eth.h b/trunk/drivers/net/ethernet/renesas/sh_eth.h index 62689a5823be..1ddc9f235bcb 100644 --- a/trunk/drivers/net/ethernet/renesas/sh_eth.h +++ b/trunk/drivers/net/ethernet/renesas/sh_eth.h @@ -253,7 +253,7 @@ enum EESR_BIT { #define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \ EESR_RTO) -#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \ +#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \ EESR_RDE | EESR_RFRMER | EESR_ADE | \ EESR_TFE | EESR_TDE | EESR_ECI) #define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \ diff --git a/trunk/drivers/net/ethernet/sfc/efx.c b/trunk/drivers/net/ethernet/sfc/efx.c index 4a14a940c65e..01b99206139a 100644 --- a/trunk/drivers/net/ethernet/sfc/efx.c +++ b/trunk/drivers/net/ethernet/sfc/efx.c @@ -638,16 +638,14 @@ static void efx_start_datapath(struct efx_nic *efx) EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + efx->type->rx_buffer_padding); rx_buf_len = (sizeof(struct efx_rx_page_state) + - NET_IP_ALIGN + efx->rx_dma_len); + EFX_PAGE_IP_ALIGN + efx->rx_dma_len); if (rx_buf_len <= PAGE_SIZE) { efx->rx_scatter = false; efx->rx_buffer_order = 0; } else if (efx->type->can_rx_scatter) { - BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES); BUILD_BUG_ON(sizeof(struct efx_rx_page_state) + - 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE, - EFX_RX_BUF_ALIGNMENT) > - PAGE_SIZE); + EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE > + PAGE_SIZE / 2); efx->rx_scatter = true; efx->rx_dma_len = EFX_RX_USR_BUF_SIZE; efx->rx_buffer_order = 0; @@ -2139,7 +2137,7 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); return sprintf(buf, "%d\n", efx->phy_type); } -static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); +static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL); static int efx_register_netdev(struct efx_nic *efx) { diff --git a/trunk/drivers/net/ethernet/sfc/net_driver.h b/trunk/drivers/net/ethernet/sfc/net_driver.h index 39d6bd77f015..9bd433a095c5 100644 --- a/trunk/drivers/net/ethernet/sfc/net_driver.h +++ b/trunk/drivers/net/ethernet/sfc/net_driver.h @@ -72,20 +72,8 @@ /* Maximum possible MTU the driver supports */ #define EFX_MAX_MTU (9 * 1024) -/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page, - * and should be a multiple of the cache line size. - */ -#define EFX_RX_USR_BUF_SIZE (2048 - 256) - -/* If possible, we should ensure cache line alignment at start and end - * of every buffer. Otherwise, we just need to ensure 4-byte - * alignment of the network header. - */ -#if NET_IP_ALIGN == 0 -#define EFX_RX_BUF_ALIGNMENT L1_CACHE_BYTES -#else -#define EFX_RX_BUF_ALIGNMENT 4 -#endif +/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page. 
*/ +#define EFX_RX_USR_BUF_SIZE 1824 /* Forward declare Precision Time Protocol (PTP) support structure. */ struct efx_ptp_data; @@ -479,12 +467,25 @@ enum nic_state { STATE_RECOVERY = 3, /* device recovering from PCI error */ }; +/* + * Alignment of page-allocated RX buffers + * + * Controls the number of bytes inserted at the start of an RX buffer. + * This is the equivalent of NET_IP_ALIGN [which controls the alignment + * of the skb->head for hardware DMA]. + */ +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#define EFX_PAGE_IP_ALIGN 0 +#else +#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN +#endif + /* * Alignment of the skb->head which wraps a page-allocated RX buffer * * The skb allocated to wrap an rx_buffer can have this alignment. Since * the data is memcpy'd from the rx_buf, it does not need to be equal to - * NET_IP_ALIGN. + * EFX_PAGE_IP_ALIGN. */ #define EFX_PAGE_SKB_ALIGN 2 diff --git a/trunk/drivers/net/ethernet/sfc/rx.c b/trunk/drivers/net/ethernet/sfc/rx.c index a7dfe36cabf4..e73e30bac10e 100644 --- a/trunk/drivers/net/ethernet/sfc/rx.c +++ b/trunk/drivers/net/ethernet/sfc/rx.c @@ -93,8 +93,8 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx, void efx_rx_config_page_split(struct efx_nic *efx) { - efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN, - EFX_RX_BUF_ALIGNMENT); + efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN, + L1_CACHE_BYTES); efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / efx->rx_page_buf_step); @@ -188,9 +188,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue) do { index = rx_queue->added_count & rx_queue->ptr_mask; rx_buf = efx_rx_buffer(rx_queue, index); - rx_buf->dma_addr = dma_addr + NET_IP_ALIGN; + rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; rx_buf->page = page; - rx_buf->page_offset = page_offset + NET_IP_ALIGN; + rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN; rx_buf->len = efx->rx_dma_len; rx_buf->flags = 0; ++rx_queue->added_count; diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/Kconfig b/trunk/drivers/net/ethernet/stmicro/stmmac/Kconfig index 43c1f3223322..f695a50bac47 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -1,6 +1,6 @@ config STMMAC_ETH tristate "STMicroelectronics 10/100/1000 Ethernet driver" - depends on HAS_IOMEM && HAS_DMA + depends on HAS_IOMEM select NET_CORE select MII select PHYLIB diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/common.h b/trunk/drivers/net/ethernet/stmicro/stmmac/common.h index 95176979b2d2..7788fbe44f0a 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/common.h @@ -297,8 +297,8 @@ struct dma_features { #define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */ /* Default LPI timers */ -#define STMMAC_DEFAULT_LIT_LS 0x3E8 -#define STMMAC_DEFAULT_TWT_LS 0x0 +#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8 +#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0 #define STMMAC_CHAIN_MODE 0x1 #define STMMAC_RING_MODE 0x2 diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e9eab29db7be..618446ae1ec1 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -130,7 +130,7 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; module_param(eee_timer, int, S_IRUGO | 
S_IWUSR); MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); -#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x)) +#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x)) /* By default the driver will use the ring mode to manage tx and rx descriptors * but passing this value so user can force to use the chain instead of the ring @@ -288,7 +288,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg) struct stmmac_priv *priv = (struct stmmac_priv *)arg; stmmac_enable_eee_mode(priv); - mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer)); } /** @@ -304,34 +304,22 @@ bool stmmac_eee_init(struct stmmac_priv *priv) { bool ret = false; - /* Using PCS we cannot dial with the phy registers at this stage - * so we do not support extra feature like EEE. - */ - if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) || - (priv->pcs == STMMAC_PCS_RTBI)) - goto out; - /* MAC core supports the EEE feature. */ if (priv->dma_cap.eee) { /* Check if the PHY supports EEE */ if (phy_init_eee(priv->phydev, 1)) goto out; - if (!priv->eee_active) { - priv->eee_active = 1; - init_timer(&priv->eee_ctrl_timer); - priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; - priv->eee_ctrl_timer.data = (unsigned long)priv; - priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer); - add_timer(&priv->eee_ctrl_timer); - - priv->hw->mac->set_eee_timer(priv->ioaddr, - STMMAC_DEFAULT_LIT_LS, - priv->tx_lpi_timer); - } else - /* Set HW EEE according to the speed */ - priv->hw->mac->set_eee_pls(priv->ioaddr, - priv->phydev->link); + priv->eee_active = 1; + init_timer(&priv->eee_ctrl_timer); + priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; + priv->eee_ctrl_timer.data = (unsigned long)priv; + priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer); + add_timer(&priv->eee_ctrl_timer); + + priv->hw->mac->set_eee_timer(priv->ioaddr, + STMMAC_DEFAULT_LIT_LS_TIMER, + priv->tx_lpi_timer); pr_info("stmmac: Energy-Efficient Ethernet initialized\n"); @@ -341,6 +329,20 @@ bool stmmac_eee_init(struct stmmac_priv *priv) return ret; } +/** + * stmmac_eee_adjust: adjust HW EEE according to the speed + * @priv: driver private structure + * Description: + * When the EEE has been already initialised we have to + * modify the PLS bit in the LPI ctrl & status reg according + * to the PHY link status. For this reason. + */ +static void stmmac_eee_adjust(struct stmmac_priv *priv) +{ + if (priv->eee_enabled) + priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link); +} + /* stmmac_get_tx_hwtstamp: get HW TX timestamps * @priv: driver private structure * @entry : descriptor index to be used. @@ -767,10 +769,7 @@ static void stmmac_adjust_link(struct net_device *dev) if (new_state && netif_msg_link(priv)) phy_print_status(phydev); - /* At this stage, it could be needed to setup the EEE or adjust some - * MAC related HW registers. 
- */ - priv->eee_enabled = stmmac_eee_init(priv); + stmmac_eee_adjust(priv); spin_unlock_irqrestore(&priv->lock, flags); @@ -1278,7 +1277,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { stmmac_enable_eee_mode(priv); - mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer)); } spin_unlock(&priv->tx_lock); } @@ -1672,9 +1671,14 @@ static int stmmac_open(struct net_device *dev) if (priv->phydev) phy_start(priv->phydev); - priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; + priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER; - priv->eee_enabled = stmmac_eee_init(priv); + /* Using PCS we cannot dial with the phy registers at this stage + * so we do not support extra feature like EEE. + */ + if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && + priv->pcs != STMMAC_PCS_RTBI) + priv->eee_enabled = stmmac_eee_init(priv); stmmac_init_tx_coalesce(priv); @@ -1895,7 +1899,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) #ifdef STMMAC_XMIT_DEBUG if (netif_msg_pktdata(priv)) { - pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d", + pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d" __func__, (priv->cur_tx % txsize), (priv->dirty_tx % txsize), entry, first, nfrags); if (priv->extend_desc) diff --git a/trunk/drivers/net/ethernet/ti/cpsw.c b/trunk/drivers/net/ethernet/ti/cpsw.c index d1a769f35f9d..21a5b291b4b3 100644 --- a/trunk/drivers/net/ethernet/ti/cpsw.c +++ b/trunk/drivers/net/ethernet/ti/cpsw.c @@ -1679,7 +1679,7 @@ static int cpsw_probe(struct platform_device *pdev) priv->rx_packet_max = max(rx_packet_max, 128); priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); priv->irq_enabled = true; - if (!priv->cpts) { + if (!ndev) { pr_err("error allocating cpts\n"); goto clean_ndev_ret; } @@ -1973,12 +1973,9 @@ static int cpsw_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); - struct cpsw_priv *priv = netdev_priv(ndev); if (netif_running(ndev)) cpsw_ndo_stop(ndev); - soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset); - soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset); pm_runtime_put_sync(&pdev->dev); return 0; diff --git a/trunk/drivers/net/ethernet/ti/davinci_cpdma.c b/trunk/drivers/net/ethernet/ti/davinci_cpdma.c index 053c84fd0853..49dfd592ac1e 100644 --- a/trunk/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/trunk/drivers/net/ethernet/ti/davinci_cpdma.c @@ -705,13 +705,6 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, } buffer = dma_map_single(ctlr->dev, data, len, chan->dir); - ret = dma_mapping_error(ctlr->dev, buffer); - if (ret) { - cpdma_desc_free(ctlr->pool, desc, 1); - ret = -EINVAL; - goto unlock_ret; - } - mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP; cpdma_desc_to_port(chan, mode, directed); diff --git a/trunk/drivers/net/ethernet/ti/davinci_mdio.c b/trunk/drivers/net/ethernet/ti/davinci_mdio.c index c47f0dbcebb5..12aec173564c 100644 --- a/trunk/drivers/net/ethernet/ti/davinci_mdio.c +++ b/trunk/drivers/net/ethernet/ti/davinci_mdio.c @@ -449,9 +449,10 @@ static int davinci_mdio_suspend(struct device *dev) __raw_writel(ctrl, &data->regs->control); wait_for_idle(data); + pm_runtime_put_sync(data->dev); + data->suspended = true; spin_unlock(&data->lock); - pm_runtime_put_sync(data->dev); return 0; } @@ -459,12 +460,15 @@ static int 
davinci_mdio_suspend(struct device *dev) static int davinci_mdio_resume(struct device *dev) { struct davinci_mdio_data *data = dev_get_drvdata(dev); + u32 ctrl; + spin_lock(&data->lock); pm_runtime_get_sync(data->dev); - spin_lock(&data->lock); /* restart the scan state machine */ - __davinci_mdio_reset(data); + ctrl = __raw_readl(&data->regs->control); + ctrl |= CONTROL_ENABLE; + __raw_writel(ctrl, &data->regs->control); data->suspended = false; spin_unlock(&data->lock); @@ -473,8 +477,8 @@ static int davinci_mdio_resume(struct device *dev) } static const struct dev_pm_ops davinci_mdio_pm_ops = { - .suspend_late = davinci_mdio_suspend, - .resume_early = davinci_mdio_resume, + .suspend = davinci_mdio_suspend, + .resume = davinci_mdio_resume, }; static const struct of_device_id davinci_mdio_of_mtable[] = { diff --git a/trunk/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/trunk/drivers/net/ethernet/xilinx/xilinx_emaclite.c index b7268b3dae77..919b983114e9 100644 --- a/trunk/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/trunk/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -946,8 +946,7 @@ static int xemaclite_open(struct net_device *dev) phy_write(lp->phy_dev, MII_CTRL1000, 0); /* Advertise only 10 and 100mbps full/half duplex speeds */ - phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL | - ADVERTISE_CSMA); + phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL); /* Restart auto negotiation */ bmcr = phy_read(lp->phy_dev, MII_BMCR); diff --git a/trunk/drivers/net/hyperv/netvsc_drv.c b/trunk/drivers/net/hyperv/netvsc_drv.c index 4dccead586be..088c55496191 100644 --- a/trunk/drivers/net/hyperv/netvsc_drv.c +++ b/trunk/drivers/net/hyperv/netvsc_drv.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include @@ -285,9 +284,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, skb->protocol = eth_type_trans(skb, net); skb->ip_summed = CHECKSUM_NONE; - if (packet->vlan_tci & VLAN_TAG_PRESENT) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), - packet->vlan_tci); + skb->vlan_tci = packet->vlan_tci; net->stats.rx_packets++; net->stats.rx_bytes += packet->total_data_buflen; diff --git a/trunk/drivers/net/macvlan.c b/trunk/drivers/net/macvlan.c index 6e91931a1c2c..d5a141c7c4e7 100644 --- a/trunk/drivers/net/macvlan.c +++ b/trunk/drivers/net/macvlan.c @@ -229,8 +229,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) } if (port->passthru) - vlan = list_first_or_null_rcu(&port->vlans, - struct macvlan_dev, list); + vlan = list_first_entry(&port->vlans, struct macvlan_dev, list); else vlan = macvlan_hash_lookup(port, eth->h_dest); if (vlan == NULL) @@ -815,7 +814,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, if (err < 0) goto upper_dev_unlink; - list_add_tail_rcu(&vlan->list, &port->vlans); + list_add_tail(&vlan->list, &port->vlans); netif_stacked_transfer_operstate(lowerdev, dev); return 0; @@ -843,7 +842,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head) { struct macvlan_dev *vlan = netdev_priv(dev); - list_del_rcu(&vlan->list); + list_del(&vlan->list); unregister_netdevice_queue(dev, head); netdev_upper_dev_unlink(vlan->lowerdev, dev); } @@ -853,24 +852,18 @@ static int macvlan_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct macvlan_dev *vlan = netdev_priv(dev); - + if (data && data[IFLA_MACVLAN_MODE]) + vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); if (data && data[IFLA_MACVLAN_FLAGS]) { __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); 
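/*
 * Editorial aside, not macvlan code: the macvlan hunks above replace the
 * RCU list helpers (list_add_tail_rcu/list_del_rcu/list_first_or_null_rcu)
 * with their plain counterparts.  The sketch below shows the RCU variant of
 * that pattern with hypothetical types (example_port/example_dev); the list
 * and RCU primitives themselves are the real kernel APIs.
 */
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct example_port {
	struct list_head	vlans;	/* writer-managed list head */
};

struct example_dev {
	struct list_head	list;
};

/* Writer side: callers hold whatever lock serialises add/remove. */
static void example_add(struct example_port *port, struct example_dev *dev)
{
	list_add_tail_rcu(&dev->list, &port->vlans);
}

static void example_del(struct example_dev *dev)
{
	list_del_rcu(&dev->list);
	/* The entry may only be freed after a grace period,
	 * e.g. via kfree_rcu() or after synchronize_rcu(). */
}

/* Reader side: caller must be inside rcu_read_lock()/rcu_read_unlock(),
 * as a netdev rx handler already is. */
static struct example_dev *example_first(struct example_port *port)
{
	return list_first_or_null_rcu(&port->vlans, struct example_dev, list);
}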
bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC; - if (vlan->port->passthru && promisc) { - int err; - - if (flags & MACVLAN_FLAG_NOPROMISC) - err = dev_set_promiscuity(vlan->lowerdev, -1); - else - err = dev_set_promiscuity(vlan->lowerdev, 1); - if (err < 0) - return err; - } + + if (promisc && (flags & MACVLAN_FLAG_NOPROMISC)) + dev_set_promiscuity(vlan->lowerdev, -1); + else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC)) + dev_set_promiscuity(vlan->lowerdev, 1); vlan->flags = flags; } - if (data && data[IFLA_MACVLAN_MODE]) - vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); return 0; } diff --git a/trunk/drivers/net/macvtap.c b/trunk/drivers/net/macvtap.c index b6dd6a75919a..59e9605de316 100644 --- a/trunk/drivers/net/macvtap.c +++ b/trunk/drivers/net/macvtap.c @@ -524,10 +524,8 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, return -EMSGSIZE; num_pages = get_user_pages_fast(base, size, 0, &page[i]); if (num_pages != size) { - int j; - - for (j = 0; j < num_pages; j++) - put_page(page[i + j]); + for (i = 0; i < num_pages; i++) + put_page(page[i]); return -EFAULT; } truesize = size * PAGE_SIZE; diff --git a/trunk/drivers/net/ntb_netdev.c b/trunk/drivers/net/ntb_netdev.c index f3cdf64997d6..ed947dd76fbd 100644 --- a/trunk/drivers/net/ntb_netdev.c +++ b/trunk/drivers/net/ntb_netdev.c @@ -375,8 +375,6 @@ static void ntb_netdev_remove(struct pci_dev *pdev) if (dev == NULL) return; - list_del(&dev->list); - ndev = dev->ndev; unregister_netdev(ndev); diff --git a/trunk/drivers/net/phy/phy.c b/trunk/drivers/net/phy/phy.c index 38f0b312ff85..c14f14741b3f 100644 --- a/trunk/drivers/net/phy/phy.c +++ b/trunk/drivers/net/phy/phy.c @@ -1044,7 +1044,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); idx = phy_find_setting(phydev->speed, phydev->duplex); - if (!(lp & adv & settings[idx].setting)) + if ((lp & adv & settings[idx].setting)) goto eee_exit; if (clk_stop_enable) { diff --git a/trunk/drivers/net/team/team.c b/trunk/drivers/net/team/team.c index b3051052f3ad..7c43261975bd 100644 --- a/trunk/drivers/net/team/team.c +++ b/trunk/drivers/net/team/team.c @@ -1092,8 +1092,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev) } port->index = -1; - list_add_tail_rcu(&port->list, &team->port_list); team_port_enable(team, port); + list_add_tail_rcu(&port->list, &team->port_list); __team_compute_features(team); __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); __team_options_change_check(team); @@ -2374,8 +2374,7 @@ static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq, bool incomplete; int i; - port = list_first_entry_or_null(&team->port_list, - struct team_port, list); + port = list_first_entry(&team->port_list, struct team_port, list); start_again: err = __send_and_alloc_skb(&skb, team, portid, send_func); @@ -2403,8 +2402,8 @@ static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq, err = team_nl_fill_one_port_get(skb, one_port); if (err) goto errout; - } else if (port) { - list_for_each_entry_from(port, &team->port_list, list) { + } else { + list_for_each_entry(port, &team->port_list, list) { err = team_nl_fill_one_port_get(skb, port); if (err) { if (err == -EMSGSIZE) { diff --git a/trunk/drivers/net/team/team_mode_random.c b/trunk/drivers/net/team/team_mode_random.c index 7f032e211343..5ca14d463ba7 100644 --- a/trunk/drivers/net/team/team_mode_random.c 
+++ b/trunk/drivers/net/team/team_mode_random.c @@ -28,8 +28,6 @@ static bool rnd_transmit(struct team *team, struct sk_buff *skb) port_index = random_N(team->en_port_count); port = team_get_port_by_index_rcu(team, port_index); - if (unlikely(!port)) - goto drop; port = team_get_first_port_txable_rcu(team, port); if (unlikely(!port)) goto drop; diff --git a/trunk/drivers/net/team/team_mode_roundrobin.c b/trunk/drivers/net/team/team_mode_roundrobin.c index 472623f8ce3d..d268e4de781b 100644 --- a/trunk/drivers/net/team/team_mode_roundrobin.c +++ b/trunk/drivers/net/team/team_mode_roundrobin.c @@ -32,8 +32,6 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb) port_index = rr_priv(team)->sent_packets++ % team->en_port_count; port = team_get_port_by_index_rcu(team, port_index); - if (unlikely(!port)) - goto drop; port = team_get_first_port_txable_rcu(team, port); if (unlikely(!port)) goto drop; diff --git a/trunk/drivers/net/tun.c b/trunk/drivers/net/tun.c index 9c61f8734a40..f042b0373e5d 100644 --- a/trunk/drivers/net/tun.c +++ b/trunk/drivers/net/tun.c @@ -352,7 +352,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb) u32 numqueues = 0; rcu_read_lock(); - numqueues = ACCESS_ONCE(tun->numqueues); + numqueues = tun->numqueues; txq = skb_get_rxhash(skb); if (txq) { @@ -1010,10 +1010,8 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, return -EMSGSIZE; num_pages = get_user_pages_fast(base, size, 0, &page[i]); if (num_pages != size) { - int j; - - for (j = 0; j < num_pages; j++) - put_page(page[i + j]); + for (i = 0; i < num_pages; i++) + put_page(page[i]); return -EFAULT; } truesize = size * PAGE_SIZE; @@ -1587,10 +1585,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) else return -EINVAL; - if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != - !!(tun->flags & TUN_TAP_MQ)) - return -EINVAL; - if (tun_not_capable(tun)) return -EPERM; err = security_tun_dev_open(tun->security); @@ -2161,8 +2155,6 @@ static int tun_chr_open(struct inode *inode, struct file * file) set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags); INIT_LIST_HEAD(&tfile->next); - sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); - return 0; } diff --git a/trunk/drivers/net/usb/cdc_ether.c b/trunk/drivers/net/usb/cdc_ether.c index 04ee044dde51..078795fe6e31 100644 --- a/trunk/drivers/net/usb/cdc_ether.c +++ b/trunk/drivers/net/usb/cdc_ether.c @@ -627,12 +627,6 @@ static const struct usb_device_id products [] = { .driver_info = 0, }, -/* Huawei E1820 - handled by qmi_wwan */ -{ - USB_DEVICE_INTERFACE_NUMBER(HUAWEI_VENDOR_ID, 0x14ac, 1), - .driver_info = 0, -}, - /* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */ #if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE) { diff --git a/trunk/drivers/net/usb/qmi_wwan.c b/trunk/drivers/net/usb/qmi_wwan.c index 56459215a22b..cf887c2384e9 100644 --- a/trunk/drivers/net/usb/qmi_wwan.c +++ b/trunk/drivers/net/usb/qmi_wwan.c @@ -519,7 +519,6 @@ static const struct usb_device_id products[] = { /* 3. 
Combined interface devices matching on interface number */ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ - {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, {QMI_FIXED_INTF(0x19d2, 0x0017, 3)}, @@ -583,20 +582,13 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ - {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)}, /* Cinterion PLxx */ /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ {QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */ - {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel/Verizon USB-1000 */ - {QMI_GOBI1K_DEVICE(0x1410, 0xa002)}, /* Novatel Gobi Modem device */ - {QMI_GOBI1K_DEVICE(0x1410, 0xa003)}, /* Novatel Gobi Modem device */ - {QMI_GOBI1K_DEVICE(0x1410, 0xa004)}, /* Novatel Gobi Modem device */ - {QMI_GOBI1K_DEVICE(0x1410, 0xa005)}, /* Novatel Gobi Modem device */ - {QMI_GOBI1K_DEVICE(0x1410, 0xa006)}, /* Novatel Gobi Modem device */ - {QMI_GOBI1K_DEVICE(0x1410, 0xa007)}, /* Novatel Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */ diff --git a/trunk/drivers/net/usb/rtl8150.c b/trunk/drivers/net/usb/rtl8150.c index 6cbdac67f3a0..a491d3a95393 100644 --- a/trunk/drivers/net/usb/rtl8150.c +++ b/trunk/drivers/net/usb/rtl8150.c @@ -130,23 +130,19 @@ struct rtl8150 { struct usb_device *udev; struct tasklet_struct tl; struct net_device *netdev; - struct urb *rx_urb, *tx_urb, *intr_urb; + struct urb *rx_urb, *tx_urb, *intr_urb, *ctrl_urb; struct sk_buff *tx_skb, *rx_skb; struct sk_buff *rx_skb_pool[RX_SKB_POOL_SIZE]; spinlock_t rx_pool_lock; struct usb_ctrlrequest dr; int intr_interval; + __le16 rx_creg; u8 *intr_buff; u8 phy; }; typedef struct rtl8150 rtl8150_t; -struct async_req { - struct usb_ctrlrequest dr; - u16 rx_creg; -}; - static const char driver_name [] = "rtl8150"; /* @@ -168,47 +164,51 @@ static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) indx, 0, data, size, 500); } -static void async_set_reg_cb(struct urb *urb) +static void ctrl_callback(struct urb *urb) { - struct async_req *req = (struct async_req *)urb->context; + rtl8150_t *dev; int status = urb->status; - if (status < 0) - dev_dbg(&urb->dev->dev, "%s failed with %d", __func__, status); - kfree(req); - usb_free_urb(urb); + switch (status) { + case 0: + break; + case -EINPROGRESS: + break; + case -ENOENT: + break; + default: + if (printk_ratelimit()) + dev_warn(&urb->dev->dev, "ctrl urb status %d\n", status); + } + dev = urb->context; + clear_bit(RX_REG_SET, &dev->flags); } -static int async_set_registers(rtl8150_t *dev, u16 indx, u16 size, u16 reg) +static int async_set_registers(rtl8150_t * dev, u16 indx, u16 size) { - int res = -ENOMEM; - struct urb *async_urb; - struct async_req *req; + int ret; - req = kmalloc(sizeof(struct async_req), GFP_ATOMIC); - if (req == NULL) - return res; - async_urb 
= usb_alloc_urb(0, GFP_ATOMIC); - if (async_urb == NULL) { - kfree(req); - return res; - } - req->rx_creg = cpu_to_le16(reg); - req->dr.bRequestType = RTL8150_REQT_WRITE; - req->dr.bRequest = RTL8150_REQ_SET_REGS; - req->dr.wIndex = 0; - req->dr.wValue = cpu_to_le16(indx); - req->dr.wLength = cpu_to_le16(size); - usb_fill_control_urb(async_urb, dev->udev, - usb_sndctrlpipe(dev->udev, 0), (void *)&req->dr, - &req->rx_creg, size, async_set_reg_cb, req); - res = usb_submit_urb(async_urb, GFP_ATOMIC); - if (res) { - if (res == -ENODEV) + if (test_bit(RX_REG_SET, &dev->flags)) + return -EAGAIN; + + dev->dr.bRequestType = RTL8150_REQT_WRITE; + dev->dr.bRequest = RTL8150_REQ_SET_REGS; + dev->dr.wValue = cpu_to_le16(indx); + dev->dr.wIndex = 0; + dev->dr.wLength = cpu_to_le16(size); + dev->ctrl_urb->transfer_buffer_length = size; + usb_fill_control_urb(dev->ctrl_urb, dev->udev, + usb_sndctrlpipe(dev->udev, 0), (char *) &dev->dr, + &dev->rx_creg, size, ctrl_callback, dev); + if ((ret = usb_submit_urb(dev->ctrl_urb, GFP_ATOMIC))) { + if (ret == -ENODEV) netif_device_detach(dev->netdev); - dev_err(&dev->udev->dev, "%s failed with %d\n", __func__, res); - } - return res; + dev_err(&dev->udev->dev, + "control request submission failed: %d\n", ret); + } else + set_bit(RX_REG_SET, &dev->flags); + + return ret; } static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg) @@ -330,6 +330,13 @@ static int alloc_all_urbs(rtl8150_t * dev) usb_free_urb(dev->tx_urb); return 0; } + dev->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!dev->ctrl_urb) { + usb_free_urb(dev->rx_urb); + usb_free_urb(dev->tx_urb); + usb_free_urb(dev->intr_urb); + return 0; + } return 1; } @@ -339,6 +346,7 @@ static void free_all_urbs(rtl8150_t * dev) usb_free_urb(dev->rx_urb); usb_free_urb(dev->tx_urb); usb_free_urb(dev->intr_urb); + usb_free_urb(dev->ctrl_urb); } static void unlink_all_urbs(rtl8150_t * dev) @@ -346,6 +354,7 @@ static void unlink_all_urbs(rtl8150_t * dev) usb_kill_urb(dev->rx_urb); usb_kill_urb(dev->tx_urb); usb_kill_urb(dev->intr_urb); + usb_kill_urb(dev->ctrl_urb); } static inline struct sk_buff *pull_skb(rtl8150_t *dev) @@ -620,6 +629,7 @@ static int enable_net_traffic(rtl8150_t * dev) } /* RCR bit7=1 attach Rx info at the end; =0 HW CRC (which is broken) */ rcr = 0x9e; + dev->rx_creg = cpu_to_le16(rcr); tcr = 0xd8; cr = 0x0c; if (!(rcr & 0x80)) @@ -652,22 +662,20 @@ static void rtl8150_tx_timeout(struct net_device *netdev) static void rtl8150_set_multicast(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); - u16 rx_creg = 0x9e; - netif_stop_queue(netdev); if (netdev->flags & IFF_PROMISC) { - rx_creg |= 0x0001; + dev->rx_creg |= cpu_to_le16(0x0001); dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name); } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) { - rx_creg &= 0xfffe; - rx_creg |= 0x0002; + dev->rx_creg &= cpu_to_le16(0xfffe); + dev->rx_creg |= cpu_to_le16(0x0002); dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name); } else { /* ~RX_MULTICAST, ~RX_PROMISCUOUS */ - rx_creg &= 0x00fc; + dev->rx_creg &= cpu_to_le16(0x00fc); } - async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg); + async_set_registers(dev, RCR, 2); netif_wake_queue(netdev); } diff --git a/trunk/drivers/net/usb/usbnet.c b/trunk/drivers/net/usb/usbnet.c index 06ee82f557d4..f95cb032394b 100644 --- a/trunk/drivers/net/usb/usbnet.c +++ b/trunk/drivers/net/usb/usbnet.c @@ -1477,7 +1477,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) /* usbnet already 
took usb runtime pm, so have to enable the feature * for usb interface, otherwise usb_autopm_get_interface may return - * failure if RUNTIME_PM is enabled. + * failure if USB_SUSPEND(RUNTIME_PM) is enabled. */ if (!driver->supports_autosuspend) { driver->supports_autosuspend = 1; diff --git a/trunk/drivers/net/virtio_net.c b/trunk/drivers/net/virtio_net.c index c9e00387d999..3c23fdc27bf0 100644 --- a/trunk/drivers/net/virtio_net.c +++ b/trunk/drivers/net/virtio_net.c @@ -28,7 +28,7 @@ #include #include -static int napi_weight = NAPI_POLL_WEIGHT; +static int napi_weight = 128; module_param(napi_weight, int, 0444); static bool csum = true, gso = true; @@ -636,11 +636,10 @@ static int virtnet_open(struct net_device *dev) struct virtnet_info *vi = netdev_priv(dev); int i; - for (i = 0; i < vi->max_queue_pairs; i++) { - if (i < vi->curr_queue_pairs) - /* Make sure we have some buffers: if oom use wq. */ - if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) - schedule_delayed_work(&vi->refill, 0); + for (i = 0; i < vi->curr_queue_pairs; i++) { + /* Make sure we have some buffers: if oom use wq. */ + if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) + schedule_delayed_work(&vi->refill, 0); virtnet_napi_enable(&vi->rq[i]); } diff --git a/trunk/drivers/net/vxlan.c b/trunk/drivers/net/vxlan.c index 57325f356d4f..ba81f3c39a83 100644 --- a/trunk/drivers/net/vxlan.c +++ b/trunk/drivers/net/vxlan.c @@ -301,7 +301,7 @@ static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan, } /* Look up Ethernet address in forwarding table */ -static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, +static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, const u8 *mac) { @@ -316,18 +316,6 @@ static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, return NULL; } -static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, - const u8 *mac) -{ - struct vxlan_fdb *f; - - f = __vxlan_find_mac(vxlan, mac); - if (f) - f->used = jiffies; - - return f; -} - /* Add/update destinations for multicast */ static int vxlan_fdb_append(struct vxlan_fdb *f, __be32 ip, __be16 port, __u32 vni, __u32 ifindex) @@ -365,7 +353,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, struct vxlan_fdb *f; int notify = 0; - f = __vxlan_find_mac(vxlan, mac); + f = vxlan_find_mac(vxlan, mac); if (f) { if (flags & NLM_F_EXCL) { netdev_dbg(vxlan->dev, @@ -565,22 +553,19 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, /* Watch incoming packets to learn mapping between Ethernet address * and Tunnel endpoint. - * Return true if packet is bogus and should be droppped. 
*/ -static bool vxlan_snoop(struct net_device *dev, +static void vxlan_snoop(struct net_device *dev, __be32 src_ip, const u8 *src_mac) { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_fdb *f; + int err; f = vxlan_find_mac(vxlan, src_mac); if (likely(f)) { + f->used = jiffies; if (likely(f->remote.remote_ip == src_ip)) - return false; - - /* Don't migrate static entries, drop packets */ - if (f->state & NUD_NOARP) - return true; + return; if (net_ratelimit()) netdev_info(dev, @@ -592,19 +577,14 @@ static bool vxlan_snoop(struct net_device *dev, } else { /* learned new entry */ spin_lock(&vxlan->hash_lock); - - /* close off race between vxlan_flush and incoming packets */ - if (netif_running(dev)) - vxlan_fdb_create(vxlan, src_mac, src_ip, - NUD_REACHABLE, - NLM_F_EXCL|NLM_F_CREATE, - vxlan->dst_port, - vxlan->default_dst.remote_vni, - 0, NTF_SELF); + err = vxlan_fdb_create(vxlan, src_mac, src_ip, + NUD_REACHABLE, + NLM_F_EXCL|NLM_F_CREATE, + vxlan->dst_port, + vxlan->default_dst.remote_vni, + 0, NTF_SELF); spin_unlock(&vxlan->hash_lock); } - - return false; } @@ -736,9 +716,8 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) vxlan->dev->dev_addr) == 0) goto drop; - if ((vxlan->flags & VXLAN_F_LEARN) && - vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source)) - goto drop; + if (vxlan->flags & VXLAN_F_LEARN) + vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source); __skb_tunnel_rx(skb, vxlan->dev); skb_reset_network_header(skb); @@ -1161,11 +1140,9 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) struct sk_buff *skb1; skb1 = skb_clone(skb, GFP_ATOMIC); - if (skb1) { - rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc); - if (rc == NETDEV_TX_OK) - rc = rc1; - } + rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc); + if (rc == NETDEV_TX_OK) + rc = rc1; } rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc); diff --git a/trunk/drivers/net/wan/dlci.c b/trunk/drivers/net/wan/dlci.c index 6a8a382c5f4c..147614ed86aa 100644 --- a/trunk/drivers/net/wan/dlci.c +++ b/trunk/drivers/net/wan/dlci.c @@ -384,37 +384,21 @@ static int dlci_del(struct dlci_add *dlci) struct frad_local *flp; struct net_device *master, *slave; int err; - bool found = false; - - rtnl_lock(); /* validate slave device */ master = __dev_get_by_name(&init_net, dlci->devname); - if (!master) { - err = -ENODEV; - goto out; - } - - list_for_each_entry(dlp, &dlci_devs, list) { - if (dlp->master == master) { - found = true; - break; - } - } - if (!found) { - err = -ENODEV; - goto out; - } + if (!master) + return -ENODEV; if (netif_running(master)) { - err = -EBUSY; - goto out; + return -EBUSY; } dlp = netdev_priv(master); slave = dlp->slave; flp = netdev_priv(slave); + rtnl_lock(); err = (*flp->deassoc)(slave, master); if (!err) { list_del(&dlp->list); @@ -423,8 +407,8 @@ static int dlci_del(struct dlci_add *dlci) dev_put(slave); } -out: rtnl_unlock(); + return err; } diff --git a/trunk/drivers/net/wireless/ath/ath5k/base.c b/trunk/drivers/net/wireless/ath/ath5k/base.c index 7f702fe3ecc2..9b20d9ee2719 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/base.c +++ b/trunk/drivers/net/wireless/ath/ath5k/base.c @@ -2369,9 +2369,6 @@ ath5k_tx_complete_poll_work(struct work_struct *work) int i; bool needreset = false; - if (!test_bit(ATH_STAT_STARTED, ah->status)) - return; - mutex_lock(&ah->lock); for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) { @@ -2679,7 +2676,6 @@ int ath5k_start(struct ieee80211_hw *hw) mmiowb(); mutex_unlock(&ah->lock); - set_bit(ATH_STAT_STARTED, ah->status); 
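/*
 * Illustrative sketch, not the driver's actual code: the ath5k hunk around
 * this point removes an ATH_STAT_STARTED guard from the tx-complete poll
 * worker. The general pattern being touched -- a periodic delayed work that
 * refuses to run unless a "started" bit is set, so it can never poke
 * hardware after stop() -- looks roughly like the following; all names and
 * the poll interval here are assumptions for illustration only.
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define EXAMPLE_STAT_STARTED	0	/* hypothetical flag bit */
#define EXAMPLE_POLL_MS		1000	/* hypothetical poll interval */

struct example_hw {
	unsigned long status;		/* flag word for {set,clear,test}_bit() */
	struct delayed_work poll_work;	/* assumed set up with INIT_DELAYED_WORK() */
};

static void example_poll_work(struct work_struct *work)
{
	struct example_hw *hw = container_of(work, struct example_hw,
					     poll_work.work);

	/* Bail out early once the device has been stopped. */
	if (!test_bit(EXAMPLE_STAT_STARTED, &hw->status))
		return;

	/* ... inspect tx queues, reset hardware if they look stuck ... */

	schedule_delayed_work(&hw->poll_work, msecs_to_jiffies(EXAMPLE_POLL_MS));
}

static void example_start(struct example_hw *hw)
{
	set_bit(EXAMPLE_STAT_STARTED, &hw->status);
	schedule_delayed_work(&hw->poll_work, msecs_to_jiffies(EXAMPLE_POLL_MS));
}

static void example_stop(struct example_hw *hw)
{
	clear_bit(EXAMPLE_STAT_STARTED, &hw->status);
	cancel_delayed_work_sync(&hw->poll_work);	/* no new polls after this */
}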
ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work, msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT)); @@ -2741,7 +2737,6 @@ void ath5k_stop(struct ieee80211_hw *hw) ath5k_stop_tasklets(ah); - clear_bit(ATH_STAT_STARTED, ah->status); cancel_delayed_work_sync(&ah->tx_complete_work); if (!ath5k_modparam_no_hw_rfkill_switch) diff --git a/trunk/drivers/net/wireless/ath/ath9k/Kconfig b/trunk/drivers/net/wireless/ath/ath9k/Kconfig index 3c2cbc9d6295..17507dc8a1e7 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/Kconfig +++ b/trunk/drivers/net/wireless/ath/ath9k/Kconfig @@ -17,7 +17,7 @@ config ATH9K_BTCOEX_SUPPORT config ATH9K tristate "Atheros 802.11n wireless cards support" - depends on MAC80211 && HAS_DMA + depends on MAC80211 select ATH9K_HW select MAC80211_LEDS select LEDS_CLASS @@ -92,17 +92,13 @@ config ATH9K_MAC_DEBUG This option enables collection of statistics for Rx/Tx status data and some other MAC related statistics -config ATH9K_LEGACY_RATE_CONTROL +config ATH9K_RATE_CONTROL bool "Atheros ath9k rate control" depends on ATH9K - default n + default y ---help--- Say Y, if you want to use the ath9k specific rate control - module instead of minstrel_ht. Be warned that there are various - issues with the ath9k RC and minstrel is a more robust algorithm. - Note that even if this option is selected, "ath9k_rate_control" - has to be passed to mac80211 using the module parameter, - ieee80211_default_rc_algo. + module instead of minstrel_ht. config ATH9K_HTC tristate "Atheros HTC based wireless cards support" diff --git a/trunk/drivers/net/wireless/ath/ath9k/Makefile b/trunk/drivers/net/wireless/ath/ath9k/Makefile index 75ee9e7704ce..2ad8f9474ba1 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/Makefile +++ b/trunk/drivers/net/wireless/ath/ath9k/Makefile @@ -8,7 +8,7 @@ ath9k-y += beacon.o \ antenna.o ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o -ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o +ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o ath9k-$(CONFIG_ATH9K_PCI) += pci.o ath9k-$(CONFIG_ATH9K_AHB) += ahb.o ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/trunk/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index 7546b9a7dcbf..db5ffada2217 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h @@ -958,11 +958,11 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = { {0x0000a074, 0x00000000}, {0x0000a078, 0x00000000}, {0x0000a07c, 0x00000000}, - {0x0000a080, 0x22222229}, - {0x0000a084, 0x1d1d1d1d}, - {0x0000a088, 0x1d1d1d1d}, - {0x0000a08c, 0x1d1d1d1d}, - {0x0000a090, 0x171d1d1d}, + {0x0000a080, 0x1a1a1a1a}, + {0x0000a084, 0x1a1a1a1a}, + {0x0000a088, 0x1a1a1a1a}, + {0x0000a08c, 0x1a1a1a1a}, + {0x0000a090, 0x171a1a1a}, {0x0000a094, 0x11111717}, {0x0000a098, 0x00030311}, {0x0000a09c, 0x00000000}, diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 6988e1d081f2..639ba7d18ea4 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -965,7 +965,7 @@ static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah, { int i; - if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah)) + if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah)) return; for (i = 0; i < AR9300_MAX_CHAINS; i++) { diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h 
index 874f6570bd1c..54ba42f4108a 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -68,16 +68,13 @@ #define AR9300_BASE_ADDR 0x3ff #define AR9300_BASE_ADDR_512 0x1ff -#define AR9300_OTP_BASE \ - ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000) -#define AR9300_OTP_STATUS \ - ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18) +#define AR9300_OTP_BASE (AR_SREV_9340(ah) ? 0x30000 : 0x14000) +#define AR9300_OTP_STATUS (AR_SREV_9340(ah) ? 0x30018 : 0x15f18) #define AR9300_OTP_STATUS_TYPE 0x7 #define AR9300_OTP_STATUS_VALID 0x4 #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2 #define AR9300_OTP_STATUS_SM_BUSY 0x1 -#define AR9300_OTP_READ_DATA \ - ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3001c : 0x15f1c) +#define AR9300_OTP_READ_DATA (AR_SREV_9340(ah) ? 0x3001c : 0x15f1c) enum targetPowerHTRates { HT_TARGET_RATE_0_8_16, diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.c index e1714d7c9eeb..2bf6548dd143 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -334,8 +334,7 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah, REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 1); - if (!AR_SREV_9340(ah) && - REG_READ_FIELD(ah, AR_PHY_MODE, + if (REG_READ_FIELD(ah, AR_PHY_MODE, AR_PHY_MODE_DYNAMIC) == 0x1) REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 1); diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/trunk/drivers/net/wireless/ath/ath9k/ar9485_initvals.h index 88ff1d7b53ab..712f415b8c08 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9485_initvals.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9485_initvals.h @@ -1020,7 +1020,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = { {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0}, {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18}, + {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982}, {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/trunk/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h index e85a8b076c22..0c2ac0c6dc89 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h @@ -233,9 +233,9 @@ static const u32 ar9565_1p0_baseband_core[][2] = { {0x00009d10, 0x01834061}, {0x00009d14, 0x00c00400}, {0x00009d18, 0x00000000}, - {0x00009e08, 0x0038230c}, - {0x00009e24, 0x9907b515}, - {0x00009e28, 0x126f0600}, + {0x00009e08, 0x0078230c}, + {0x00009e24, 0x990bb515}, + {0x00009e28, 0x126f0000}, {0x00009e30, 0x06336f77}, {0x00009e34, 0x6af6532f}, {0x00009e38, 0x0cc80c00}, @@ -337,7 +337,7 @@ static const u32 ar9565_1p0_baseband_core[][2] = { static const u32 ar9565_1p0_baseband_postamble[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ - {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8009}, + {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a800d}, {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae}, {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da}, {0x00009828, 0x06903081, 
0x06903081, 0x06903881, 0x09143c81}, @@ -345,9 +345,9 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = { {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c}, {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4}, {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0}, - {0x00009e04, 0x00802020, 0x00802020, 0x00142020, 0x00142020}, - {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, - {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e}, + {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020}, + {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8}, + {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e}, {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e}, {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, @@ -450,8 +450,6 @@ static const u32 ar9565_1p0_soc_postamble[][5] = { static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { /* Addr allmodes */ - {0x00004050, 0x00300300}, - {0x0000406c, 0x00100000}, {0x0000a000, 0x00010000}, {0x0000a004, 0x00030002}, {0x0000a008, 0x00050004}, @@ -500,27 +498,27 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { {0x0000a0b4, 0x00000000}, {0x0000a0b8, 0x00000000}, {0x0000a0bc, 0x00000000}, - {0x0000a0c0, 0x00bf00a0}, - {0x0000a0c4, 0x11a011a1}, - {0x0000a0c8, 0x11be11bf}, - {0x0000a0cc, 0x11bc11bd}, - {0x0000a0d0, 0x22632264}, - {0x0000a0d4, 0x22612262}, - {0x0000a0d8, 0x227f2260}, - {0x0000a0dc, 0x4322227e}, - {0x0000a0e0, 0x43204321}, - {0x0000a0e4, 0x433e433f}, - {0x0000a0e8, 0x4462433d}, - {0x0000a0ec, 0x44604461}, - {0x0000a0f0, 0x447e447f}, - {0x0000a0f4, 0x5582447d}, - {0x0000a0f8, 0x55805581}, - {0x0000a0fc, 0x559e559f}, - {0x0000a100, 0x66816682}, - {0x0000a104, 0x669f6680}, - {0x0000a108, 0x669d669e}, - {0x0000a10c, 0x77627763}, - {0x0000a110, 0x77607761}, + {0x0000a0c0, 0x001f0000}, + {0x0000a0c4, 0x01000101}, + {0x0000a0c8, 0x011e011f}, + {0x0000a0cc, 0x011c011d}, + {0x0000a0d0, 0x02030204}, + {0x0000a0d4, 0x02010202}, + {0x0000a0d8, 0x021f0200}, + {0x0000a0dc, 0x0302021e}, + {0x0000a0e0, 0x03000301}, + {0x0000a0e4, 0x031e031f}, + {0x0000a0e8, 0x0402031d}, + {0x0000a0ec, 0x04000401}, + {0x0000a0f0, 0x041e041f}, + {0x0000a0f4, 0x0502041d}, + {0x0000a0f8, 0x05000501}, + {0x0000a0fc, 0x051e051f}, + {0x0000a100, 0x06010602}, + {0x0000a104, 0x061f0600}, + {0x0000a108, 0x061d061e}, + {0x0000a10c, 0x07020703}, + {0x0000a110, 0x07000701}, {0x0000a114, 0x00000000}, {0x0000a118, 0x00000000}, {0x0000a11c, 0x00000000}, @@ -532,27 +530,27 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { {0x0000a134, 0x00000000}, {0x0000a138, 0x00000000}, {0x0000a13c, 0x00000000}, - {0x0000a140, 0x00bf00a0}, - {0x0000a144, 0x11a011a1}, - {0x0000a148, 0x11be11bf}, - {0x0000a14c, 0x11bc11bd}, - {0x0000a150, 0x22632264}, - {0x0000a154, 0x22612262}, - {0x0000a158, 0x227f2260}, - {0x0000a15c, 0x4322227e}, - {0x0000a160, 0x43204321}, - {0x0000a164, 0x433e433f}, - {0x0000a168, 0x4462433d}, - {0x0000a16c, 0x44604461}, - {0x0000a170, 0x447e447f}, - {0x0000a174, 0x5582447d}, - {0x0000a178, 0x55805581}, - {0x0000a17c, 0x559e559f}, - {0x0000a180, 0x66816682}, - {0x0000a184, 0x669f6680}, - {0x0000a188, 0x669d669e}, - {0x0000a18c, 0x77e677e7}, - {0x0000a190, 0x77e477e5}, + {0x0000a140, 0x001f0000}, + {0x0000a144, 0x01000101}, + {0x0000a148, 0x011e011f}, + {0x0000a14c, 0x011c011d}, + {0x0000a150, 0x02030204}, + {0x0000a154, 0x02010202}, + {0x0000a158, 0x021f0200}, + {0x0000a15c, 0x0302021e}, + {0x0000a160, 
0x03000301}, + {0x0000a164, 0x031e031f}, + {0x0000a168, 0x0402031d}, + {0x0000a16c, 0x04000401}, + {0x0000a170, 0x041e041f}, + {0x0000a174, 0x0502041d}, + {0x0000a178, 0x05000501}, + {0x0000a17c, 0x051e051f}, + {0x0000a180, 0x06010602}, + {0x0000a184, 0x061f0600}, + {0x0000a188, 0x061d061e}, + {0x0000a18c, 0x07020703}, + {0x0000a190, 0x07000701}, {0x0000a194, 0x00000000}, {0x0000a198, 0x00000000}, {0x0000a19c, 0x00000000}, @@ -772,7 +770,7 @@ static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = { static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = { /* Addr allmodes */ - {0x00018c00, 0x18212ede}, + {0x00018c00, 0x18213ede}, {0x00018c04, 0x000801d8}, {0x00018c08, 0x0003780c}, }; @@ -891,8 +889,8 @@ static const u32 ar9565_1p0_common_wo_xlna_rx_gain_table[][2] = { {0x0000a180, 0x66816682}, {0x0000a184, 0x669f6680}, {0x0000a188, 0x669d669e}, - {0x0000a18c, 0x77e677e7}, - {0x0000a190, 0x77e477e5}, + {0x0000a18c, 0x77627763}, + {0x0000a190, 0x77607761}, {0x0000a194, 0x00000000}, {0x0000a198, 0x00000000}, {0x0000a19c, 0x00000000}, @@ -1116,7 +1114,7 @@ static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = { {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84}, {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000}, {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000}, - {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, {0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004}, @@ -1142,13 +1140,13 @@ static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = { {0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5}, {0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9}, {0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb}, - {0x0000a564, 0x7804ff56, 0x7804ff56, 0x60001cf0, 0x60001cf0}, - {0x0000a568, 0x7804ff56, 0x7804ff56, 0x61001cf1, 0x61001cf1}, - {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x62001cf2, 0x62001cf2}, - {0x0000a570, 0x7804ff56, 0x7804ff56, 0x63001cf3, 0x63001cf3}, - {0x0000a574, 0x7804ff56, 0x7804ff56, 0x64001cf4, 0x64001cf4}, - {0x0000a578, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6}, - {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6}, + {0x0000a564, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, + {0x0000a568, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, + {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, + {0x0000a570, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, + {0x0000a574, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, + {0x0000a578, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, + {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, @@ -1176,7 +1174,7 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = { {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84}, {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000}, {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000}, - {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, 
{0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004}, @@ -1202,13 +1200,13 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = { {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb}, - {0x0000a564, 0x7504ff56, 0x7504ff56, 0x59001cf0, 0x59001cf0}, - {0x0000a568, 0x7504ff56, 0x7504ff56, 0x5a001cf1, 0x5a001cf1}, - {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x5b001cf2, 0x5b001cf2}, - {0x0000a570, 0x7504ff56, 0x7504ff56, 0x5c001cf3, 0x5c001cf3}, - {0x0000a574, 0x7504ff56, 0x7504ff56, 0x5d001cf4, 0x5d001cf4}, - {0x0000a578, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6}, - {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6}, + {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, + {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, + {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, + {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, + {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, + {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, + {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, diff --git a/trunk/drivers/net/wireless/ath/ath9k/ath9k.h b/trunk/drivers/net/wireless/ath/ath9k/ath9k.h index 42b03dc39d14..8a1888d02070 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ath9k.h @@ -251,9 +251,9 @@ struct ath_atx_tid { int tidno; int baw_head; /* first un-acked tx buffer */ int baw_tail; /* next unused tx buffer slot */ - bool sched; - bool paused; - bool active; + int sched; + int paused; + u8 state; }; struct ath_node { @@ -274,6 +274,10 @@ struct ath_node { #endif }; +#define AGGR_CLEANUP BIT(1) +#define AGGR_ADDBA_COMPLETE BIT(2) +#define AGGR_ADDBA_PROGRESS BIT(3) + struct ath_tx_control { struct ath_txq *txq; struct ath_node *an; diff --git a/trunk/drivers/net/wireless/ath/ath9k/debug.c b/trunk/drivers/net/wireless/ath/ath9k/debug.c index b37eb8d38811..e6307b86363a 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/debug.c +++ b/trunk/drivers/net/wireless/ath/ath9k/debug.c @@ -2008,14 +2008,6 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw, WARN_ON(i != ATH9K_SSTATS_LEN); } -void ath9k_deinit_debug(struct ath_softc *sc) -{ - if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) { - relay_close(sc->rfs_chan_spec_scan); - sc->rfs_chan_spec_scan = NULL; - } -} - int ath9k_init_debug(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); diff --git a/trunk/drivers/net/wireless/ath/ath9k/debug.h b/trunk/drivers/net/wireless/ath/ath9k/debug.h index 9d49aab8b989..794a7ec83a24 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/debug.h +++ b/trunk/drivers/net/wireless/ath/ath9k/debug.h @@ -304,7 +304,6 @@ struct ath9k_debug { }; int ath9k_init_debug(struct ath_hw *ah); -void ath9k_deinit_debug(struct ath_softc *sc); void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status); void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, @@ -340,10 +339,6 @@ static inline int ath9k_init_debug(struct ath_hw *ah) return 0; } -static inline void ath9k_deinit_debug(struct ath_softc *sc) -{ -} - static inline void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status) { diff 
--git a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 62f1b7636c92..0743a47cef8f 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1174,7 +1174,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) mutex_lock(&priv->htc_pm_lock); priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); - if (!priv->ps_idle) + if (priv->ps_idle) chip_reset = true; mutex_unlock(&priv->htc_pm_lock); diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw.c b/trunk/drivers/net/wireless/ath/ath9k/hw.c index 15dfefcf2d0f..7f25da8444fe 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/hw.c +++ b/trunk/drivers/net/wireless/ath/ath9k/hw.c @@ -1172,7 +1172,6 @@ u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan) static inline void ath9k_hw_set_dma(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); - int txbuf_size; ENABLE_REGWRITE_BUFFER(ah); @@ -1226,17 +1225,13 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah) * So set the usable tx buf size also to half to * avoid data/delimiter underruns */ - txbuf_size = AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE; - } else if (AR_SREV_9340_13_OR_LATER(ah)) { - /* Uses fewer entries for AR934x v1.3+ to prevent rx overruns */ - txbuf_size = AR_9340_PCU_TXBUF_CTRL_USABLE_SIZE; - } else { - txbuf_size = AR_PCU_TXBUF_CTRL_USABLE_SIZE; + REG_WRITE(ah, AR_PCU_TXBUF_CTRL, + AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE); + } else if (!AR_SREV_9271(ah)) { + REG_WRITE(ah, AR_PCU_TXBUF_CTRL, + AR_PCU_TXBUF_CTRL_USABLE_SIZE); } - if (!AR_SREV_9271(ah)) - REG_WRITE(ah, AR_PCU_TXBUF_CTRL, txbuf_size); - REGWRITE_BUFFER_FLUSH(ah); if (AR_SREV_9300_20_OR_LATER(ah)) @@ -1311,13 +1306,9 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET; } else { tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE); - if (AR_SREV_9340(ah)) - tmpReg &= AR9340_INTR_SYNC_LOCAL_TIMEOUT; - else - tmpReg &= AR_INTR_SYNC_LOCAL_TIMEOUT | - AR_INTR_SYNC_RADM_CPL_TIMEOUT; - - if (tmpReg) { + if (tmpReg & + (AR_INTR_SYNC_LOCAL_TIMEOUT | + AR_INTR_SYNC_RADM_CPL_TIMEOUT)) { u32 val; REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); diff --git a/trunk/drivers/net/wireless/ath/ath9k/init.c b/trunk/drivers/net/wireless/ath/ath9k/init.c index 2ba494567777..0237b2868961 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/init.c +++ b/trunk/drivers/net/wireless/ath/ath9k/init.c @@ -787,7 +787,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->wiphy->iface_combinations = if_comb; hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); - hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + if (AR_SREV_5416(sc->sc_ah)) + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; @@ -829,6 +830,10 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) sc->ant_rx = hw->wiphy->available_antennas_rx; sc->ant_tx = hw->wiphy->available_antennas_tx; +#ifdef CONFIG_ATH9K_RATE_CONTROL + hw->rate_control_algorithm = "ath9k_rate_control"; +#endif + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &sc->sbands[IEEE80211_BAND_2GHZ]; @@ -901,7 +906,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, if (!ath_is_world_regd(reg)) { error = regulatory_hint(hw->wiphy, reg->alpha2); if (error) - goto debug_cleanup; + goto unregister; } ath_init_leds(sc); @@ -909,8 
+914,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, return 0; -debug_cleanup: - ath9k_deinit_debug(sc); unregister: ieee80211_unregister_hw(hw); rx_cleanup: @@ -939,6 +942,11 @@ static void ath9k_deinit_softc(struct ath_softc *sc) sc->dfs_detector->exit(sc->dfs_detector); ath9k_eeprom_release(sc); + + if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) { + relay_close(sc->rfs_chan_spec_scan); + sc->rfs_chan_spec_scan = NULL; + } } void ath9k_deinit_device(struct ath_softc *sc) @@ -952,7 +960,6 @@ void ath9k_deinit_device(struct ath_softc *sc) ath9k_ps_restore(sc); - ath9k_deinit_debug(sc); ieee80211_unregister_hw(hw); ath_rx_cleanup(sc); ath9k_deinit_softc(sc); diff --git a/trunk/drivers/net/wireless/ath/ath9k/mac.c b/trunk/drivers/net/wireless/ath/ath9k/mac.c index 566109a40fb3..498fee04afa0 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/mac.c +++ b/trunk/drivers/net/wireless/ath/ath9k/mac.c @@ -410,7 +410,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ); - if (AR_SREV_9340(ah) && !AR_SREV_9340_13_OR_LATER(ah)) + if (AR_SREV_9340(ah)) REG_WRITE(ah, AR_DMISC(q), AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1); else diff --git a/trunk/drivers/net/wireless/ath/ath9k/main.c b/trunk/drivers/net/wireless/ath/ath9k/main.c index 5092ecae7706..6963862a1872 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/main.c +++ b/trunk/drivers/net/wireless/ath/ath9k/main.c @@ -227,13 +227,13 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start) if (!test_bit(SC_OP_BEACONS, &sc->sc_flags)) goto work; + ath9k_set_beacon(sc); + if (ah->opmode == NL80211_IFTYPE_STATION && test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) { spin_lock_irqsave(&sc->sc_pm_lock, flags); sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; spin_unlock_irqrestore(&sc->sc_pm_lock, flags); - } else { - ath9k_set_beacon(sc); } work: ath_restart_work(sc); @@ -1332,7 +1332,6 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_node *an = (struct ath_node *) sta->drv_priv; struct ieee80211_key_conf ps_key = { }; - int key; ath_node_attach(sc, sta, vif); @@ -1340,9 +1339,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, vif->type != NL80211_IFTYPE_AP_VLAN) return 0; - key = ath_key_config(common, vif, sta, &ps_key); - if (key > 0) - an->ps_key = key; + an->ps_key = ath_key_config(common, vif, sta, &ps_key); return 0; } @@ -1359,7 +1356,6 @@ static void ath9k_del_ps_key(struct ath_softc *sc, return; ath_key_delete(common, &ps_key); - an->ps_key = 0; } static int ath9k_sta_remove(struct ieee80211_hw *hw, @@ -1687,7 +1683,6 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, u16 tid, u16 *ssn, u8 buf_size) { struct ath_softc *sc = hw->priv; - bool flush = false; int ret = 0; local_bh_disable(); @@ -1704,14 +1699,12 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); ath9k_ps_restore(sc); break; + case IEEE80211_AMPDU_TX_STOP_CONT: case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: - flush = true; - case IEEE80211_AMPDU_TX_STOP_CONT: ath9k_ps_wakeup(sc); ath_tx_aggr_stop(sc, sta, tid); - if (!flush) - ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); ath9k_ps_restore(sc); break; case IEEE80211_AMPDU_TX_OPERATIONAL: diff --git a/trunk/drivers/net/wireless/ath/ath9k/rc.c b/trunk/drivers/net/wireless/ath/ath9k/rc.c 
index 7eb1f4b458e4..aa4d368d8d3d 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/rc.c +++ b/trunk/drivers/net/wireless/ath/ath9k/rc.c @@ -1227,7 +1227,10 @@ static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta, return false; txtid = ATH_AN_2_TID(an, tidno); - return !txtid->active; + + if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS))) + return true; + return false; } diff --git a/trunk/drivers/net/wireless/ath/ath9k/rc.h b/trunk/drivers/net/wireless/ath/ath9k/rc.h index b9a87383cb43..267dbfcfaa96 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/rc.h +++ b/trunk/drivers/net/wireless/ath/ath9k/rc.h @@ -231,7 +231,7 @@ static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix, } #endif -#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL +#ifdef CONFIG_ATH9K_RATE_CONTROL int ath_rate_control_register(void); void ath_rate_control_unregister(void); #else diff --git a/trunk/drivers/net/wireless/ath/ath9k/reg.h b/trunk/drivers/net/wireless/ath/ath9k/reg.h index f7c90cc58d56..5c4ab5026dca 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/reg.h +++ b/trunk/drivers/net/wireless/ath/ath9k/reg.h @@ -798,10 +798,6 @@ #define AR_SREV_REVISION_9485_10 0 #define AR_SREV_REVISION_9485_11 1 #define AR_SREV_VERSION_9340 0x300 -#define AR_SREV_REVISION_9340_10 0 -#define AR_SREV_REVISION_9340_11 1 -#define AR_SREV_REVISION_9340_12 2 -#define AR_SREV_REVISION_9340_13 3 #define AR_SREV_VERSION_9580 0x1C0 #define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */ #define AR_SREV_VERSION_9462 0x280 @@ -901,10 +897,6 @@ #define AR_SREV_9340(_ah) \ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9340)) -#define AR_SREV_9340_13_OR_LATER(_ah) \ - (AR_SREV_9340((_ah)) && \ - ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9340_13)) - #define AR_SREV_9285E_20(_ah) \ (AR_SREV_9285_12_OR_LATER(_ah) && \ ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1)) @@ -1015,8 +1007,6 @@ enum { AR_INTR_SYNC_LOCAL_TIMEOUT | AR_INTR_SYNC_MAC_SLEEP_ACCESS), - AR9340_INTR_SYNC_LOCAL_TIMEOUT = 0x00000010, - AR_INTR_SYNC_SPURIOUS = 0xFFFFFFFF, }; @@ -1891,7 +1881,6 @@ enum { #define AR_PCU_TXBUF_CTRL_SIZE_MASK 0x7FF #define AR_PCU_TXBUF_CTRL_USABLE_SIZE 0x700 #define AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE 0x380 -#define AR_9340_PCU_TXBUF_CTRL_USABLE_SIZE 0x500 #define AR_PCU_MISC_MODE2 0x8344 #define AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE 0x00000002 diff --git a/trunk/drivers/net/wireless/ath/ath9k/xmit.c b/trunk/drivers/net/wireless/ath/ath9k/xmit.c index 83ab6be3fe6d..eab0fcb7ded6 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/xmit.c +++ b/trunk/drivers/net/wireless/ath/ath9k/xmit.c @@ -125,6 +125,24 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) list_add_tail(&ac->list, &txq->axq_acq); } +static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) +{ + struct ath_txq *txq = tid->ac->txq; + + WARN_ON(!tid->paused); + + ath_txq_lock(sc, txq); + tid->paused = false; + + if (skb_queue_empty(&tid->buf_q)) + goto unlock; + + ath_tx_queue_tid(txq, tid); + ath_txq_schedule(sc, txq); +unlock: + ath_txq_unlock_complete(sc, txq); +} + static struct ath_frame_info *get_frame_info(struct sk_buff *skb) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); @@ -183,6 +201,11 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) } } + if (tid->baw_head == tid->baw_tail) { + tid->state &= ~AGGR_ADDBA_COMPLETE; + tid->state &= ~AGGR_CLEANUP; + } + if (sendbar) { ath_txq_unlock(sc, txq); ath_send_bar(tid, tid->seq_start); @@ -254,7 +277,9 @@ 
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, list_add_tail(&bf->list, &bf_head); - ath_tx_update_baw(sc, tid, bf->bf_state.seqno); + if (fi->retries) + ath_tx_update_baw(sc, tid, bf->bf_state.seqno); + ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); } @@ -466,19 +491,19 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, tx_info = IEEE80211_SKB_CB(skb); fi = get_frame_info(skb); - if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) { - /* - * Outside of the current BlockAck window, - * maybe part of a previous session - */ - txfail = 1; - } else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) { + if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) { /* transmit completion, subframe is * acked by block ack */ acked_cnt++; } else if (!isaggr && txok) { /* transmit completion */ acked_cnt++; + } else if (tid->state & AGGR_CLEANUP) { + /* + * cleanup in progress, just fail + * the un-acked sub-frames + */ + txfail = 1; } else if (flush) { txpending = 1; } else if (fi->retries < ATH_MAX_SW_RETRIES) { @@ -502,7 +527,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, if (bf_next != NULL || !bf_last->bf_stale) list_move_tail(&bf->list, &bf_head); - if (!txpending) { + if (!txpending || (tid->state & AGGR_CLEANUP)) { /* * complete the acked-ones/xretried ones; update * block-ack window @@ -576,6 +601,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, ath_txq_lock(sc, txq); } + if (tid->state & AGGR_CLEANUP) + ath_tx_flush_tid(sc, tid); + rcu_read_unlock(); if (needreset) @@ -592,7 +620,6 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, struct ath_tx_status *ts, struct ath_buf *bf, struct list_head *bf_head) { - struct ieee80211_tx_info *info; bool txok, flush; txok = !(ts->ts_status & ATH9K_TXERR_MASK); @@ -604,12 +631,8 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, txq->axq_ampdu_depth--; if (!bf_isampdu(bf)) { - if (!flush) { - info = IEEE80211_SKB_CB(bf->bf_mpdu); - memcpy(info->control.rates, bf->rates, - sizeof(info->control.rates)); + if (!flush) ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok); - } ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok); } else ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok); @@ -653,7 +676,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, skb = bf->bf_mpdu; tx_info = IEEE80211_SKB_CB(skb); - rates = bf->rates; + rates = tx_info->control.rates; /* * Find the lowest frame length among the rate series that will have a @@ -1208,6 +1231,9 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, an = (struct ath_node *)sta->drv_priv; txtid = ATH_AN_2_TID(an, tid); + if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE)) + return -EAGAIN; + /* update ampdu factor/density, they may have changed. This may happen * in HT IBSS when a beacon with HT-info is received after the station * has already been added. 
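/*
 * Illustrative sketch, not the driver's actual macro: the aggregation code
 * above gates retries and completions on a BlockAck-window test (BAW_WITHIN)
 * over the 12-bit 802.11 sequence space. A wrap-safe check of that kind is
 * typically written as a modulo-4096 distance compare; the names and
 * constant below are assumptions for illustration only.
 */
#include <linux/types.h>

#define EX_IEEE80211_SEQ_SPACE	4096	/* sequence numbers are 0..4095 */

static inline bool ex_baw_within(u16 seq_start, u16 baw_size, u16 seqno)
{
	/* Distance from the window start, reduced into the sequence space,
	 * must be smaller than the window size.
	 */
	return ((seqno - seq_start) & (EX_IEEE80211_SEQ_SPACE - 1)) < baw_size;
}

/*
 * Example: with seq_start = 4090 and baw_size = 64, a frame with seqno = 5
 * has wrapped past 4095 but is still inside the window, since
 * (5 - 4090) & 4095 == 11 < 64.
 */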
@@ -1219,7 +1245,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, an->mpdudensity = density; } - txtid->active = true; + txtid->state |= AGGR_ADDBA_PROGRESS; txtid->paused = true; *ssn = txtid->seq_start = txtid->seq_next; txtid->bar_index = -1; @@ -1236,9 +1262,28 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); struct ath_txq *txq = txtid->ac->txq; + if (txtid->state & AGGR_CLEANUP) + return; + + if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { + txtid->state &= ~AGGR_ADDBA_PROGRESS; + return; + } + ath_txq_lock(sc, txq); - txtid->active = false; txtid->paused = true; + + /* + * If frames are still being transmitted for this TID, they will be + * cleaned up during tx completion. To prevent race conditions, this + * TID can only be reused after all in-progress subframes have been + * completed. + */ + if (txtid->baw_head != txtid->baw_tail) + txtid->state |= AGGR_CLEANUP; + else + txtid->state &= ~AGGR_ADDBA_COMPLETE; + ath_tx_flush_tid(sc, txtid); ath_txq_unlock_complete(sc, txq); } @@ -1304,28 +1349,18 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) } } -void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, - u16 tidno) +void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) { - struct ath_atx_tid *tid; + struct ath_atx_tid *txtid; struct ath_node *an; - struct ath_txq *txq; an = (struct ath_node *)sta->drv_priv; - tid = ATH_AN_2_TID(an, tidno); - txq = tid->ac->txq; - - ath_txq_lock(sc, txq); - - tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; - tid->paused = false; - if (!skb_queue_empty(&tid->buf_q)) { - ath_tx_queue_tid(txq, tid); - ath_txq_schedule(sc, txq); - } - - ath_txq_unlock_complete(sc, txq); + txtid = ATH_AN_2_TID(an, tid); + txtid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; + txtid->state |= AGGR_ADDBA_COMPLETE; + txtid->state &= ~AGGR_ADDBA_PROGRESS; + ath_tx_resume_tid(sc, txtid); } /********************/ @@ -1570,8 +1605,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) return; - rcu_read_lock(); - ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list); @@ -1610,10 +1643,8 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) if (ac == last_ac || txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) - break; + return; } - - rcu_read_unlock(); } /***********/ @@ -2378,10 +2409,12 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) tid->baw_head = tid->baw_tail = 0; tid->sched = false; tid->paused = false; - tid->active = false; + tid->state &= ~AGGR_CLEANUP; __skb_queue_head_init(&tid->buf_q); acno = TID_TO_WME_AC(tidno); tid->ac = &an->ac[acno]; + tid->state &= ~AGGR_ADDBA_COMPLETE; + tid->state &= ~AGGR_ADDBA_PROGRESS; } for (acno = 0, ac = &an->ac[acno]; @@ -2418,7 +2451,8 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) } ath_tid_drain(sc, txq, tid); - tid->active = false; + tid->state &= ~AGGR_ADDBA_COMPLETE; + tid->state &= ~AGGR_CLEANUP; ath_txq_unlock(sc, txq); } diff --git a/trunk/drivers/net/wireless/atmel.c b/trunk/drivers/net/wireless/atmel.c index b827d51c30a3..830bb1d1f957 100644 --- a/trunk/drivers/net/wireless/atmel.c +++ b/trunk/drivers/net/wireless/atmel.c @@ -1624,7 +1624,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port, 
netif_carrier_off(dev); - if (!proc_create_data("driver/atmel", 0, NULL, &atmel_proc_fops, priv)) + if (!proc_create_data("driver/atmel", 0, NULL, &atmel_proc_fops, priv)); printk(KERN_WARNING "atmel: unable to create /proc entry.\n"); printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %pM\n", diff --git a/trunk/drivers/net/wireless/b43/dma.c b/trunk/drivers/net/wireless/b43/dma.c index f7c70b3a6ea9..523355b87659 100644 --- a/trunk/drivers/net/wireless/b43/dma.c +++ b/trunk/drivers/net/wireless/b43/dma.c @@ -1728,25 +1728,6 @@ static void dma_rx(struct b43_dmaring *ring, int *slot) sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); } -void b43_dma_handle_rx_overflow(struct b43_dmaring *ring) -{ - int current_slot, previous_slot; - - B43_WARN_ON(ring->tx); - - /* Device has filled all buffers, drop all packets and let TCP - * decrease speed. - * Decrement RX index by one will let the device to see all slots - * as free again - */ - /* - *TODO: How to increase rx_drop in mac80211? - */ - current_slot = ring->ops->get_current_rxslot(ring); - previous_slot = prev_slot(ring, current_slot); - ring->ops->set_current_rxslot(ring, previous_slot); -} - void b43_dma_rx(struct b43_dmaring *ring) { const struct b43_dma_ops *ops = ring->ops; diff --git a/trunk/drivers/net/wireless/b43/dma.h b/trunk/drivers/net/wireless/b43/dma.h index df8c8cdcbdb5..9fdd1983079c 100644 --- a/trunk/drivers/net/wireless/b43/dma.h +++ b/trunk/drivers/net/wireless/b43/dma.h @@ -9,7 +9,7 @@ /* DMA-Interrupt reasons. */ #define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \ | (1 << 14) | (1 << 15)) -#define B43_DMAIRQ_RDESC_UFLOW (1 << 13) +#define B43_DMAIRQ_NONFATALMASK (1 << 13) #define B43_DMAIRQ_RX_DONE (1 << 16) /*** 32-bit DMA Engine. ***/ @@ -295,8 +295,6 @@ int b43_dma_tx(struct b43_wldev *dev, void b43_dma_handle_txstatus(struct b43_wldev *dev, const struct b43_txstatus *status); -void b43_dma_handle_rx_overflow(struct b43_dmaring *ring); - void b43_dma_rx(struct b43_dmaring *ring); void b43_dma_direct_fifo_rx(struct b43_wldev *dev, diff --git a/trunk/drivers/net/wireless/b43/main.c b/trunk/drivers/net/wireless/b43/main.c index a95b77ab360e..d377f77d30b5 100644 --- a/trunk/drivers/net/wireless/b43/main.c +++ b/trunk/drivers/net/wireless/b43/main.c @@ -1902,18 +1902,30 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev) } } - if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) { - b43err(dev->wl, - "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n", - dma_reason[0], dma_reason[1], - dma_reason[2], dma_reason[3], - dma_reason[4], dma_reason[5]); - b43err(dev->wl, "This device does not support DMA " + if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK | + B43_DMAIRQ_NONFATALMASK))) { + if (merged_dma_reason & B43_DMAIRQ_FATALMASK) { + b43err(dev->wl, "Fatal DMA error: " + "0x%08X, 0x%08X, 0x%08X, " + "0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + b43err(dev->wl, "This device does not support DMA " "on your system. It will now be switched to PIO.\n"); - /* Fall back to PIO transfers if we get fatal DMA errors! */ - dev->use_pio = true; - b43_controller_restart(dev, "DMA error"); - return; + /* Fall back to PIO transfers if we get fatal DMA errors! 
*/ + dev->use_pio = true; + b43_controller_restart(dev, "DMA error"); + return; + } + if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) { + b43err(dev->wl, "DMA error: " + "0x%08X, 0x%08X, 0x%08X, " + "0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + } } if (unlikely(reason & B43_IRQ_UCODE_DEBUG)) @@ -1932,11 +1944,6 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev) handle_irq_noise(dev); /* Check the DMA reason registers for received data. */ - if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) { - if (B43_DEBUG) - b43warn(dev->wl, "RX descriptor underrun\n"); - b43_dma_handle_rx_overflow(dev->dma.rx_ring); - } if (dma_reason[0] & B43_DMAIRQ_RX_DONE) { if (b43_using_pio_transfers(dev)) b43_pio_rx(dev->pio.rx_queue); @@ -1994,7 +2001,7 @@ static irqreturn_t b43_do_interrupt(struct b43_wldev *dev) return IRQ_NONE; dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON) - & 0x0001FC00; + & 0x0001DC00; dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON) & 0x0000DC00; dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON) @@ -2458,7 +2465,7 @@ static void b43_request_firmware(struct work_struct *work) for (i = 0; i < B43_NR_FWTYPES; i++) { errmsg = ctx->errors[i]; if (strlen(errmsg)) - b43err(dev->wl, "%s", errmsg); + b43err(dev->wl, errmsg); } b43_print_fw_helptext(dev->wl, 1); goto out; @@ -3123,7 +3130,7 @@ static int b43_chip_init(struct b43_wldev *dev) b43_write32(dev, 0x018C, 0x02000000); } b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000); - b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00); + b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00); b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00); b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00); b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c index 9431af2465f3..be0787cab24f 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c @@ -27,6 +27,7 @@ #include "tracepoint.h" #define PKTFILTER_BUF_SIZE 128 +#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */ #define BRCMF_DEFAULT_BCN_TIMEOUT 3 #define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40 #define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40 @@ -337,6 +338,23 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) goto done; } + /* Try to set and enable ARP offload feature, this may fail */ + err = brcmf_fil_iovar_int_set(ifp, "arp_ol", BRCMF_ARPOL_MODE); + if (err) { + brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n", + BRCMF_ARPOL_MODE, err); + err = 0; + } else { + err = brcmf_fil_iovar_int_set(ifp, "arpoe", 1); + if (err) { + brcmf_dbg(TRACE, "failed to enable ARP offload err = %d\n", + err); + err = 0; + } else + brcmf_dbg(TRACE, "successfully enabled ARP offload to 0x%x\n", + BRCMF_ARPOL_MODE); + } + /* Setup packet filter */ brcmf_c_pktfilter_offload_set(ifp, BRCMF_DEFAULT_PACKET_FILTER); brcmf_c_pktfilter_offload_enable(ifp, BRCMF_DEFAULT_PACKET_FILTER, diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 2c593570497c..59c25463e428 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ -653,13 +653,10 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked) brcmf_dbg(INFO, "%s: 
Broadcom Dongle Host Driver\n", ndev->name); - ndev->destructor = free_netdev; return 0; fail: - drvr->iflist[ifp->bssidx] = NULL; ndev->netdev_ops = NULL; - free_netdev(ndev); return -EBADE; } @@ -723,9 +720,6 @@ static int brcmf_net_p2p_attach(struct brcmf_if *ifp) return 0; fail: - ifp->drvr->iflist[ifp->bssidx] = NULL; - ndev->netdev_ops = NULL; - free_netdev(ndev); return -EBADE; } @@ -794,7 +788,6 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx) struct brcmf_if *ifp; ifp = drvr->iflist[bssidx]; - drvr->iflist[bssidx] = NULL; if (!ifp) { brcmf_err("Null interface, idx=%d\n", bssidx); return; @@ -815,13 +808,15 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx) cancel_work_sync(&ifp->setmacaddr_work); cancel_work_sync(&ifp->multicast_work); } - /* unregister will take care of freeing it */ + unregister_netdev(ifp->ndev); if (bssidx == 0) brcmf_cfg80211_detach(drvr->config); + free_netdev(ifp->ndev); } else { kfree(ifp); } + drvr->iflist[bssidx] = NULL; } int brcmf_attach(uint bus_hdrlen, struct device *dev) @@ -930,10 +925,8 @@ int brcmf_bus_start(struct device *dev) brcmf_fws_del_interface(ifp); brcmf_fws_deinit(drvr); } - if (drvr->iflist[0]) { - free_netdev(ifp->ndev); - drvr->iflist[0] = NULL; - } + free_netdev(ifp->ndev); + drvr->iflist[0] = NULL; if (p2p_ifp) { free_netdev(p2p_ifp->ndev); drvr->iflist[1] = NULL; @@ -941,8 +934,7 @@ int brcmf_bus_start(struct device *dev) return ret; } if ((brcmf_p2p_enable) && (p2p_ifp)) - if (brcmf_net_p2p_attach(p2p_ifp) < 0) - brcmf_p2p_enable = 0; + brcmf_net_p2p_attach(p2p_ifp); return 0; } diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/fweh.c index 83ee53a7c76e..5a64280e6485 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/fweh.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/fweh.c @@ -202,8 +202,7 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, return; brcmf_fws_add_interface(ifp); if (!drvr->fweh.evt_handler[BRCMF_E_IF]) - if (brcmf_net_attach(ifp, false) < 0) - return; + err = brcmf_net_attach(ifp, false); } if (ifevent->action == BRCMF_E_IF_CHANGE) diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/trunk/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h index 665ef69e974b..0f2c83bc95dc 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h @@ -23,12 +23,6 @@ #define BRCMF_FIL_ACTION_FRAME_SIZE 1800 -/* ARP Offload feature flags for arp_ol iovar */ -#define BRCMF_ARP_OL_AGENT 0x00000001 -#define BRCMF_ARP_OL_SNOOP 0x00000002 -#define BRCMF_ARP_OL_HOST_AUTO_REPLY 0x00000004 -#define BRCMF_ARP_OL_PEER_AUTO_REPLY 0x00000008 - enum brcmf_fil_p2p_if_types { BRCMF_FIL_P2P_IF_CLIENT, diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/p2p.c index 79555f006d53..e7a1a4770996 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/p2p.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/p2p.c @@ -47,7 +47,6 @@ #define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \ (channel == SOCIAL_CHAN_2) || \ (channel == SOCIAL_CHAN_3)) -#define BRCMF_P2P_TEMP_CHAN SOCIAL_CHAN_3 #define SOCIAL_CHAN_CNT 3 #define AF_PEER_SEARCH_CNT 2 @@ -1955,21 +1954,21 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg) err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1); if (err < 0) { brcmf_err("set p2p_disc error\n"); - brcmf_free_vif(cfg, p2p_vif); + 
brcmf_free_vif(p2p_vif); goto exit; } /* obtain bsscfg index for P2P discovery */ err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx); if (err < 0) { brcmf_err("retrieving discover bsscfg index failed\n"); - brcmf_free_vif(cfg, p2p_vif); + brcmf_free_vif(p2p_vif); goto exit; } /* Verify that firmware uses same bssidx as driver !! */ if (p2p_ifp->bssidx != bssidx) { brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n", bssidx, p2p_ifp->bssidx); - brcmf_free_vif(cfg, p2p_vif); + brcmf_free_vif(p2p_vif); goto exit; } @@ -1997,7 +1996,7 @@ void brcmf_p2p_detach(struct brcmf_p2p_info *p2p) brcmf_p2p_cancel_remain_on_channel(vif->ifp); brcmf_p2p_deinit_discovery(p2p); /* remove discovery interface */ - brcmf_free_vif(p2p->cfg, vif); + brcmf_free_vif(vif); p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL; } /* just set it all to zero */ @@ -2014,30 +2013,17 @@ static void brcmf_p2p_get_current_chanspec(struct brcmf_p2p_info *p2p, u16 *chanspec) { struct brcmf_if *ifp; - u8 mac_addr[ETH_ALEN]; + struct brcmf_fil_chan_info_le ci; struct brcmu_chan ch; - struct brcmf_bss_info_le *bi; - u8 *buf; + s32 err; ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; - if (brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSSID, mac_addr, - ETH_ALEN) == 0) { - buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL); - if (buf != NULL) { - *(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX); - if (brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO, - buf, WL_BSS_INFO_MAX) == 0) { - bi = (struct brcmf_bss_info_le *)(buf + 4); - *chanspec = le16_to_cpu(bi->chanspec); - kfree(buf); - return; - } - kfree(buf); - } - } - /* Use default channel for P2P */ - ch.chnum = BRCMF_P2P_TEMP_CHAN; + ch.chnum = 11; + + err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CHANNEL, &ci, sizeof(ci)); + if (!err) + ch.chnum = le32_to_cpu(ci.hw_channel); ch.bw = BRCMU_CHAN_BW_20; p2p->cfg->d11inf.encchspec(&ch); *chanspec = ch.chspec; @@ -2222,7 +2208,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, return &p2p_vif->wdev; fail: - brcmf_free_vif(p2p->cfg, p2p_vif); + brcmf_free_vif(p2p_vif); return ERR_PTR(err); } @@ -2231,31 +2217,13 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, * * @vif: virtual interface object to delete. */ -static void brcmf_p2p_delete_p2pdev(struct brcmf_cfg80211_info *cfg, - struct brcmf_cfg80211_vif *vif) -{ - cfg80211_unregister_wdev(&vif->wdev); - cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL; - brcmf_free_vif(cfg, vif); -} - -/** - * brcmf_p2p_free_p2p_if() - free up net device related data. - * - * @ndev: net device that needs to be freed. 
- */ -static void brcmf_p2p_free_p2p_if(struct net_device *ndev) +static void brcmf_p2p_delete_p2pdev(struct brcmf_cfg80211_vif *vif) { - struct brcmf_cfg80211_info *cfg; - struct brcmf_cfg80211_vif *vif; - struct brcmf_if *ifp; - - ifp = netdev_priv(ndev); - cfg = ifp->drvr->config; - vif = ifp->vif; + struct brcmf_p2p_info *p2p = &vif->ifp->drvr->config->p2p; - brcmf_free_vif(cfg, vif); - free_netdev(ifp->ndev); + cfg80211_unregister_wdev(&vif->wdev); + p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL; + brcmf_free_vif(vif); } /** @@ -2335,9 +2303,6 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, brcmf_err("Registering netdevice failed\n"); goto fail; } - /* override destructor */ - ifp->ndev->destructor = brcmf_p2p_free_p2p_if; - cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif; /* Disable firmware roaming for P2P interface */ brcmf_fil_iovar_int_set(ifp, "roam_off", 1); @@ -2349,7 +2314,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, return &ifp->vif->wdev; fail: - brcmf_free_vif(cfg, vif); + brcmf_free_vif(vif); return ERR_PTR(err); } @@ -2385,7 +2350,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) break; case NL80211_IFTYPE_P2P_DEVICE: - brcmf_p2p_delete_p2pdev(cfg, vif); + brcmf_p2p_delete_p2pdev(vif); return 0; default: return -ENOTSUPP; @@ -2413,6 +2378,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) err = 0; } brcmf_cfg80211_arm_vif_event(cfg, NULL); + brcmf_free_vif(vif); p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL; return err; diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 301e572e8923..6d758f285352 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -459,38 +459,6 @@ send_key_to_dongle(struct net_device *ndev, struct brcmf_wsec_key *key) return err; } -static s32 -brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable) -{ - s32 err; - u32 mode; - - if (enable) - mode = BRCMF_ARP_OL_AGENT | BRCMF_ARP_OL_PEER_AUTO_REPLY; - else - mode = 0; - - /* Try to set and enable ARP offload feature, this may fail, then it */ - /* is simply not supported and err 0 will be returned */ - err = brcmf_fil_iovar_int_set(ifp, "arp_ol", mode); - if (err) { - brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n", - mode, err); - err = 0; - } else { - err = brcmf_fil_iovar_int_set(ifp, "arpoe", enable); - if (err) { - brcmf_dbg(TRACE, "failed to configure (%d) ARP offload err = %d\n", - enable, err); - err = 0; - } else - brcmf_dbg(TRACE, "successfully configured (%d) ARP offload to 0x%x\n", - enable, mode); - } - - return err; -} - static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy, const char *name, enum nl80211_iftype type, @@ -2248,11 +2216,6 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev, } pm = enabled ? PM_FAST : PM_OFF; - /* Do not enable the power save after assoc if it is a p2p interface */ - if (ifp->vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) { - brcmf_dbg(INFO, "Do not enable power save for P2P clients\n"); - pm = PM_OFF; - } brcmf_dbg(INFO, "power save %s\n", (pm ? 
"enabled" : "disabled")); err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, pm); @@ -3676,29 +3639,11 @@ brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif, return err; } -static s32 -brcmf_cfg80211_set_channel(struct brcmf_cfg80211_info *cfg, - struct brcmf_if *ifp, - struct ieee80211_channel *channel) -{ - u16 chanspec; - s32 err; - - brcmf_dbg(TRACE, "band=%d, center_freq=%d\n", channel->band, - channel->center_freq); - - chanspec = channel_to_chanspec(&cfg->d11inf, channel); - err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec); - - return err; -} - static s32 brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_ap_settings *settings) { s32 ie_offset; - struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_tlv *ssid_ie; struct brcmf_ssid_le ssid_le; @@ -3738,7 +3683,6 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, } brcmf_set_mpc(ifp, 0); - brcmf_configure_arp_offload(ifp, false); /* find the RSN_IE */ rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, @@ -3769,12 +3713,6 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon); - err = brcmf_cfg80211_set_channel(cfg, ifp, settings->chandef.chan); - if (err < 0) { - brcmf_err("Set Channel failed, %d\n", err); - goto exit; - } - if (settings->beacon_interval) { err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD, settings->beacon_interval); @@ -3851,10 +3789,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); exit: - if (err) { + if (err) brcmf_set_mpc(ifp, 1); - brcmf_configure_arp_offload(ifp, true); - } return err; } @@ -3895,7 +3831,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) brcmf_err("bss_enable config failed %d\n", err); } brcmf_set_mpc(ifp, 1); - brcmf_configure_arp_offload(ifp, true); set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); @@ -4205,15 +4140,11 @@ static const struct ieee80211_iface_limit brcmf_iface_limits[] = { .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_P2P_DEVICE) - } }; static const struct ieee80211_iface_combination brcmf_iface_combos[] = { { .max_interfaces = BRCMF_IFACE_MAX_CNT, - .num_different_channels = 2, + .num_different_channels = 1, /* no multi-channel for now */ .n_limits = ARRAY_SIZE(brcmf_iface_limits), .limits = brcmf_iface_limits } @@ -4266,8 +4197,7 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev) BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_P2P_DEVICE); + BIT(NL80211_IFTYPE_P2P_GO); wiphy->iface_combinations = brcmf_iface_combos; wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos); wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; @@ -4321,16 +4251,20 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, return vif; } -void brcmf_free_vif(struct brcmf_cfg80211_info *cfg, - struct brcmf_cfg80211_vif *vif) +void brcmf_free_vif(struct brcmf_cfg80211_vif *vif) { + struct brcmf_cfg80211_info *cfg; + struct wiphy *wiphy; + + wiphy = vif->wdev.wiphy; + cfg = wiphy_priv(wiphy); list_del(&vif->list); cfg->vif_cnt--; kfree(vif); if (!cfg->vif_cnt) { - wiphy_unregister(cfg->wiphy); - 
wiphy_free(cfg->wiphy); + wiphy_unregister(wiphy); + wiphy_free(wiphy); } } @@ -4707,6 +4641,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp, return 0; case BRCMF_E_IF_DEL: + ifp->vif = NULL; mutex_unlock(&event->vif_event_lock); /* event may not be upon user request */ if (brcmf_cfg80211_vif_event_armed(cfg)) @@ -4912,7 +4847,8 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, wl_deinit_priv(cfg); cfg80211_attach_out: - brcmf_free_vif(cfg, vif); + brcmf_free_vif(vif); + wiphy_free(wiphy); return NULL; } @@ -4924,7 +4860,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg) wl_deinit_priv(cfg); brcmf_btcoex_detach(cfg); list_for_each_entry_safe(vif, tmp, &cfg->vif_list, list) { - brcmf_free_vif(cfg, vif); + brcmf_free_vif(vif); } } @@ -5288,8 +5224,6 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg) if (err) goto default_conf_out; - brcmf_configure_arp_offload(ifp, true); - cfg->dongle_up = true; default_conf_out: diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h index d9bdaf9a72d0..a71cff84cdcf 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h @@ -487,8 +487,7 @@ enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp); struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, enum nl80211_iftype type, bool pm_block); -void brcmf_free_vif(struct brcmf_cfg80211_info *cfg, - struct brcmf_cfg80211_vif *vif); +void brcmf_free_vif(struct brcmf_cfg80211_vif *vif); s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, const u8 *vndr_ie_buf, u32 vndr_ie_len); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/main.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/main.c index 9fd6f2fef11b..28e7aeedd184 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -3074,8 +3074,21 @@ static void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail) */ static bool brcms_c_ps_allowed(struct brcms_c_info *wlc) { - /* not supporting PS so always return false for now */ - return false; + /* disallow PS when one of the following global conditions meets */ + if (!wlc->pub->associated) + return false; + + /* disallow PS when one of these meets when not scanning */ + if (wlc->filter_flags & FIF_PROMISC_IN_BSS) + return false; + + if (wlc->bsscfg->type == BRCMS_TYPE_AP) + return false; + + if (wlc->bsscfg->type == BRCMS_TYPE_ADHOC) + return false; + + return true; } static void brcms_c_statsupd(struct brcms_c_info *wlc) diff --git a/trunk/drivers/net/wireless/iwlegacy/3945-rs.c b/trunk/drivers/net/wireless/iwlegacy/3945-rs.c index fe31590a51b2..c9f197d9ca1e 100644 --- a/trunk/drivers/net/wireless/iwlegacy/3945-rs.c +++ b/trunk/drivers/net/wireless/iwlegacy/3945-rs.c @@ -816,7 +816,6 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta, rs_sta->last_txrate_idx = idx; info->control.rates[0].idx = rs_sta->last_txrate_idx; } - info->control.rates[0].count = 1; D_RATE("leave: %d\n", idx); } diff --git a/trunk/drivers/net/wireless/iwlegacy/4965-mac.c b/trunk/drivers/net/wireless/iwlegacy/4965-mac.c index 9a95045c97b6..b8f82e688c72 100644 --- a/trunk/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/trunk/drivers/net/wireless/iwlegacy/4965-mac.c @@ -5741,7 +5741,8 @@ il4965_mac_setup_register(struct il_priv *il, u32 
max_probe_length) hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT | - IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; + IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_SUPPORTS_DYNAMIC_PS; if (il->cfg->sku & IL_SKU_N) hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | diff --git a/trunk/drivers/net/wireless/iwlegacy/4965-rs.c b/trunk/drivers/net/wireless/iwlegacy/4965-rs.c index ed3c42a63a43..1fc0b227e120 100644 --- a/trunk/drivers/net/wireless/iwlegacy/4965-rs.c +++ b/trunk/drivers/net/wireless/iwlegacy/4965-rs.c @@ -2268,7 +2268,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta, info->control.rates[0].flags = 0; } info->control.rates[0].idx = rate_idx; - info->control.rates[0].count = 1; + } static void * diff --git a/trunk/drivers/net/wireless/iwlegacy/common.c b/trunk/drivers/net/wireless/iwlegacy/common.c index e9a3cbc409ae..592d0aa634a8 100644 --- a/trunk/drivers/net/wireless/iwlegacy/common.c +++ b/trunk/drivers/net/wireless/iwlegacy/common.c @@ -1423,7 +1423,7 @@ il_setup_rx_scan_handlers(struct il_priv *il) } EXPORT_SYMBOL(il_setup_rx_scan_handlers); -u16 +inline u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band, u8 n_probes) { diff --git a/trunk/drivers/net/wireless/iwlegacy/common.h b/trunk/drivers/net/wireless/iwlegacy/common.h index 4caaf52986a4..f8246f2d88f9 100644 --- a/trunk/drivers/net/wireless/iwlegacy/common.h +++ b/trunk/drivers/net/wireless/iwlegacy/common.h @@ -1832,16 +1832,16 @@ u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval); __le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon, u32 beacon_interval); -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM extern const struct dev_pm_ops il_pm_ops; #define IL_LEGACY_PM_OPS (&il_pm_ops) -#else /* !CONFIG_PM_SLEEP */ +#else /* !CONFIG_PM */ #define IL_LEGACY_PM_OPS NULL -#endif /* !CONFIG_PM_SLEEP */ +#endif /* !CONFIG_PM */ /***************************************************** * Error Handling Debugging diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/rs.c b/trunk/drivers/net/wireless/iwlwifi/dvm/rs.c index 10fbb176cc8e..907bd6e50aad 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/rs.c @@ -2799,7 +2799,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, info->control.rates[0].flags = 0; } info->control.rates[0].idx = rate_idx; - info->control.rates[0].count = 1; + } static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/rxon.c b/trunk/drivers/net/wireless/iwlwifi/dvm/rxon.c index cd1ad0019185..707446fa00bd 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/rxon.c @@ -1378,7 +1378,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv) struct iwl_chain_noise_data *data = &priv->chain_noise_data; int ret; - if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED) + if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)) return; if ((data->state == IWL_CHAIN_NOISE_ALIVE) && diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/sta.c b/trunk/drivers/net/wireless/iwlwifi/dvm/sta.c index c3c13ce96eb0..db183b44e038 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/sta.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/sta.c @@ -735,7 +735,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct 
iwl_rxon_context *ctx) memcpy(&lq, priv->stations[i].lq, sizeof(struct iwl_link_quality_cmd)); - if (memcmp(&lq, &zero_lq, sizeof(lq))) + if (!memcmp(&lq, &zero_lq, sizeof(lq))) send_lq = true; } spin_unlock_bh(&priv->sta_lock); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-drv.c b/trunk/drivers/net/wireless/iwlwifi/iwl-drv.c index 40fed1f511e2..39aad9893e0b 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -1000,12 +1000,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) */ if (load_module) { err = request_module("%s", op->name); -#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR if (err) IWL_ERR(drv, "failed to load module %s (error %d), is dynamic loading enabled?\n", op->name, err); -#endif } return; diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/trunk/drivers/net/wireless/iwlwifi/mvm/fw-api.h index c6384555aab4..191dcae8ba47 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -173,8 +173,6 @@ enum { REPLY_DEBUG_CMD = 0xf0, DEBUG_LOG_MSG = 0xf7, - MCAST_FILTER_CMD = 0xd0, - /* D3 commands/notifications */ D3_CONFIG_CMD = 0xd3, PROT_OFFLOAD_CONFIG_CMD = 0xd4, @@ -950,29 +948,4 @@ struct iwl_set_calib_default_cmd { u8 data[0]; } __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */ -#define MAX_PORT_ID_NUM 2 - -/** - * struct iwl_mcast_filter_cmd - configure multicast filter. - * @filter_own: Set 1 to filter out multicast packets sent by station itself - * @port_id: Multicast MAC addresses array specifier. This is a strange way - * to identify network interface adopted in host-device IF. - * It is used by FW as index in array of addresses. This array has - * MAX_PORT_ID_NUM members. - * @count: Number of MAC addresses in the array - * @pass_all: Set 1 to pass all multicast packets. - * @bssid: current association BSSID. - * @addr_list: Place holder for array of MAC addresses. - * IMPORTANT: add padding if necessary to ensure DWORD alignment. 
- */ -struct iwl_mcast_filter_cmd { - u8 filter_own; - u8 port_id; - u8 count; - u8 pass_all; - u8 bssid[6]; - u8 reserved[2]; - u8 addr_list[0]; -} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ - #endif /* __fw_api_h__ */ diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/trunk/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index b2cc3d98e0f7..e6eca4d66f6c 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -586,12 +586,10 @@ static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, */ static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mac_data_sta *ctxt_sta, - bool force_assoc_off) + struct iwl_mac_data_sta *ctxt_sta) { /* We need the dtim_period to set the MAC as associated */ - if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && - !force_assoc_off) { + if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) { u32 dtim_offs; /* @@ -661,8 +659,7 @@ static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm, cmd.filter_flags &= ~cpu_to_le32(MAC_FILTER_IN_BEACON); /* Fill the data specific for station mode */ - iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta, - action == FW_CTXT_ACTION_ADD); + iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta); return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } @@ -680,8 +677,7 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm, iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); /* Fill the data specific for station mode */ - iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta, - action == FW_CTXT_ACTION_ADD); + iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta); cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK); diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/trunk/drivers/net/wireless/iwlwifi/mvm/mac80211.c index a5eb8c82f16a..dd158ec571fb 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -701,20 +701,6 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, *total_flags = 0; } -static int iwl_mvm_configure_mcast_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct iwl_mcast_filter_cmd mcast_filter_cmd = { - .pass_all = 1, - }; - - memcpy(mcast_filter_cmd.bssid, vif->bss_conf.bssid, ETH_ALEN); - - return iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, - sizeof(mcast_filter_cmd), - &mcast_filter_cmd); -} - static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, @@ -736,7 +722,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, return; } iwl_mvm_bt_coex_vif_assoc(mvm, vif); - iwl_mvm_configure_mcast_filter(mvm, vif); } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { /* remove AP station now that the MAC is unassoc */ ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); @@ -946,7 +931,7 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, switch (cmd) { case STA_NOTIFY_SLEEP: - if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0) + if (atomic_read(&mvmsta->pending_frames) > 0) ieee80211_sta_block_awake(hw, sta, true); /* * The fw updates the STA to be asleep. 
Tx packets on the Tx diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/mvm.h b/trunk/drivers/net/wireless/iwlwifi/mvm/mvm.h index 9f46b23801bc..8269bc562951 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -292,7 +292,6 @@ struct iwl_mvm { struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; struct work_struct sta_drained_wk; unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; - atomic_t pending_frames[IWL_MVM_STATION_COUNT]; /* configured by mac80211 */ u32 rts_threshold; diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/ops.c b/trunk/drivers/net/wireless/iwlwifi/mvm/ops.c index b29c31a41594..fe031d304d1e 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -292,7 +292,6 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(BT_COEX_PROT_ENV), CMD(BT_PROFILE_NOTIFICATION), CMD(BT_CONFIG), - CMD(MCAST_FILTER_CMD), }; #undef CMD diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/rs.c b/trunk/drivers/net/wireless/iwlwifi/mvm/rs.c index b99fe3163866..55334d542e26 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -2546,7 +2546,6 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, info->control.rates[0].flags = 0; } info->control.rates[0].idx = rate_idx; - info->control.rates[0].count = 1; } static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/scan.c b/trunk/drivers/net/wireless/iwlwifi/mvm/scan.c index 2476e43799d5..2157b0f8ced5 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -298,12 +298,6 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm, else cmd->type = cpu_to_le32(SCAN_TYPE_FORCED); - /* - * TODO: This is a WA due to a bug in the FW AUX framework that does not - * properly handle time events that fail to be scheduled - */ - cmd->type = cpu_to_le32(SCAN_TYPE_FORCED); - cmd->repeats = cpu_to_le32(1); /* diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/sta.c b/trunk/drivers/net/wireless/iwlwifi/mvm/sta.c index 5c664ed54400..0fd96e4da461 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -219,7 +219,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; /* HW restart, don't assume the memory has been zeroed */ - atomic_set(&mvm->pending_frames[sta_id], 0); + atomic_set(&mvm_sta->pending_frames, 0); mvm_sta->tid_disable_agg = 0; mvm_sta->tfd_queue_msk = 0; for (i = 0; i < IEEE80211_NUM_ACS; i++) @@ -406,22 +406,15 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; } - /* - * Make sure that the tx response code sees the station as -EBUSY and - * calls the drain worker. - */ - spin_lock_bh(&mvm_sta->lock); /* * There are frames pending on the AC queues for this station. * We need to wait until all the frames are drained... 
*/ - if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) { + if (atomic_read(&mvm_sta->pending_frames)) { + ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], ERR_PTR(-EBUSY)); - spin_unlock_bh(&mvm_sta->lock); - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); } else { - spin_unlock_bh(&mvm_sta->lock); ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); } diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/sta.h b/trunk/drivers/net/wireless/iwlwifi/mvm/sta.h index a4ddce77aaae..12abd2d71835 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/sta.h +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/sta.h @@ -274,6 +274,7 @@ struct iwl_mvm_tid_data { * @bt_reduced_txpower: is reduced tx power enabled for this station * @lock: lock to protect the whole struct. Since %tid_data is access from Tx * and from Tx response flow, it needs a spinlock. + * @pending_frames: number of frames for this STA on the shared Tx queues. * @tid_data: per tid data. Look at %iwl_mvm_tid_data. * * When mac80211 creates a station it reserves some space (hw->sta_data_size) @@ -289,6 +290,7 @@ struct iwl_mvm_sta { u8 max_agg_bufsize; bool bt_reduced_txpower; spinlock_t lock; + atomic_t pending_frames; struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; struct iwl_lq_sta lq_sta; struct ieee80211_vif *vif; diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/tx.c b/trunk/drivers/net/wireless/iwlwifi/mvm/tx.c index 48c1891e3df6..479074303bd7 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -180,8 +180,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); return; } else if (ieee80211_is_back_req(fc)) { - tx_cmd->tx_flags |= - cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); + tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); } /* HT rate doesn't make sense for a non data frame */ @@ -417,8 +416,9 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, spin_unlock(&mvmsta->lock); - if (txq_id < IWL_MVM_FIRST_AGG_QUEUE) - atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); + if (mvmsta->vif->type == NL80211_IFTYPE_AP && + txq_id < IWL_MVM_FIRST_AGG_QUEUE) + atomic_inc(&mvmsta->pending_frames); return 0; @@ -680,41 +680,16 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, /* * If the txq is not an AMPDU queue, there is no chance we freed * several skbs. Check that out... + * If there are no pending frames for this STA, notify mac80211 that + * this station can go to sleep in its STA table. */ - if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && !WARN_ON(skb_freed > 1) && - atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) { - if (mvmsta) { - /* - * If there are no pending frames for this STA, notify - * mac80211 that this station can go to sleep in its - * STA table. - */ - if (mvmsta->vif->type == NL80211_IFTYPE_AP) - ieee80211_sta_block_awake(mvm->hw, sta, false); - /* - * We might very well have taken mvmsta pointer while - * the station was being removed. The remove flow might - * have seen a pending_frame (because we didn't take - * the lock) even if now the queues are drained. So make - * really sure now that this the station is not being - * removed. If it is, run the drain worker to remove it. 
- */ - spin_lock_bh(&mvmsta->lock); - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - if (IS_ERR_OR_NULL(sta)) { - /* - * Station disappeared in the meantime: - * so we are draining. - */ - set_bit(sta_id, mvm->sta_drained); - schedule_work(&mvm->sta_drained_wk); - } - spin_unlock_bh(&mvmsta->lock); - } else if (!mvmsta) { - /* Tx response without STA, so we are draining */ - set_bit(sta_id, mvm->sta_drained); - schedule_work(&mvm->sta_drained_wk); - } + if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && mvmsta && + !WARN_ON(skb_freed > 1) && + mvmsta->vif->type == NL80211_IFTYPE_AP && + atomic_sub_and_test(skb_freed, &mvmsta->pending_frames)) { + ieee80211_sta_block_awake(mvm->hw, sta, false); + set_bit(sta_id, mvm->sta_drained); + schedule_work(&mvm->sta_drained_wk); } rcu_read_unlock(); diff --git a/trunk/drivers/net/wireless/mac80211_hwsim.c b/trunk/drivers/net/wireless/mac80211_hwsim.c index cb34c7895f2a..b878a32e7a98 100644 --- a/trunk/drivers/net/wireless/mac80211_hwsim.c +++ b/trunk/drivers/net/wireless/mac80211_hwsim.c @@ -1723,11 +1723,11 @@ static void mac80211_hwsim_free(void) class_destroy(hwsim_class); } -static struct platform_driver mac80211_hwsim_driver = { - .driver = { - .name = "mac80211_hwsim", - .owner = THIS_MODULE, - }, + +static struct device_driver mac80211_hwsim_driver = { + .name = "mac80211_hwsim", + .bus = &platform_bus_type, + .owner = THIS_MODULE, }; static const struct net_device_ops hwsim_netdev_ops = { @@ -2219,7 +2219,7 @@ static int __init init_mac80211_hwsim(void) spin_lock_init(&hwsim_radio_lock); INIT_LIST_HEAD(&hwsim_radios); - err = platform_driver_register(&mac80211_hwsim_driver); + err = driver_register(&mac80211_hwsim_driver); if (err) return err; @@ -2254,7 +2254,7 @@ static int __init init_mac80211_hwsim(void) err = -ENOMEM; goto failed_drvdata; } - data->dev->driver = &mac80211_hwsim_driver.driver; + data->dev->driver = &mac80211_hwsim_driver; err = device_bind_driver(data->dev); if (err != 0) { printk(KERN_DEBUG @@ -2564,7 +2564,7 @@ static int __init init_mac80211_hwsim(void) failed: mac80211_hwsim_free(); failed_unregister_driver: - platform_driver_unregister(&mac80211_hwsim_driver); + driver_unregister(&mac80211_hwsim_driver); return err; } module_init(init_mac80211_hwsim); @@ -2577,6 +2577,6 @@ static void __exit exit_mac80211_hwsim(void) mac80211_hwsim_free(); unregister_netdev(hwsim_mon); - platform_driver_unregister(&mac80211_hwsim_driver); + driver_unregister(&mac80211_hwsim_driver); } module_exit(exit_mac80211_hwsim); diff --git a/trunk/drivers/net/wireless/mwifiex/cfg80211.c b/trunk/drivers/net/wireless/mwifiex/cfg80211.c index e42b266a023a..d3c8ece980d8 100644 --- a/trunk/drivers/net/wireless/mwifiex/cfg80211.c +++ b/trunk/drivers/net/wireless/mwifiex/cfg80211.c @@ -2234,6 +2234,9 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) if (wdev->netdev->reg_state == NETREG_REGISTERED) unregister_netdevice(wdev->netdev); + if (wdev->netdev->reg_state == NETREG_UNREGISTERED) + free_netdev(wdev->netdev); + /* Clear the priv in adapter */ priv->netdev = NULL; diff --git a/trunk/drivers/net/wireless/mwifiex/cmdevt.c b/trunk/drivers/net/wireless/mwifiex/cmdevt.c index 26755d9acb55..74db0d24a579 100644 --- a/trunk/drivers/net/wireless/mwifiex/cmdevt.c +++ b/trunk/drivers/net/wireless/mwifiex/cmdevt.c @@ -1191,7 +1191,6 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter) adapter->if_ops.wakeup(adapter); adapter->hs_activated = false; adapter->is_hs_configured = false; - adapter->is_suspended = false; 
mwifiex_hs_activated_event(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY), false); diff --git a/trunk/drivers/net/wireless/mwifiex/debugfs.c b/trunk/drivers/net/wireless/mwifiex/debugfs.c index a5f9875cfd6e..753b5682d53f 100644 --- a/trunk/drivers/net/wireless/mwifiex/debugfs.c +++ b/trunk/drivers/net/wireless/mwifiex/debugfs.c @@ -26,17 +26,10 @@ static struct dentry *mwifiex_dfs_dir; static char *bss_modes[] = { - "UNSPECIFIED", - "ADHOC", - "STATION", - "AP", - "AP_VLAN", - "WDS", - "MONITOR", - "MESH_POINT", - "P2P_CLIENT", - "P2P_GO", - "P2P_DEVICE", + "Unknown", + "Ad-hoc", + "Managed", + "Auto" }; /* size/addr for mwifiex_debug_info */ @@ -207,12 +200,7 @@ mwifiex_info_read(struct file *file, char __user *ubuf, p += sprintf(p, "driver_version = %s", fmt); p += sprintf(p, "\nverext = %s", priv->version_str); p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name); - - if (info.bss_mode >= ARRAY_SIZE(bss_modes)) - p += sprintf(p, "bss_mode=\"%d\"\n", info.bss_mode); - else - p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]); - + p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]); p += sprintf(p, "media_state=\"%s\"\n", (!priv->media_connected ? "Disconnected" : "Connected")); p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr); diff --git a/trunk/drivers/net/wireless/mwifiex/main.c b/trunk/drivers/net/wireless/mwifiex/main.c index 2eb88ea9acf7..121443a0f2a1 100644 --- a/trunk/drivers/net/wireless/mwifiex/main.c +++ b/trunk/drivers/net/wireless/mwifiex/main.c @@ -655,7 +655,6 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv, struct net_device *dev) { dev->netdev_ops = &mwifiex_netdev_ops; - dev->destructor = free_netdev; /* Initialize private structure */ priv->current_key_index = 0; priv->media_connected = false; diff --git a/trunk/drivers/net/wireless/mwifiex/sta_ioctl.c b/trunk/drivers/net/wireless/mwifiex/sta_ioctl.c index 1a8a19dbd635..311d0b26b81c 100644 --- a/trunk/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/trunk/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -96,7 +96,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, } else { /* Multicast */ priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE; - if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) { + if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) { dev_dbg(priv->adapter->dev, "info: Enabling All Multicast!\n"); priv->curr_pkt_filter |= @@ -108,11 +108,20 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, dev_dbg(priv->adapter->dev, "info: Set multicast list=%d\n", mcast_list->num_multicast_addr); - /* Send multicast addresses to firmware */ - ret = mwifiex_send_cmd_async(priv, - HostCmd_CMD_MAC_MULTICAST_ADR, - HostCmd_ACT_GEN_SET, 0, - mcast_list); + /* Set multicast addresses to firmware */ + if (old_pkt_filter == priv->curr_pkt_filter) { + /* Send request to firmware */ + ret = mwifiex_send_cmd_async(priv, + HostCmd_CMD_MAC_MULTICAST_ADR, + HostCmd_ACT_GEN_SET, 0, + mcast_list); + } else { + /* Send request to firmware */ + ret = mwifiex_send_cmd_async(priv, + HostCmd_CMD_MAC_MULTICAST_ADR, + HostCmd_ACT_GEN_SET, 0, + mcast_list); + } } } } diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800lib.c b/trunk/drivers/net/wireless/rt2x00/rt2800lib.c index 72f32e5caa4d..b52d70c75e1a 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2800lib.c @@ -3027,26 +3027,19 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, * TODO: we do not use +6 dBm option to do not 
increase power beyond * regulatory limit, however this could be utilized for devices with * CAPABILITY_POWER_LIMIT. - * - * TODO: add different temperature compensation code for RT3290 & RT5390 - * to allow to use BBP_R1 for those chips. */ - if (!rt2x00_rt(rt2x00dev, RT3290) && - !rt2x00_rt(rt2x00dev, RT5390)) { - rt2800_bbp_read(rt2x00dev, 1, &r1); - if (delta <= -12) { - power_ctrl = 2; - delta += 12; - } else if (delta <= -6) { - power_ctrl = 1; - delta += 6; - } else { - power_ctrl = 0; - } - rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); - rt2800_bbp_write(rt2x00dev, 1, r1); + rt2800_bbp_read(rt2x00dev, 1, &r1); + if (delta <= -12) { + power_ctrl = 2; + delta += 12; + } else if (delta <= -6) { + power_ctrl = 1; + delta += 6; + } else { + power_ctrl = 0; } - + rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); + rt2800_bbp_write(rt2x00dev, 1, r1); offset = TX_PWR_CFG_0; for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) { diff --git a/trunk/drivers/net/wireless/rtlwifi/pci.c b/trunk/drivers/net/wireless/rtlwifi/pci.c index c97e9d327331..999ffc12578b 100644 --- a/trunk/drivers/net/wireless/rtlwifi/pci.c +++ b/trunk/drivers/net/wireless/rtlwifi/pci.c @@ -764,7 +764,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) "can't alloc skb for rx\n"); goto done; } - kmemleak_not_leak(new_skb); pci_unmap_single(rtlpci->pdev, *((dma_addr_t *) skb->cb), diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h b/trunk/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h index 21ca33a7c770..d3a02e73f53a 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h @@ -550,7 +550,7 @@ do { \ rxmcs == DESC92C_RATE11M) struct phy_rx_agc_info_t { - #ifdef __LITTLE_ENDIAN + #if __LITTLE_ENDIAN u8 gain:7, trsw:1; #else u8 trsw:1, gain:7; @@ -574,7 +574,7 @@ struct phy_status_rpt { u8 stream_target_csi[2]; u8 sig_evm; u8 rsvd_3; -#ifdef __LITTLE_ENDIAN +#if __LITTLE_ENDIAN u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/ u8 sgi_en:1; u8 rxsc:2; diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index 189ba124a8c6..3d0498e69c8c 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -1973,35 +1973,26 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } } -static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, - struct ieee80211_sta *sta) +void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u32 ratr_value; + u32 ratr_value = (u32) mac->basic_rates; + u8 *mcsrate = mac->mcs; u8 ratr_index = 0; u8 nmode = mac->ht_enable; - u8 mimo_ps = IEEE80211_SMPS_OFF; - u16 shortgi_rate; - u32 tmp_ratr_value; + u8 mimo_ps = 1; + u16 shortgi_rate = 0; + u32 tmp_ratr_value = 0; u8 curtxbw_40mhz = mac->bw_40; - u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? - 1 : 0; - u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
- 1 : 0; + u8 curshortgi_40mhz = mac->sgi_40; + u8 curshortgi_20mhz = mac->sgi_20; enum wireless_mode wirelessmode = mac->mode; - if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_value = sta->supp_rates[1] << 4; - else - ratr_value = sta->supp_rates[0]; - if (mac->opmode == NL80211_IFTYPE_ADHOC) - ratr_value = 0xfff; - - ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_value |= ((*(u16 *) (mcsrate))) << 12; switch (wirelessmode) { case WIRELESS_MODE_B: if (ratr_value & 0x0000000c) @@ -2015,7 +2006,7 @@ static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, case WIRELESS_MODE_N_24G: case WIRELESS_MODE_N_5G: nmode = 1; - if (mimo_ps == IEEE80211_SMPS_STATIC) { + if (mimo_ps == 0) { ratr_value &= 0x0007F005; } else { u32 ratr_mask; @@ -2025,7 +2016,8 @@ static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, ratr_mask = 0x000ff005; else ratr_mask = 0x0f0ff005; - + if (curtxbw_40mhz) + ratr_mask |= 0x00000010; ratr_value &= ratr_mask; } break; @@ -2034,74 +2026,41 @@ static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, ratr_value &= 0x000ff0ff; else ratr_value &= 0x0f0ff0ff; - break; } - ratr_value &= 0x0FFFFFFF; - - if (nmode && ((curtxbw_40mhz && - curshortgi_40mhz) || (!curtxbw_40mhz && - curshortgi_20mhz))) { - + if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || + (!curtxbw_40mhz && curshortgi_20mhz))) { ratr_value |= 0x10000000; tmp_ratr_value = (ratr_value >> 12); - for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) { if ((1 << shortgi_rate) & tmp_ratr_value) break; } - shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) | - (shortgi_rate << 4) | (shortgi_rate); + (shortgi_rate << 4) | (shortgi_rate); } - rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); - - RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n", - rtl_read_dword(rtlpriv, REG_ARFR0)); } -static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - u8 rssi_level) +void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_sta_info *sta_entry = NULL; - u32 ratr_bitmap; - u8 ratr_index; - u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0; - u8 curshortgi_40mhz = curtxbw_40mhz && - (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? - 1 : 0; - u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
- 1 : 0; - enum wireless_mode wirelessmode = 0; + u32 ratr_bitmap = (u32) mac->basic_rates; + u8 *p_mcsrate = mac->mcs; + u8 ratr_index = 0; + u8 curtxbw_40mhz = mac->bw_40; + u8 curshortgi_40mhz = mac->sgi_40; + u8 curshortgi_20mhz = mac->sgi_20; + enum wireless_mode wirelessmode = mac->mode; bool shortgi = false; u8 rate_mask[5]; u8 macid = 0; - u8 mimo_ps = IEEE80211_SMPS_OFF; - - sta_entry = (struct rtl_sta_info *) sta->drv_priv; - wirelessmode = sta_entry->wireless_mode; - if (mac->opmode == NL80211_IFTYPE_STATION || - mac->opmode == NL80211_IFTYPE_MESH_POINT) - curtxbw_40mhz = mac->bw_40; - else if (mac->opmode == NL80211_IFTYPE_AP || - mac->opmode == NL80211_IFTYPE_ADHOC) - macid = sta->aid + 1; - - if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_bitmap = sta->supp_rates[1] << 4; - else - ratr_bitmap = sta->supp_rates[0]; - if (mac->opmode == NL80211_IFTYPE_ADHOC) - ratr_bitmap = 0xfff; - ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + u8 mimops = 1; + + ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: ratr_index = RATR_INX_WIRELESS_B; @@ -2112,7 +2071,6 @@ static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, break; case WIRELESS_MODE_G: ratr_index = RATR_INX_WIRELESS_GB; - if (rssi_level == 1) ratr_bitmap &= 0x00000f00; else if (rssi_level == 2) @@ -2127,8 +2085,7 @@ static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, case WIRELESS_MODE_N_24G: case WIRELESS_MODE_N_5G: ratr_index = RATR_INX_WIRELESS_NGB; - - if (mimo_ps == IEEE80211_SMPS_STATIC) { + if (mimops == 0) { if (rssi_level == 1) ratr_bitmap &= 0x00070000; else if (rssi_level == 2) @@ -2171,10 +2128,8 @@ static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, } } } - if ((curtxbw_40mhz && curshortgi_40mhz) || (!curtxbw_40mhz && curshortgi_20mhz)) { - if (macid == 0) shortgi = true; else if (macid == 1) @@ -2183,42 +2138,21 @@ static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, break; default: ratr_index = RATR_INX_WIRELESS_NGB; - if (rtlphy->rf_type == RF_1T2R) ratr_bitmap &= 0x000ff0ff; else ratr_bitmap &= 0x0f0ff0ff; break; } - sta_entry->ratr_index = ratr_index; - - RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, - "ratr_bitmap :%x\n", ratr_bitmap); - *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) | - (ratr_index << 28); + RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "ratr_bitmap :%x\n", + ratr_bitmap); + *(u32 *)&rate_mask = ((ratr_bitmap & 0x0fffffff) | + ratr_index << 28); rate_mask[4] = macid | (shortgi ? 
0x20 : 0x00) | 0x80; RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "Rate_index:%x, ratr_val:%x, %5phC\n", ratr_index, ratr_bitmap, rate_mask); - memcpy(rtlpriv->rate_mask, rate_mask, 5); - /* rtl92c_fill_h2c_cmd() does USB I/O and will result in a - * "scheduled while atomic" if called directly */ - schedule_work(&rtlpriv->works.fill_h2c_cmd); - - if (macid != 0) - sta_entry->ratr_index = ratr_index; -} - -void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - u8 rssi_level) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - if (rtlpriv->dm.useramask) - rtl92cu_update_hal_rate_mask(hw, sta, rssi_level); - else - rtl92cu_update_hal_rate_table(hw, sta); + rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask); } void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw) diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h index 8e3ec1e25644..f41a3aa4a26f 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h @@ -98,6 +98,10 @@ void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw, u32 add_msr, u32 rm_msr); void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level); +void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level); void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw); bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid); diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c index da4f587199ee..85b6bdb163c0 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c @@ -289,30 +289,14 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index, macaddr = cam_const_broad; entry_id = key_index; } else { - if (mac->opmode == NL80211_IFTYPE_AP || - mac->opmode == NL80211_IFTYPE_MESH_POINT) { - entry_id = rtl_cam_get_free_entry(hw, - p_macaddr); - if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, - DBG_EMERG, - "Can not find free hw security cam entry\n"); - return; - } - } else { - entry_id = CAM_PAIRWISE_KEY_POSITION; - } - key_index = PAIRWISE_KEYIDX; + entry_id = CAM_PAIRWISE_KEY_POSITION; is_pairwise = true; } } if (rtlpriv->sec.key_len[key_index] == 0) { RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "delete one entry\n"); - if (mac->opmode == NL80211_IFTYPE_AP || - mac->opmode == NL80211_IFTYPE_MESH_POINT) - rtl_cam_del_entry(hw, p_macaddr); rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); } else { RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 826f085c29dd..23d640a4debd 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -106,7 +106,8 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = { .update_interrupt_mask = rtl92cu_update_interrupt_mask, .get_hw_reg = rtl92cu_get_hw_reg, .set_hw_reg = rtl92cu_set_hw_reg, - .update_rate_tbl = rtl92cu_update_hal_rate_tbl, + .update_rate_tbl = rtl92cu_update_hal_rate_table, + .update_rate_mask = rtl92cu_update_hal_rate_mask, .fill_tx_desc = rtl92cu_tx_fill_desc, .fill_fake_txdesc = rtl92cu_fill_fake_txdesc, .fill_tx_cmddesc = 
rtl92cu_tx_fill_cmddesc, @@ -136,7 +137,6 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = { .phy_lc_calibrate = _rtl92cu_phy_lc_calibrate, .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback, .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower, - .fill_h2c_cmd = rtl92c_fill_h2c_cmd, }; static struct rtl_mod_params rtl92cu_mod_params = { @@ -349,7 +349,6 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/ {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/ {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/ - {RTL_USB_DEVICE(0x0846, 0xf001, rtl92cu_hal_cfg)}, /*On Netwrks N300MA*/ {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/ {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/ {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/ diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h index 262e1e4c6e5b..a1310abd0d54 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h @@ -49,8 +49,5 @@ bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw, u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask); void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw); -void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - u8 rssi_level); #endif diff --git a/trunk/drivers/net/wireless/rtlwifi/usb.c b/trunk/drivers/net/wireless/rtlwifi/usb.c index a3532e077871..76732b0cd221 100644 --- a/trunk/drivers/net/wireless/rtlwifi/usb.c +++ b/trunk/drivers/net/wireless/rtlwifi/usb.c @@ -824,7 +824,6 @@ static void rtl_usb_stop(struct ieee80211_hw *hw) /* should after adapter start and interrupt enable. 
*/ set_hal_stop(rtlhal); - cancel_work_sync(&rtlpriv->works.fill_h2c_cmd); /* Enable software */ SET_USB_STOP(rtlusb); rtl_usb_deinit(hw); @@ -1027,16 +1026,6 @@ static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw, return false; } -static void rtl_fill_h2c_cmd_work_callback(struct work_struct *work) -{ - struct rtl_works *rtlworks = - container_of(work, struct rtl_works, fill_h2c_cmd); - struct ieee80211_hw *hw = rtlworks->hw; - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtlpriv->cfg->ops->fill_h2c_cmd(hw, H2C_RA_MASK, 5, rtlpriv->rate_mask); -} - static struct rtl_intf_ops rtl_usb_ops = { .adapter_start = rtl_usb_start, .adapter_stop = rtl_usb_stop, @@ -1068,8 +1057,6 @@ int rtl_usb_probe(struct usb_interface *intf, /* this spin lock must be initialized early */ spin_lock_init(&rtlpriv->locks.usb_lock); - INIT_WORK(&rtlpriv->works.fill_h2c_cmd, - rtl_fill_h2c_cmd_work_callback); rtlpriv->usb_data_index = 0; init_completion(&rtlpriv->firmware_loading_complete); diff --git a/trunk/drivers/net/wireless/rtlwifi/wifi.h b/trunk/drivers/net/wireless/rtlwifi/wifi.h index cc03e7c87cbe..44328baa6389 100644 --- a/trunk/drivers/net/wireless/rtlwifi/wifi.h +++ b/trunk/drivers/net/wireless/rtlwifi/wifi.h @@ -1736,8 +1736,6 @@ struct rtl_hal_ops { void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw, bool mstate); void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw); - void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id, - u32 cmd_len, u8 *p_cmdbuffer); }; struct rtl_intf_ops { @@ -1871,7 +1869,6 @@ struct rtl_works { struct delayed_work fwevt_wq; struct work_struct lps_change_work; - struct work_struct fill_h2c_cmd; }; struct rtl_debug { @@ -2051,7 +2048,6 @@ struct rtl_priv { }; }; bool enter_ps; /* true when entering PS */ - u8 rate_mask[5]; /*This must be the last item so that it points to the data allocated diff --git a/trunk/drivers/net/wireless/ti/wl12xx/scan.c b/trunk/drivers/net/wireless/ti/wl12xx/scan.c index 4a0bbb13806b..affdb3ec6225 100644 --- a/trunk/drivers/net/wireless/ti/wl12xx/scan.c +++ b/trunk/drivers/net/wireless/ti/wl12xx/scan.c @@ -310,7 +310,7 @@ static void wl12xx_adjust_channels(struct wl1271_cmd_sched_scan_config *cmd, memcpy(cmd->channels_2, cmd_channels->channels_2, sizeof(cmd->channels_2)); memcpy(cmd->channels_5, cmd_channels->channels_5, - sizeof(cmd->channels_5)); + sizeof(cmd->channels_2)); /* channels_4 are not supported, so no need to copy them */ } diff --git a/trunk/drivers/net/wireless/ti/wl12xx/wl12xx.h b/trunk/drivers/net/wireless/ti/wl12xx/wl12xx.h index 9e5484a73667..222d03540200 100644 --- a/trunk/drivers/net/wireless/ti/wl12xx/wl12xx.h +++ b/trunk/drivers/net/wireless/ti/wl12xx/wl12xx.h @@ -36,12 +36,12 @@ #define WL127X_IFTYPE_SR_VER 3 #define WL127X_MAJOR_SR_VER 10 #define WL127X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE -#define WL127X_MINOR_SR_VER 133 +#define WL127X_MINOR_SR_VER 115 /* minimum multi-role FW version for wl127x */ #define WL127X_IFTYPE_MR_VER 5 #define WL127X_MAJOR_MR_VER 7 #define WL127X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE -#define WL127X_MINOR_MR_VER 42 +#define WL127X_MINOR_MR_VER 115 /* FW chip version for wl128x */ #define WL128X_CHIP_VER 7 @@ -49,7 +49,7 @@ #define WL128X_IFTYPE_SR_VER 3 #define WL128X_MAJOR_SR_VER 10 #define WL128X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE -#define WL128X_MINOR_SR_VER 133 +#define WL128X_MINOR_SR_VER 115 /* minimum multi-role FW version for wl128x */ #define WL128X_IFTYPE_MR_VER 5 #define WL128X_MAJOR_MR_VER 7 diff --git a/trunk/drivers/net/wireless/ti/wl18xx/scan.c 
b/trunk/drivers/net/wireless/ti/wl18xx/scan.c index 2b642f8c9266..09d944505ac0 100644 --- a/trunk/drivers/net/wireless/ti/wl18xx/scan.c +++ b/trunk/drivers/net/wireless/ti/wl18xx/scan.c @@ -34,7 +34,7 @@ static void wl18xx_adjust_channels(struct wl18xx_cmd_scan_params *cmd, memcpy(cmd->channels_2, cmd_channels->channels_2, sizeof(cmd->channels_2)); memcpy(cmd->channels_5, cmd_channels->channels_5, - sizeof(cmd->channels_5)); + sizeof(cmd->channels_2)); /* channels_4 are not supported, so no need to copy them */ } diff --git a/trunk/drivers/net/xen-netback/netback.c b/trunk/drivers/net/xen-netback/netback.c index 8c20935d72c9..37984e6d4e99 100644 --- a/trunk/drivers/net/xen-netback/netback.c +++ b/trunk/drivers/net/xen-netback/netback.c @@ -662,7 +662,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk) { struct xenvif *vif = NULL, *tmp; s8 status; - u16 flags; + u16 irq, flags; struct xen_netif_rx_response *resp; struct sk_buff_head rxq; struct sk_buff *skb; @@ -771,13 +771,13 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk) sco->meta_slots_used); RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); + irq = vif->irq; + if (ret && list_empty(&vif->notify_list)) + list_add_tail(&vif->notify_list, ¬ify); xenvif_notify_tx_completion(vif); - if (ret && list_empty(&vif->notify_list)) - list_add_tail(&vif->notify_list, ¬ify); - else - xenvif_put(vif); + xenvif_put(vif); npo.meta_cons += sco->meta_slots_used; dev_kfree_skb(skb); } @@ -785,7 +785,6 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk) list_for_each_entry_safe(vif, tmp, ¬ify, notify_list) { notify_remote_via_irq(vif->irq); list_del_init(&vif->notify_list); - xenvif_put(vif); } /* More work to do? */ diff --git a/trunk/drivers/nfc/Kconfig b/trunk/drivers/nfc/Kconfig index 74a852e4e41f..4775d4e61b88 100644 --- a/trunk/drivers/nfc/Kconfig +++ b/trunk/drivers/nfc/Kconfig @@ -28,7 +28,7 @@ config NFC_WILINK config NFC_MEI_PHY tristate "MEI bus NFC device support" - depends on INTEL_MEI && NFC_HCI + depends on INTEL_MEI_BUS_NFC && NFC_HCI help This adds support to use an mei bus nfc device. 
Select this if you will use an HCI NFC driver for an NFC chip connected behind an diff --git a/trunk/drivers/nfc/mei_phy.c b/trunk/drivers/nfc/mei_phy.c index 1201bdbfb791..b8f8abc422f0 100644 --- a/trunk/drivers/nfc/mei_phy.c +++ b/trunk/drivers/nfc/mei_phy.c @@ -64,15 +64,6 @@ int nfc_mei_phy_enable(void *phy_id) return r; } - r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy); - if (r) { - pr_err("MEY_PHY: Event cb registration failed\n"); - mei_cl_disable_device(phy->device); - phy->powered = 0; - - return r; - } - phy->powered = 1; return 0; diff --git a/trunk/drivers/nfc/microread/mei.c b/trunk/drivers/nfc/microread/mei.c index cdf1bc53b257..1ad044dce7b6 100644 --- a/trunk/drivers/nfc/microread/mei.c +++ b/trunk/drivers/nfc/microread/mei.c @@ -43,16 +43,24 @@ static int microread_mei_probe(struct mei_cl_device *device, return -ENOMEM; } + r = mei_cl_register_event_cb(device, nfc_mei_event_cb, phy); + if (r) { + pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n"); + goto err_out; + } + r = microread_probe(phy, &mei_phy_ops, LLC_NOP_NAME, MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD, &phy->hdev); - if (r < 0) { - nfc_mei_phy_free(phy); - - return r; - } + if (r < 0) + goto err_out; return 0; + +err_out: + nfc_mei_phy_free(phy); + + return r; } static int microread_mei_remove(struct mei_cl_device *device) @@ -63,6 +71,8 @@ static int microread_mei_remove(struct mei_cl_device *device) microread_remove(phy->hdev); + nfc_mei_phy_disable(phy); + nfc_mei_phy_free(phy); return 0; diff --git a/trunk/drivers/nfc/pn544/mei.c b/trunk/drivers/nfc/pn544/mei.c index b5d3d18179eb..1eb48848a35a 100644 --- a/trunk/drivers/nfc/pn544/mei.c +++ b/trunk/drivers/nfc/pn544/mei.c @@ -43,16 +43,24 @@ static int pn544_mei_probe(struct mei_cl_device *device, return -ENOMEM; } + r = mei_cl_register_event_cb(device, nfc_mei_event_cb, phy); + if (r) { + pr_err(PN544_DRIVER_NAME ": event cb registration failed\n"); + goto err_out; + } + r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME, MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD, &phy->hdev); - if (r < 0) { - nfc_mei_phy_free(phy); - - return r; - } + if (r < 0) + goto err_out; return 0; + +err_out: + nfc_mei_phy_free(phy); + + return r; } static int pn544_mei_remove(struct mei_cl_device *device) @@ -63,6 +71,8 @@ static int pn544_mei_remove(struct mei_cl_device *device) pn544_hci_remove(phy->hdev); + nfc_mei_phy_disable(phy); + nfc_mei_phy_free(phy); return 0; diff --git a/trunk/drivers/ntb/ntb_hw.c b/trunk/drivers/ntb/ntb_hw.c index 2dacd19e1b8a..f802e7c92356 100644 --- a/trunk/drivers/ntb/ntb_hw.c +++ b/trunk/drivers/ntb/ntb_hw.c @@ -345,7 +345,7 @@ int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val) */ void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw) { - if (mw >= NTB_NUM_MW) + if (mw > NTB_NUM_MW) return NULL; return ndev->mw[mw].vbase; @@ -362,7 +362,7 @@ void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw) */ resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw) { - if (mw >= NTB_NUM_MW) + if (mw > NTB_NUM_MW) return 0; return ndev->mw[mw].bar_sz; @@ -380,7 +380,7 @@ resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw) */ void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr) { - if (mw >= NTB_NUM_MW) + if (mw > NTB_NUM_MW) return; dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr, @@ -1027,8 +1027,8 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 
ndev->mw[i].vbase = ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)), ndev->mw[i].bar_sz); - dev_info(&pdev->dev, "MW %d size %llu\n", i, - pci_resource_len(pdev, MW_TO_BAR(i))); + dev_info(&pdev->dev, "MW %d size %d\n", i, + (u32) pci_resource_len(pdev, MW_TO_BAR(i))); if (!ndev->mw[i].vbase) { dev_warn(&pdev->dev, "Cannot remap BAR %d\n", MW_TO_BAR(i)); diff --git a/trunk/drivers/ntb/ntb_transport.c b/trunk/drivers/ntb/ntb_transport.c index f8d7081ee301..e0bdfd7f9930 100644 --- a/trunk/drivers/ntb/ntb_transport.c +++ b/trunk/drivers/ntb/ntb_transport.c @@ -58,7 +58,7 @@ #include #include "ntb_hw.h" -#define NTB_TRANSPORT_VERSION 3 +#define NTB_TRANSPORT_VERSION 2 static unsigned int transport_mtu = 0x401E; module_param(transport_mtu, uint, 0644); @@ -173,13 +173,10 @@ struct ntb_payload_header { enum { VERSION = 0, - QP_LINKS, + MW0_SZ, + MW1_SZ, NUM_QPS, - NUM_MWS, - MW0_SZ_HIGH, - MW0_SZ_LOW, - MW1_SZ_HIGH, - MW1_SZ_LOW, + QP_LINKS, MAX_SPAD, }; @@ -300,7 +297,7 @@ int ntb_register_client_dev(char *device_name) { struct ntb_transport_client_dev *client_dev; struct ntb_transport *nt; - int rc, i = 0; + int rc; if (list_empty(&ntb_transport_list)) return -ENODEV; @@ -318,7 +315,7 @@ int ntb_register_client_dev(char *device_name) dev = &client_dev->dev; /* setup and register client devices */ - dev_set_name(dev, "%s%d", device_name, i); + dev_set_name(dev, "%s", device_name); dev->bus = &ntb_bus_type; dev->release = ntb_client_release; dev->parent = &ntb_query_pdev(nt->ndev)->dev; @@ -330,7 +327,6 @@ int ntb_register_client_dev(char *device_name) } list_add_tail(&client_dev->entry, &nt->client_devs); - i++; } return 0; @@ -490,13 +486,12 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt, (qp_num / NTB_NUM_MW * rx_size); rx_size -= sizeof(struct ntb_rx_info); - qp->rx_buff = qp->remote_rx_info + 1; - /* Due to housekeeping, there must be atleast 2 buffs */ - qp->rx_max_frame = min(transport_mtu, rx_size / 2); + qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info); + qp->rx_max_frame = min(transport_mtu, rx_size); qp->rx_max_entry = rx_size / qp->rx_max_frame; qp->rx_index = 0; - qp->remote_rx_info->entry = qp->rx_max_entry - 1; + qp->remote_rx_info->entry = qp->rx_max_entry; /* setup the hdr offsets with 0's */ for (i = 0; i < qp->rx_max_entry; i++) { @@ -507,19 +502,6 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt, qp->rx_pkts = 0; qp->tx_pkts = 0; - qp->tx_index = 0; -} - -static void ntb_free_mw(struct ntb_transport *nt, int num_mw) -{ - struct ntb_transport_mw *mw = &nt->mw[num_mw]; - struct pci_dev *pdev = ntb_query_pdev(nt->ndev); - - if (!mw->virt_addr) - return; - - dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr); - mw->virt_addr = NULL; } static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) @@ -527,20 +509,12 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) struct ntb_transport_mw *mw = &nt->mw[num_mw]; struct pci_dev *pdev = ntb_query_pdev(nt->ndev); - /* No need to re-setup */ - if (mw->size == ALIGN(size, 4096)) - return 0; - - if (mw->size != 0) - ntb_free_mw(nt, num_mw); - /* Alloc memory for receiving data. 
Must be 4k aligned */ mw->size = ALIGN(size, 4096); mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr, GFP_KERNEL); if (!mw->virt_addr) { - mw->size = 0; dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n", (int) mw->size); return -ENOMEM; @@ -630,31 +604,25 @@ static void ntb_transport_link_work(struct work_struct *work) u32 val; int rc, i; - /* send the local info, in the opposite order of the way we read it */ - for (i = 0; i < NTB_NUM_MW; i++) { - rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), - ntb_get_mw_size(ndev, i) >> 32); - if (rc) { - dev_err(&pdev->dev, "Error writing %u to remote spad %d\n", - (u32)(ntb_get_mw_size(ndev, i) >> 32), - MW0_SZ_HIGH + (i * 2)); - goto out; - } + /* send the local info */ + rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION); + if (rc) { + dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", + 0, VERSION); + goto out; + } - rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2), - (u32) ntb_get_mw_size(ndev, i)); - if (rc) { - dev_err(&pdev->dev, "Error writing %u to remote spad %d\n", - (u32) ntb_get_mw_size(ndev, i), - MW0_SZ_LOW + (i * 2)); - goto out; - } + rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0)); + if (rc) { + dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", + (u32) ntb_get_mw_size(ndev, 0), MW0_SZ); + goto out; } - rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW); + rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1)); if (rc) { dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", - NTB_NUM_MW, NUM_MWS); + (u32) ntb_get_mw_size(ndev, 1), MW1_SZ); goto out; } @@ -665,10 +633,16 @@ static void ntb_transport_link_work(struct work_struct *work) goto out; } - rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION); + rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val); + if (rc) { + dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS); + goto out; + } + + rc = ntb_write_remote_spad(ndev, QP_LINKS, val); if (rc) { dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", - NTB_TRANSPORT_VERSION, VERSION); + val, QP_LINKS); goto out; } @@ -693,43 +667,33 @@ static void ntb_transport_link_work(struct work_struct *work) goto out; dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val); - rc = ntb_read_remote_spad(ndev, NUM_MWS, &val); + rc = ntb_read_remote_spad(ndev, MW0_SZ, &val); if (rc) { - dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS); + dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ); goto out; } - if (val != NTB_NUM_MW) + if (!val) goto out; - dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val); - - for (i = 0; i < NTB_NUM_MW; i++) { - u64 val64; + dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val); - rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val); - if (rc) { - dev_err(&pdev->dev, "Error reading remote spad %d\n", - MW0_SZ_HIGH + (i * 2)); - goto out1; - } - - val64 = (u64) val << 32; - - rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val); - if (rc) { - dev_err(&pdev->dev, "Error reading remote spad %d\n", - MW0_SZ_LOW + (i * 2)); - goto out1; - } + rc = ntb_set_mw(nt, 0, val); + if (rc) + goto out; - val64 |= val; + rc = ntb_read_remote_spad(ndev, MW1_SZ, &val); + if (rc) { + dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ); + goto out; + } - dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64); + if (!val) + goto out; + dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val); - rc = ntb_set_mw(nt, i, val64); - if (rc) - goto out1; 
- } + rc = ntb_set_mw(nt, 1, val); + if (rc) + goto out; nt->transport_link = NTB_LINK_UP; @@ -744,9 +708,6 @@ static void ntb_transport_link_work(struct work_struct *work) return; -out1: - for (i = 0; i < NTB_NUM_MW; i++) - ntb_free_mw(nt, i); out: if (ntb_hw_link_status(ndev)) schedule_delayed_work(&nt->link_work, @@ -819,10 +780,10 @@ static void ntb_transport_init_queue(struct ntb_transport *nt, (qp_num / NTB_NUM_MW * tx_size); tx_size -= sizeof(struct ntb_rx_info); - qp->tx_mw = qp->rx_info + 1; - /* Due to housekeeping, there must be atleast 2 buffs */ - qp->tx_max_frame = min(transport_mtu, tx_size / 2); + qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info); + qp->tx_max_frame = min(transport_mtu, tx_size); qp->tx_max_entry = tx_size / qp->tx_max_frame; + qp->tx_index = 0; if (nt->debugfs_dir) { char debugfs_name[4]; @@ -936,7 +897,10 @@ void ntb_transport_free(void *transport) pdev = ntb_query_pdev(nt->ndev); for (i = 0; i < NTB_NUM_MW; i++) - ntb_free_mw(nt, i); + if (nt->mw[i].virt_addr) + dma_free_coherent(&pdev->dev, nt->mw[i].size, + nt->mw[i].virt_addr, + nt->mw[i].dma_addr); kfree(nt->qps); ntb_unregister_transport(nt->ndev); @@ -1035,16 +999,11 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp) static void ntb_transport_rx(unsigned long data) { struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data; - int rc, i; + int rc; - /* Limit the number of packets processed in a single interrupt to - * provide fairness to others - */ - for (i = 0; i < qp->rx_max_entry; i++) { + do { rc = ntb_process_rxc(qp); - if (rc) - break; - } + } while (!rc); } static void ntb_transport_rxc_db(void *data, int db_num) @@ -1251,14 +1210,12 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue); */ void ntb_transport_free_queue(struct ntb_transport_qp *qp) { - struct pci_dev *pdev; + struct pci_dev *pdev = ntb_query_pdev(qp->ndev); struct ntb_queue_entry *entry; if (!qp) return; - pdev = ntb_query_pdev(qp->ndev); - cancel_delayed_work_sync(&qp->link_work); ntb_unregister_db_callback(qp->ndev, qp->qp_num); @@ -1414,13 +1371,12 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up); */ void ntb_transport_link_down(struct ntb_transport_qp *qp) { - struct pci_dev *pdev; + struct pci_dev *pdev = ntb_query_pdev(qp->ndev); int rc, val; if (!qp) return; - pdev = ntb_query_pdev(qp->ndev); qp->client_ready = NTB_LINK_DOWN; rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val); @@ -1452,9 +1408,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_down); */ bool ntb_transport_link_query(struct ntb_transport_qp *qp) { - if (!qp) - return false; - return qp->qp_link == NTB_LINK_UP; } EXPORT_SYMBOL_GPL(ntb_transport_link_query); @@ -1469,9 +1422,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_query); */ unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp) { - if (!qp) - return 0; - return qp->qp_num; } EXPORT_SYMBOL_GPL(ntb_transport_qp_num); @@ -1486,9 +1436,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num); */ unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) { - if (!qp) - return 0; - return qp->tx_max_frame - sizeof(struct ntb_payload_header); } EXPORT_SYMBOL_GPL(ntb_transport_max_size); diff --git a/trunk/drivers/of/base.c b/trunk/drivers/of/base.c index a6f584a7f4a1..c76d16c972cc 100644 --- a/trunk/drivers/of/base.c +++ b/trunk/drivers/of/base.c @@ -192,15 +192,14 @@ EXPORT_SYMBOL(of_find_property); struct device_node *of_find_all_nodes(struct device_node *prev) { struct device_node *np; - unsigned long flags; - raw_spin_lock_irqsave(&devtree_lock, flags); + raw_spin_lock(&devtree_lock); np = prev ? 
prev->allnext : of_allnodes; for (; np != NULL; np = np->allnext) if (of_node_get(np)) break; of_node_put(prev); - raw_spin_unlock_irqrestore(&devtree_lock, flags); + raw_spin_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_all_nodes); @@ -422,9 +421,8 @@ struct device_node *of_get_next_available_child(const struct device_node *node, struct device_node *prev) { struct device_node *next; - unsigned long flags; - raw_spin_lock_irqsave(&devtree_lock, flags); + raw_spin_lock(&devtree_lock); next = prev ? prev->sibling : node->child; for (; next; next = next->sibling) { if (!__of_device_is_available(next)) @@ -433,7 +431,7 @@ struct device_node *of_get_next_available_child(const struct device_node *node, break; } of_node_put(prev); - raw_spin_unlock_irqrestore(&devtree_lock, flags); + raw_spin_unlock(&devtree_lock); return next; } EXPORT_SYMBOL(of_get_next_available_child); @@ -737,14 +735,13 @@ EXPORT_SYMBOL_GPL(of_modalias_node); struct device_node *of_find_node_by_phandle(phandle handle) { struct device_node *np; - unsigned long flags; - raw_spin_lock_irqsave(&devtree_lock, flags); + raw_spin_lock(&devtree_lock); for (np = of_allnodes; np; np = np->allnext) if (np->phandle == handle) break; of_node_get(np); - raw_spin_unlock_irqrestore(&devtree_lock, flags); + raw_spin_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_node_by_phandle); @@ -1211,11 +1208,11 @@ static int __of_parse_phandle_with_args(const struct device_node *np, out_args->args_count = count; for (i = 0; i < count; i++) out_args->args[i] = be32_to_cpup(list++); - } else { - of_node_put(node); } /* Found it! return success */ + if (node) + of_node_put(node); return 0; } diff --git a/trunk/drivers/parisc/iosapic.c b/trunk/drivers/parisc/iosapic.c index e79e006eb9ab..9544cdc0d1af 100644 --- a/trunk/drivers/parisc/iosapic.c +++ b/trunk/drivers/parisc/iosapic.c @@ -811,70 +811,6 @@ int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev) return pcidev->irq; } -static struct iosapic_info *first_isi = NULL; - -#ifdef CONFIG_64BIT -int iosapic_serial_irq(int num) -{ - struct iosapic_info *isi = first_isi; - struct irt_entry *irte = NULL; /* only used if PAT PDC */ - struct vector_info *vi; - int isi_line; /* line used by device */ - - /* lookup IRT entry for isi/slot/pin set */ - irte = &irt_cell[num]; - - DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n", - irte, - irte->entry_type, - irte->entry_length, - irte->polarity_trigger, - irte->src_bus_irq_devno, - irte->src_bus_id, - irte->src_seg_id, - irte->dest_iosapic_intin, - (u32) irte->dest_iosapic_addr); - isi_line = irte->dest_iosapic_intin; - - /* get vector info for this input line */ - vi = isi->isi_vector + isi_line; - DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", isi_line, vi); - - /* If this IRQ line has already been setup, skip it */ - if (vi->irte) - goto out; - - vi->irte = irte; - - /* - * Allocate processor IRQ - * - * XXX/FIXME The txn_alloc_irq() code and related code should be - * moved to enable_irq(). That way we only allocate processor IRQ - * bits for devices that actually have drivers claiming them. - * Right now we assign an IRQ to every PCI device present, - * regardless of whether it's used or not. 
- */ - vi->txn_irq = txn_alloc_irq(8); - - if (vi->txn_irq < 0) - panic("I/O sapic: couldn't get TXN IRQ\n"); - - /* enable_irq() will use txn_* to program IRdT */ - vi->txn_addr = txn_alloc_addr(vi->txn_irq); - vi->txn_data = txn_alloc_data(vi->txn_irq); - - vi->eoi_addr = isi->addr + IOSAPIC_REG_EOI; - vi->eoi_data = cpu_to_le32(vi->txn_data); - - cpu_claim_irq(vi->txn_irq, &iosapic_interrupt_type, vi); - - out: - - return vi->txn_irq; -} -#endif - /* ** squirrel away the I/O Sapic Version @@ -941,8 +877,6 @@ void *iosapic_register(unsigned long hpa) vip->irqline = (unsigned char) cnt; vip->iosapic = isi; } - if (!first_isi) - first_isi = isi; return isi; } diff --git a/trunk/drivers/parisc/lba_pci.c b/trunk/drivers/parisc/lba_pci.c index 1f05913ae677..2ef7103270bb 100644 --- a/trunk/drivers/parisc/lba_pci.c +++ b/trunk/drivers/parisc/lba_pci.c @@ -668,7 +668,7 @@ lba_fixup_bus(struct pci_bus *bus) BUG(); } - if (ldev->hba.elmmio_space.flags) { + if (ldev->hba.elmmio_space.start) { err = request_resource(&iomem_resource, &(ldev->hba.elmmio_space)); if (err < 0) { @@ -993,7 +993,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) case PAT_LMMIO: /* used to fix up pre-initialized MEM BARs */ - if (!lba_dev->hba.lmmio_space.flags) { + if (!lba_dev->hba.lmmio_space.start) { sprintf(lba_dev->hba.lmmio_name, "PCI%02x LMMIO", (int)lba_dev->hba.bus_num.start); @@ -1001,7 +1001,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) io->start; r = &lba_dev->hba.lmmio_space; r->name = lba_dev->hba.lmmio_name; - } else if (!lba_dev->hba.elmmio_space.flags) { + } else if (!lba_dev->hba.elmmio_space.start) { sprintf(lba_dev->hba.elmmio_name, "PCI%02x ELMMIO", (int)lba_dev->hba.bus_num.start); @@ -1096,7 +1096,6 @@ lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) r->name = "LBA PCI Busses"; r->start = lba_num & 0xff; r->end = (lba_num>>8) & 0xff; - r->flags = IORESOURCE_BUS; /* Set up local PCI Bus resources - we don't need them for ** Legacy boxes but it's nice to see in /proc/iomem. @@ -1495,7 +1494,7 @@ lba_driver_probe(struct parisc_device *dev) pci_add_resource_offset(&resources, &lba_dev->hba.io_space, HBA_PORT_BASE(lba_dev->hba.hba_num)); - if (lba_dev->hba.elmmio_space.flags) + if (lba_dev->hba.elmmio_space.start) pci_add_resource_offset(&resources, &lba_dev->hba.elmmio_space, lba_dev->hba.lmmio_space_offset); if (lba_dev->hba.lmmio_space.flags) diff --git a/trunk/drivers/parisc/superio.c b/trunk/drivers/parisc/superio.c index a042d065a0c7..ac6e8e7a02df 100644 --- a/trunk/drivers/parisc/superio.c +++ b/trunk/drivers/parisc/superio.c @@ -494,4 +494,15 @@ static struct pci_driver superio_driver = { .probe = superio_probe, }; -module_pci_driver(superio_driver); +static int __init superio_modinit(void) +{ + return pci_register_driver(&superio_driver); +} + +static void __exit superio_exit(void) +{ + pci_unregister_driver(&superio_driver); +} + +module_init(superio_modinit); +module_exit(superio_exit); diff --git a/trunk/drivers/parport/Kconfig b/trunk/drivers/parport/Kconfig index a50576081b34..24e12d4d1769 100644 --- a/trunk/drivers/parport/Kconfig +++ b/trunk/drivers/parport/Kconfig @@ -71,7 +71,7 @@ config PARPORT_PC_FIFO config PARPORT_PC_SUPERIO bool "SuperIO chipset support" - depends on PARPORT_PC && !PARISC + depends on PARPORT_PC help Saying Y here enables some probes for Super-IO chipsets in order to find out things like base addresses, IRQ lines and DMA channels. 
It diff --git a/trunk/drivers/parport/parport_gsc.c b/trunk/drivers/parport/parport_gsc.c index 6e3a60c78873..a5251cb5fb0c 100644 --- a/trunk/drivers/parport/parport_gsc.c +++ b/trunk/drivers/parport/parport_gsc.c @@ -234,7 +234,7 @@ static int parport_PS2_supported(struct parport *pb) struct parport *parport_gsc_probe_port(unsigned long base, unsigned long base_hi, int irq, - int dma, struct parisc_device *padev) + int dma, struct pci_dev *dev) { struct parport_gsc_private *priv; struct parport_operations *ops; @@ -258,6 +258,7 @@ struct parport *parport_gsc_probe_port(unsigned long base, priv->ctr_writable = 0xff; priv->dma_buf = 0; priv->dma_handle = 0; + priv->dev = dev; p->base = base; p->base_hi = base_hi; p->irq = irq; @@ -281,7 +282,6 @@ struct parport *parport_gsc_probe_port(unsigned long base, return NULL; } - p->dev = &padev->dev; p->base_hi = base_hi; p->modes = tmp.modes; p->size = (p->modes & PARPORT_MODE_EPP)?8:3; @@ -373,7 +373,7 @@ static int parport_init_chip(struct parisc_device *dev) } p = parport_gsc_probe_port(port, 0, dev->irq, - /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, dev); + /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, NULL); if (p) parport_count++; dev_set_drvdata(&dev->dev, p); diff --git a/trunk/drivers/parport/parport_gsc.h b/trunk/drivers/parport/parport_gsc.h index 812214768d27..fc9c37c54022 100644 --- a/trunk/drivers/parport/parport_gsc.h +++ b/trunk/drivers/parport/parport_gsc.h @@ -217,6 +217,6 @@ extern void parport_gsc_dec_use_count(void); extern struct parport *parport_gsc_probe_port(unsigned long base, unsigned long base_hi, int irq, int dma, - struct parisc_device *padev); + struct pci_dev *dev); #endif /* __DRIVERS_PARPORT_PARPORT_GSC_H */ diff --git a/trunk/drivers/pci/hotplug/acpiphp_glue.c b/trunk/drivers/pci/hotplug/acpiphp_glue.c index 59df8575a48c..96fed19c6d90 100644 --- a/trunk/drivers/pci/hotplug/acpiphp_glue.c +++ b/trunk/drivers/pci/hotplug/acpiphp_glue.c @@ -61,7 +61,6 @@ static DEFINE_MUTEX(bridge_mutex); static void handle_hotplug_event_bridge (acpi_handle, u32, void *); static void acpiphp_sanitize_bus(struct pci_bus *bus); static void acpiphp_set_hpp_values(struct pci_bus *bus); -static void hotplug_event_func(acpi_handle handle, u32 type, void *context); static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context); static void free_bridge(struct kref *kref); @@ -148,7 +147,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val, static const struct acpi_dock_ops acpiphp_dock_ops = { - .handler = hotplug_event_func, + .handler = handle_hotplug_event_func, }; /* Check whether the PCI device is managed by native PCIe hotplug driver */ @@ -180,20 +179,6 @@ static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev) return true; } -static void acpiphp_dock_init(void *data) -{ - struct acpiphp_func *func = data; - - get_bridge(func->slot->bridge); -} - -static void acpiphp_dock_release(void *data) -{ - struct acpiphp_func *func = data; - - put_bridge(func->slot->bridge); -} - /* callback routine to register each ACPI PCI slot object */ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) @@ -313,8 +298,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) */ newfunc->flags &= ~FUNC_HAS_EJ0; if (register_hotplug_dock_device(handle, - &acpiphp_dock_ops, newfunc, - acpiphp_dock_init, acpiphp_dock_release)) + &acpiphp_dock_ops, newfunc)) dbg("failed to register dock device\n"); /* we need to be notified when dock events happen @@ -686,7 
+670,6 @@ static int __ref enable_device(struct acpiphp_slot *slot) struct pci_bus *bus = slot->bridge->pci_bus; struct acpiphp_func *func; int num, max, pass; - LIST_HEAD(add_list); if (slot->flags & SLOT_ENABLED) goto err_exit; @@ -711,15 +694,13 @@ static int __ref enable_device(struct acpiphp_slot *slot) max = pci_scan_bridge(bus, dev, max, pass); if (pass && dev->subordinate) { check_hotplug_bridge(slot, dev); - pcibios_resource_survey_bus(dev->subordinate); - __pci_bus_size_bridges(dev->subordinate, - &add_list); + pci_bus_size_bridges(dev->subordinate); } } } } - __pci_bus_assign_resources(bus, &add_list, NULL); + pci_bus_assign_resources(bus); acpiphp_sanitize_bus(bus); acpiphp_set_hpp_values(bus); acpiphp_set_acpi_region(slot); @@ -969,20 +950,6 @@ check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK ; } -void acpiphp_check_host_bridge(acpi_handle handle) -{ - struct acpiphp_bridge *bridge; - - bridge = acpiphp_handle_to_bridge(handle); - if (bridge) { - acpiphp_check_bridge(bridge); - put_bridge(bridge); - } - - acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, - ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL); -} - static void _handle_hotplug_event_bridge(struct work_struct *work) { struct acpiphp_bridge *bridge; @@ -1084,12 +1051,22 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge); } -static void hotplug_event_func(acpi_handle handle, u32 type, void *context) +static void _handle_hotplug_event_func(struct work_struct *work) { - struct acpiphp_func *func = context; + struct acpiphp_func *func; char objname[64]; struct acpi_buffer buffer = { .length = sizeof(objname), .pointer = objname }; + struct acpi_hp_work *hp_work; + acpi_handle handle; + u32 type; + + hp_work = container_of(work, struct acpi_hp_work, work); + handle = hp_work->handle; + type = hp_work->type; + func = (struct acpiphp_func *)hp_work->context; + + acpi_scan_lock_acquire(); acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); @@ -1122,18 +1099,6 @@ static void hotplug_event_func(acpi_handle handle, u32 type, void *context) warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); break; } -} - -static void _handle_hotplug_event_func(struct work_struct *work) -{ - struct acpi_hp_work *hp_work; - struct acpiphp_func *func; - - hp_work = container_of(work, struct acpi_hp_work, work); - func = hp_work->context; - acpi_scan_lock_acquire(); - - hotplug_event_func(hp_work->handle, hp_work->type, func); acpi_scan_lock_release(); kfree(hp_work); /* allocated in handle_hotplug_event_func */ diff --git a/trunk/drivers/pci/pci.h b/trunk/drivers/pci/pci.h index d1182c4a754e..68678ed76b0d 100644 --- a/trunk/drivers/pci/pci.h +++ b/trunk/drivers/pci/pci.h @@ -202,11 +202,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, struct resource *res, unsigned int reg); int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type); void pci_configure_ari(struct pci_dev *dev); -void __ref __pci_bus_size_bridges(struct pci_bus *bus, - struct list_head *realloc_head); -void __ref __pci_bus_assign_resources(const struct pci_bus *bus, - struct list_head *realloc_head, - struct list_head *fail_head); /** * pci_ari_enabled - query ARI forwarding status diff --git a/trunk/drivers/pci/pcie/aer/aerdrv_core.c b/trunk/drivers/pci/pcie/aer/aerdrv_core.c index 0f4554e48cc5..8ec8b4f48560 100644 --- a/trunk/drivers/pci/pcie/aer/aerdrv_core.c +++ 
b/trunk/drivers/pci/pcie/aer/aerdrv_core.c @@ -580,7 +580,6 @@ struct aer_recover_entry u8 devfn; u16 domain; int severity; - struct aer_capability_regs *regs; }; static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry, @@ -594,7 +593,7 @@ static DEFINE_SPINLOCK(aer_recover_ring_lock); static DECLARE_WORK(aer_recover_work, aer_recover_work_func); void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, - int severity, struct aer_capability_regs *aer_regs) + int severity) { unsigned long flags; struct aer_recover_entry entry = { @@ -602,7 +601,6 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, .devfn = devfn, .domain = domain, .severity = severity, - .regs = aer_regs, }; spin_lock_irqsave(&aer_recover_ring_lock, flags); @@ -629,7 +627,6 @@ static void aer_recover_work_func(struct work_struct *work) PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn)); continue; } - cper_print_aer(pdev, entry.severity, entry.regs); do_recovery(pdev, entry.severity); pci_dev_put(pdev); } diff --git a/trunk/drivers/pci/pcie/aer/aerdrv_errprint.c b/trunk/drivers/pci/pcie/aer/aerdrv_errprint.c index 2c7c9f5f592c..5ab14251839d 100644 --- a/trunk/drivers/pci/pcie/aer/aerdrv_errprint.c +++ b/trunk/drivers/pci/pcie/aer/aerdrv_errprint.c @@ -220,7 +220,7 @@ int cper_severity_to_aer(int cper_severity) } EXPORT_SYMBOL_GPL(cper_severity_to_aer); -void cper_print_aer(struct pci_dev *dev, int cper_severity, +void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity, struct aer_capability_regs *aer) { int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0; @@ -244,7 +244,7 @@ void cper_print_aer(struct pci_dev *dev, int cper_severity, agent = AER_GET_AGENT(aer_severity, status); dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask); - cper_print_bits("", status, status_strs, status_strs_size); + cper_print_bits(prefix, status, status_strs, status_strs_size); dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n", aer_error_layer[layer], aer_agent_string[agent]); if (aer_severity != AER_CORRECTABLE) diff --git a/trunk/drivers/pci/setup-bus.c b/trunk/drivers/pci/setup-bus.c index d254e2379533..16abaaa1f83c 100644 --- a/trunk/drivers/pci/setup-bus.c +++ b/trunk/drivers/pci/setup-bus.c @@ -1044,7 +1044,7 @@ static void pci_bus_size_cardbus(struct pci_bus *bus, ; } -void __ref __pci_bus_size_bridges(struct pci_bus *bus, +static void __ref __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) { struct pci_dev *dev; @@ -1115,9 +1115,9 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus) } EXPORT_SYMBOL(pci_bus_size_bridges); -void __ref __pci_bus_assign_resources(const struct pci_bus *bus, - struct list_head *realloc_head, - struct list_head *fail_head) +static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, + struct list_head *realloc_head, + struct list_head *fail_head) { struct pci_bus *b; struct pci_dev *dev; diff --git a/trunk/drivers/pinctrl/pinconf.c b/trunk/drivers/pinctrl/pinconf.c index 694c3ace4520..c67c37e23dd7 100644 --- a/trunk/drivers/pinctrl/pinconf.c +++ b/trunk/drivers/pinctrl/pinconf.c @@ -610,7 +610,7 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d) bool found = false; unsigned long config; - mutex_lock(&pinctrl_maps_mutex); + mutex_lock(&pctldev->mutex); /* Parse the pinctrl map and look for the elected pin/state */ for_each_maps(maps_node, i, map) { @@ -659,7 +659,7 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d) 
confops->pin_config_config_dbg_show(pctldev, s, config); exit: - mutex_unlock(&pinctrl_maps_mutex); + mutex_unlock(&pctldev->mutex); return 0; } diff --git a/trunk/drivers/pinctrl/pinctrl-abx500.c b/trunk/drivers/pinctrl/pinctrl-abx500.c index 6d4532702f80..aa17f7580f61 100644 --- a/trunk/drivers/pinctrl/pinctrl-abx500.c +++ b/trunk/drivers/pinctrl/pinctrl-abx500.c @@ -851,12 +851,23 @@ static int abx500_gpio_probe(struct platform_device *pdev) if (abx500_pdata) pdata = abx500_pdata->gpio; + if (!pdata) { + if (np) { + const struct of_device_id *match; - if (!(pdata || np)) { - dev_err(&pdev->dev, "gpio dt and platform data missing\n"); - return -ENODEV; + match = of_match_device(abx500_gpio_match, &pdev->dev); + if (!match) + return -ENODEV; + id = (unsigned long)match->data; + } else { + dev_err(&pdev->dev, "gpio dt and platform data missing\n"); + return -ENODEV; + } } + if (platid) + id = platid->driver_data; + pct = devm_kzalloc(&pdev->dev, sizeof(struct abx500_pinctrl), GFP_KERNEL); if (pct == NULL) { @@ -871,16 +882,6 @@ static int abx500_gpio_probe(struct platform_device *pdev) pct->chip.dev = &pdev->dev; pct->chip.base = (np) ? -1 : pdata->gpio_base; - if (platid) - id = platid->driver_data; - else if (np) { - const struct of_device_id *match; - - match = of_match_device(abx500_gpio_match, &pdev->dev); - if (match) - id = (unsigned long)match->data; - } - /* initialize the lock */ mutex_init(&pct->lock); @@ -899,7 +900,8 @@ static int abx500_gpio_probe(struct platform_device *pdev) abx500_pinctrl_ab8505_init(&pct->soc); break; default: - dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n", id); + dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n", + (int) platid->driver_data); mutex_destroy(&pct->lock); return -EINVAL; } diff --git a/trunk/drivers/pinctrl/pinctrl-coh901.c b/trunk/drivers/pinctrl/pinctrl-coh901.c index d6b41747d687..edde3acc4186 100644 --- a/trunk/drivers/pinctrl/pinctrl-coh901.c +++ b/trunk/drivers/pinctrl/pinctrl-coh901.c @@ -713,6 +713,11 @@ static int __init u300_gpio_probe(struct platform_device *pdev) gpio->dev = &pdev->dev; memres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!memres) { + dev_err(gpio->dev, "could not get GPIO memory resource\n"); + return -ENODEV; + } + gpio->base = devm_ioremap_resource(&pdev->dev, memres); if (IS_ERR(gpio->base)) return PTR_ERR(gpio->base); @@ -830,8 +835,7 @@ static int __init u300_gpio_probe(struct platform_device *pdev) return 0; err_no_range: - if (gpiochip_remove(&gpio->chip)) - dev_err(&pdev->dev, "failed to remove gpio chip\n"); + err = gpiochip_remove(&gpio->chip); err_no_chip: err_no_domain: err_no_port: diff --git a/trunk/drivers/pinctrl/pinctrl-exynos.c b/trunk/drivers/pinctrl/pinctrl-exynos.c index 2d76f66a2e0b..ac742817ebce 100644 --- a/trunk/drivers/pinctrl/pinctrl-exynos.c +++ b/trunk/drivers/pinctrl/pinctrl-exynos.c @@ -196,12 +196,6 @@ static irqreturn_t exynos_eint_gpio_irq(int irq, void *data) return IRQ_HANDLED; } -struct exynos_eint_gpio_save { - u32 eint_con; - u32 eint_fltcon0; - u32 eint_fltcon1; -}; - /* * exynos_eint_gpio_init() - setup handling of external gpio interrupts. * @d: driver data of samsung pinctrl driver. 
@@ -210,8 +204,8 @@ static int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d) { struct samsung_pin_bank *bank; struct device *dev = d->dev; - int ret; - int i; + unsigned int ret; + unsigned int i; if (!d->irq) { dev_err(dev, "irq number not available\n"); @@ -233,29 +227,11 @@ static int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d) bank->nr_pins, &exynos_gpio_irqd_ops, bank); if (!bank->irq_domain) { dev_err(dev, "gpio irq domain add failed\n"); - ret = -ENXIO; - goto err_domains; - } - - bank->soc_priv = devm_kzalloc(d->dev, - sizeof(struct exynos_eint_gpio_save), GFP_KERNEL); - if (!bank->soc_priv) { - irq_domain_remove(bank->irq_domain); - ret = -ENOMEM; - goto err_domains; + return -ENXIO; } } return 0; - -err_domains: - for (--i, --bank; i >= 0; --i, --bank) { - if (bank->eint_type != EINT_TYPE_GPIO) - continue; - irq_domain_remove(bank->irq_domain); - } - - return ret; } static void exynos_wkup_irq_unmask(struct irq_data *irqd) @@ -350,28 +326,6 @@ static int exynos_wkup_irq_set_type(struct irq_data *irqd, unsigned int type) return 0; } -static u32 exynos_eint_wake_mask = 0xffffffff; - -u32 exynos_get_eint_wake_mask(void) -{ - return exynos_eint_wake_mask; -} - -static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on) -{ - struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); - unsigned long bit = 1UL << (2 * bank->eint_offset + irqd->hwirq); - - pr_info("wake %s for irq %d\n", on ? "enabled" : "disabled", irqd->irq); - - if (!on) - exynos_eint_wake_mask |= bit; - else - exynos_eint_wake_mask &= ~bit; - - return 0; -} - /* * irq_chip for wakeup interrupts */ @@ -381,7 +335,6 @@ static struct irq_chip exynos_wkup_irq_chip = { .irq_mask = exynos_wkup_irq_mask, .irq_ack = exynos_wkup_irq_ack, .irq_set_type = exynos_wkup_irq_set_type, - .irq_set_wake = exynos_wkup_irq_set_wake, }; /* interrupt handler for wakeup interrupts 0..15 */ @@ -552,72 +505,6 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) return 0; } -static void exynos_pinctrl_suspend_bank( - struct samsung_pinctrl_drv_data *drvdata, - struct samsung_pin_bank *bank) -{ - struct exynos_eint_gpio_save *save = bank->soc_priv; - void __iomem *regs = drvdata->virt_base; - - save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET - + bank->eint_offset); - save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset); - save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset + 4); - - pr_debug("%s: save con %#010x\n", bank->name, save->eint_con); - pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0); - pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1); -} - -static void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata) -{ - struct samsung_pin_ctrl *ctrl = drvdata->ctrl; - struct samsung_pin_bank *bank = ctrl->pin_banks; - int i; - - for (i = 0; i < ctrl->nr_banks; ++i, ++bank) - if (bank->eint_type == EINT_TYPE_GPIO) - exynos_pinctrl_suspend_bank(drvdata, bank); -} - -static void exynos_pinctrl_resume_bank( - struct samsung_pinctrl_drv_data *drvdata, - struct samsung_pin_bank *bank) -{ - struct exynos_eint_gpio_save *save = bank->soc_priv; - void __iomem *regs = drvdata->virt_base; - - pr_debug("%s: con %#010x => %#010x\n", bank->name, - readl(regs + EXYNOS_GPIO_ECON_OFFSET - + bank->eint_offset), save->eint_con); - pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name, - readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset), 
save->eint_fltcon0); - pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name, - readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset + 4), save->eint_fltcon1); - - writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET - + bank->eint_offset); - writel(save->eint_fltcon0, regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset); - writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset + 4); -} - -static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata) -{ - struct samsung_pin_ctrl *ctrl = drvdata->ctrl; - struct samsung_pin_bank *bank = ctrl->pin_banks; - int i; - - for (i = 0; i < ctrl->nr_banks; ++i, ++bank) - if (bank->eint_type == EINT_TYPE_GPIO) - exynos_pinctrl_resume_bank(drvdata, bank); -} - /* pin banks of exynos4210 pin-controller 0 */ static struct samsung_pin_bank exynos4210_pin_banks0[] = { EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), @@ -681,8 +568,6 @@ struct samsung_pin_ctrl exynos4210_pin_ctrl[] = { .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, .label = "exynos4210-gpio-ctrl0", }, { /* pin-controller instance 1 data */ @@ -697,8 +582,6 @@ struct samsung_pin_ctrl exynos4210_pin_ctrl[] = { .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .eint_wkup_init = exynos_eint_wkup_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, .label = "exynos4210-gpio-ctrl1", }, { /* pin-controller instance 2 data */ @@ -780,8 +663,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = { .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, .label = "exynos4x12-gpio-ctrl0", }, { /* pin-controller instance 1 data */ @@ -796,8 +677,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = { .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .eint_wkup_init = exynos_eint_wkup_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, .label = "exynos4x12-gpio-ctrl1", }, { /* pin-controller instance 2 data */ @@ -808,8 +687,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = { .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, .label = "exynos4x12-gpio-ctrl2", }, { /* pin-controller instance 3 data */ @@ -820,8 +697,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = { .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, .label = "exynos4x12-gpio-ctrl3", }, }; @@ -900,8 +775,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = { .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .eint_wkup_init = exynos_eint_wkup_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, .label = "exynos5250-gpio-ctrl0", }, { /* pin-controller instance 1 data */ @@ -912,8 +785,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = { .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, .label = "exynos5250-gpio-ctrl1", }, { /* pin-controller instance 2 data */ @@ -924,8 +795,6 @@ struct samsung_pin_ctrl 
exynos5250_pin_ctrl[] = { .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, .label = "exynos5250-gpio-ctrl2", }, { /* pin-controller instance 3 data */ @@ -936,8 +805,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = { .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, .label = "exynos5250-gpio-ctrl3", }, }; diff --git a/trunk/drivers/pinctrl/pinctrl-exynos.h b/trunk/drivers/pinctrl/pinctrl-exynos.h index 3c91c357792f..9b1f77a5bf0f 100644 --- a/trunk/drivers/pinctrl/pinctrl-exynos.h +++ b/trunk/drivers/pinctrl/pinctrl-exynos.h @@ -19,7 +19,6 @@ /* External GPIO and wakeup interrupt related definitions */ #define EXYNOS_GPIO_ECON_OFFSET 0x700 -#define EXYNOS_GPIO_EFLTCON_OFFSET 0x800 #define EXYNOS_GPIO_EMASK_OFFSET 0x900 #define EXYNOS_GPIO_EPEND_OFFSET 0xA00 #define EXYNOS_WKUP_ECON_OFFSET 0xE00 diff --git a/trunk/drivers/pinctrl/pinctrl-exynos5440.c b/trunk/drivers/pinctrl/pinctrl-exynos5440.c index 32a48f44f574..6038503ed929 100644 --- a/trunk/drivers/pinctrl/pinctrl-exynos5440.c +++ b/trunk/drivers/pinctrl/pinctrl-exynos5440.c @@ -1000,6 +1000,11 @@ static int exynos5440_pinctrl_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "cannot find IO resource\n"); + return -ENOENT; + } + priv->reg_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->reg_base)) return PTR_ERR(priv->reg_base); diff --git a/trunk/drivers/pinctrl/pinctrl-lantiq.c b/trunk/drivers/pinctrl/pinctrl-lantiq.c index d22ca252b80d..615c5002b757 100644 --- a/trunk/drivers/pinctrl/pinctrl-lantiq.c +++ b/trunk/drivers/pinctrl/pinctrl-lantiq.c @@ -52,8 +52,7 @@ static void ltq_pinctrl_dt_free_map(struct pinctrl_dev *pctldev, int i; for (i = 0; i < num_maps; i++) - if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN || - map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP) + if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN) kfree(map[i].data.configs.configs); kfree(map); } diff --git a/trunk/drivers/pinctrl/pinctrl-samsung.c b/trunk/drivers/pinctrl/pinctrl-samsung.c index 63ac22e89678..976366899f68 100644 --- a/trunk/drivers/pinctrl/pinctrl-samsung.c +++ b/trunk/drivers/pinctrl/pinctrl-samsung.c @@ -28,7 +28,6 @@ #include #include #include -#include #include "core.h" #include "pinctrl-samsung.h" @@ -49,9 +48,6 @@ static struct pin_config { { "samsung,pin-pud-pdn", PINCFG_TYPE_PUD_PDN }, }; -/* Global list of devices (struct samsung_pinctrl_drv_data) */ -LIST_HEAD(drvdata_list); - static unsigned int pin_base; static inline struct samsung_pin_bank *gc_to_pin_bank(struct gpio_chip *gc) @@ -936,6 +932,11 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) drvdata->dev = dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "cannot find IO resource\n"); + return -ENOENT; + } + drvdata->virt_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(drvdata->virt_base)) return PTR_ERR(drvdata->virt_base); @@ -960,151 +961,9 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) ctrl->eint_wkup_init(drvdata); platform_set_drvdata(pdev, drvdata); - - /* Add to the global list */ - list_add_tail(&drvdata->node, &drvdata_list); - return 0; } -#ifdef CONFIG_PM - -/** - * samsung_pinctrl_suspend_dev - save pinctrl state for suspend for a device - * - * Save data for 
all banks handled by this device. - */ -static void samsung_pinctrl_suspend_dev( - struct samsung_pinctrl_drv_data *drvdata) -{ - struct samsung_pin_ctrl *ctrl = drvdata->ctrl; - void __iomem *virt_base = drvdata->virt_base; - int i; - - for (i = 0; i < ctrl->nr_banks; i++) { - struct samsung_pin_bank *bank = &ctrl->pin_banks[i]; - void __iomem *reg = virt_base + bank->pctl_offset; - - u8 *offs = bank->type->reg_offset; - u8 *widths = bank->type->fld_width; - enum pincfg_type type; - - /* Registers without a powerdown config aren't lost */ - if (!widths[PINCFG_TYPE_CON_PDN]) - continue; - - for (type = 0; type < PINCFG_TYPE_NUM; type++) - if (widths[type]) - bank->pm_save[type] = readl(reg + offs[type]); - - if (widths[PINCFG_TYPE_FUNC] * bank->nr_pins > 32) { - /* Some banks have two config registers */ - bank->pm_save[PINCFG_TYPE_NUM] = - readl(reg + offs[PINCFG_TYPE_FUNC] + 4); - pr_debug("Save %s @ %p (con %#010x %08x)\n", - bank->name, reg, - bank->pm_save[PINCFG_TYPE_FUNC], - bank->pm_save[PINCFG_TYPE_NUM]); - } else { - pr_debug("Save %s @ %p (con %#010x)\n", bank->name, - reg, bank->pm_save[PINCFG_TYPE_FUNC]); - } - } - - if (ctrl->suspend) - ctrl->suspend(drvdata); -} - -/** - * samsung_pinctrl_resume_dev - restore pinctrl state from suspend for a device - * - * Restore one of the banks that was saved during suspend. - * - * We don't bother doing anything complicated to avoid glitching lines since - * we're called before pad retention is turned off. - */ -static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata) -{ - struct samsung_pin_ctrl *ctrl = drvdata->ctrl; - void __iomem *virt_base = drvdata->virt_base; - int i; - - if (ctrl->resume) - ctrl->resume(drvdata); - - for (i = 0; i < ctrl->nr_banks; i++) { - struct samsung_pin_bank *bank = &ctrl->pin_banks[i]; - void __iomem *reg = virt_base + bank->pctl_offset; - - u8 *offs = bank->type->reg_offset; - u8 *widths = bank->type->fld_width; - enum pincfg_type type; - - /* Registers without a powerdown config aren't lost */ - if (!widths[PINCFG_TYPE_CON_PDN]) - continue; - - if (widths[PINCFG_TYPE_FUNC] * bank->nr_pins > 32) { - /* Some banks have two config registers */ - pr_debug("%s @ %p (con %#010x %08x => %#010x %08x)\n", - bank->name, reg, - readl(reg + offs[PINCFG_TYPE_FUNC]), - readl(reg + offs[PINCFG_TYPE_FUNC] + 4), - bank->pm_save[PINCFG_TYPE_FUNC], - bank->pm_save[PINCFG_TYPE_NUM]); - writel(bank->pm_save[PINCFG_TYPE_NUM], - reg + offs[PINCFG_TYPE_FUNC] + 4); - } else { - pr_debug("%s @ %p (con %#010x => %#010x)\n", bank->name, - reg, readl(reg + offs[PINCFG_TYPE_FUNC]), - bank->pm_save[PINCFG_TYPE_FUNC]); - } - for (type = 0; type < PINCFG_TYPE_NUM; type++) - if (widths[type]) - writel(bank->pm_save[type], reg + offs[type]); - } -} - -/** - * samsung_pinctrl_suspend - save pinctrl state for suspend - * - * Save data for all banks across all devices. - */ -static int samsung_pinctrl_suspend(void) -{ - struct samsung_pinctrl_drv_data *drvdata; - - list_for_each_entry(drvdata, &drvdata_list, node) { - samsung_pinctrl_suspend_dev(drvdata); - } - - return 0; -} - -/** - * samsung_pinctrl_resume - restore pinctrl state for suspend - * - * Restore data for all banks across all devices. 
- */ -static void samsung_pinctrl_resume(void) -{ - struct samsung_pinctrl_drv_data *drvdata; - - list_for_each_entry_reverse(drvdata, &drvdata_list, node) { - samsung_pinctrl_resume_dev(drvdata); - } -} - -#else -#define samsung_pinctrl_suspend NULL -#define samsung_pinctrl_resume NULL -#endif - -static struct syscore_ops samsung_pinctrl_syscore_ops = { - .suspend = samsung_pinctrl_suspend, - .resume = samsung_pinctrl_resume, -}; - static const struct of_device_id samsung_pinctrl_dt_match[] = { #ifdef CONFIG_PINCTRL_EXYNOS { .compatible = "samsung,exynos4210-pinctrl", @@ -1133,14 +992,6 @@ static struct platform_driver samsung_pinctrl_driver = { static int __init samsung_pinctrl_drv_register(void) { - /* - * Register syscore ops for save/restore of registers across suspend. - * It's important to ensure that this driver is running at an earlier - * initcall level than any arch-specific init calls that install syscore - * ops that turn off pad retention (like exynos_pm_resume). - */ - register_syscore_ops(&samsung_pinctrl_syscore_ops); - return platform_driver_register(&samsung_pinctrl_driver); } postcore_initcall(samsung_pinctrl_drv_register); diff --git a/trunk/drivers/pinctrl/pinctrl-samsung.h b/trunk/drivers/pinctrl/pinctrl-samsung.h index 26d3519240c9..7c7f9ebcd05b 100644 --- a/trunk/drivers/pinctrl/pinctrl-samsung.h +++ b/trunk/drivers/pinctrl/pinctrl-samsung.h @@ -127,7 +127,6 @@ struct samsung_pin_bank_type { * @gpio_chip: GPIO chip of the bank. * @grange: linux gpio pin range supported by this bank. * @slock: spinlock protecting bank registers - * @pm_save: saved register values during suspend */ struct samsung_pin_bank { struct samsung_pin_bank_type *type; @@ -139,15 +138,12 @@ struct samsung_pin_bank { u32 eint_mask; u32 eint_offset; char *name; - void *soc_priv; struct device_node *of_node; struct samsung_pinctrl_drv_data *drvdata; struct irq_domain *irq_domain; struct gpio_chip gpio_chip; struct pinctrl_gpio_range grange; spinlock_t slock; - - u32 pm_save[PINCFG_TYPE_NUM + 1]; /* +1 to handle double CON registers*/ }; /** @@ -188,15 +184,11 @@ struct samsung_pin_ctrl { int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *); int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *); - void (*suspend)(struct samsung_pinctrl_drv_data *); - void (*resume)(struct samsung_pinctrl_drv_data *); - char *label; }; /** * struct samsung_pinctrl_drv_data: wrapper for holding driver data together. - * @node: global list node * @virt_base: register base address of the controller. * @dev: device instance representing the controller. * @irq: interrpt number used by the controller to notify gpio interrupts. @@ -209,7 +201,6 @@ struct samsung_pin_ctrl { * @nr_function: number of such pin functions. 
*/ struct samsung_pinctrl_drv_data { - struct list_head node; void __iomem *virt_base; struct device *dev; int irq; diff --git a/trunk/drivers/pinctrl/pinctrl-single.c b/trunk/drivers/pinctrl/pinctrl-single.c index b9fa04618601..5f2d2bfd356e 100644 --- a/trunk/drivers/pinctrl/pinctrl-single.c +++ b/trunk/drivers/pinctrl/pinctrl-single.c @@ -1166,8 +1166,7 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, (*map)->data.mux.function = np->name; if (pcs->is_pinconf) { - res = pcs_parse_pinconf(pcs, np, function, map); - if (res) + if (pcs_parse_pinconf(pcs, np, function, map)) goto free_pingroups; *num_maps = 2; } else { diff --git a/trunk/drivers/pinctrl/pinctrl-sunxi.c b/trunk/drivers/pinctrl/pinctrl-sunxi.c index b7d8c890514c..c52fc2c08732 100644 --- a/trunk/drivers/pinctrl/pinctrl-sunxi.c +++ b/trunk/drivers/pinctrl/pinctrl-sunxi.c @@ -1990,10 +1990,8 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev) } clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); + if (IS_ERR(clk)) goto gpiochip_error; - } clk_prepare_enable(clk); @@ -2002,8 +2000,7 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev) return 0; gpiochip_error: - if (gpiochip_remove(pctl->chip)) - dev_err(&pdev->dev, "failed to remove gpio chip\n"); + ret = gpiochip_remove(pctl->chip); pinctrl_error: pinctrl_unregister(pctl->pctl_dev); return ret; diff --git a/trunk/drivers/pinctrl/pinctrl-xway.c b/trunk/drivers/pinctrl/pinctrl-xway.c index e92132c76a6b..f2977cff8366 100644 --- a/trunk/drivers/pinctrl/pinctrl-xway.c +++ b/trunk/drivers/pinctrl/pinctrl-xway.c @@ -716,6 +716,10 @@ static int pinmux_xway_probe(struct platform_device *pdev) /* get and remap our register range */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "Failed to get resource\n"); + return -ENOENT; + } xway_info.membase[0] = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(xway_info.membase[0])) return PTR_ERR(xway_info.membase[0]); diff --git a/trunk/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/trunk/drivers/pinctrl/sh-pfc/pfc-r8a7779.c index 8cd90e7e945a..791a6719d8a9 100644 --- a/trunk/drivers/pinctrl/sh-pfc/pfc-r8a7779.c +++ b/trunk/drivers/pinctrl/sh-pfc/pfc-r8a7779.c @@ -2357,48 +2357,27 @@ static const unsigned int sdhi3_wp_mux[] = { }; /* - USB0 ------------------------------------------------------------------- */ static const unsigned int usb0_pins[] = { - /* PENC */ - 154, + /* OVC */ + 150, 154, }; static const unsigned int usb0_mux[] = { - USB_PENC0_MARK, -}; -static const unsigned int usb0_ovc_pins[] = { - /* USB_OVC */ - 150 -}; -static const unsigned int usb0_ovc_mux[] = { - USB_OVC0_MARK, + USB_OVC0_MARK, USB_PENC0_MARK, }; /* - USB1 ------------------------------------------------------------------- */ static const unsigned int usb1_pins[] = { - /* PENC */ - 155, + /* OVC */ + 152, 155, }; static const unsigned int usb1_mux[] = { - USB_PENC1_MARK, -}; -static const unsigned int usb1_ovc_pins[] = { - /* USB_OVC */ - 152, -}; -static const unsigned int usb1_ovc_mux[] = { - USB_OVC1_MARK, + USB_OVC1_MARK, USB_PENC1_MARK, }; /* - USB2 ------------------------------------------------------------------- */ static const unsigned int usb2_pins[] = { - /* PENC */ - 156, + /* OVC, PENC */ + 125, 156, }; static const unsigned int usb2_mux[] = { - USB_PENC2_MARK, -}; -static const unsigned int usb2_ovc_pins[] = { - /* USB_OVC */ - 125, -}; -static const unsigned int usb2_ovc_mux[] = { - USB_OVC2_MARK, + USB_OVC2_MARK, USB_PENC2_MARK, }; static const 
struct sh_pfc_pin_group pinmux_groups[] = { @@ -2522,11 +2501,8 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(sdhi3_cd), SH_PFC_PIN_GROUP(sdhi3_wp), SH_PFC_PIN_GROUP(usb0), - SH_PFC_PIN_GROUP(usb0_ovc), SH_PFC_PIN_GROUP(usb1), - SH_PFC_PIN_GROUP(usb1_ovc), SH_PFC_PIN_GROUP(usb2), - SH_PFC_PIN_GROUP(usb2_ovc), }; static const char * const du0_groups[] = { @@ -2707,17 +2683,14 @@ static const char * const sdhi3_groups[] = { static const char * const usb0_groups[] = { "usb0", - "usb0_ovc", }; static const char * const usb1_groups[] = { "usb1", - "usb1_ovc", }; static const char * const usb2_groups[] = { "usb2", - "usb2_ovc", }; static const struct sh_pfc_function pinmux_functions[] = { diff --git a/trunk/drivers/pinctrl/vt8500/pinctrl-wm8750.c b/trunk/drivers/pinctrl/vt8500/pinctrl-wm8750.c index de43262398db..b964cc550568 100644 --- a/trunk/drivers/pinctrl/vt8500/pinctrl-wm8750.c +++ b/trunk/drivers/pinctrl/vt8500/pinctrl-wm8750.c @@ -53,7 +53,7 @@ static const struct wmt_pinctrl_bank_registers wm8750_banks[] = { #define WMT_PIN_EXTGPIO6 WMT_PIN(0, 6) #define WMT_PIN_EXTGPIO7 WMT_PIN(0, 7) #define WMT_PIN_WAKEUP0 WMT_PIN(0, 16) -#define WMT_PIN_WAKEUP1 WMT_PIN(0, 17) +#define WMT_PIN_WAKEUP1 WMT_PIN(0, 16) #define WMT_PIN_SD0CD WMT_PIN(0, 28) #define WMT_PIN_VDOUT0 WMT_PIN(1, 0) #define WMT_PIN_VDOUT1 WMT_PIN(1, 1) diff --git a/trunk/drivers/pinctrl/vt8500/pinctrl-wmt.c b/trunk/drivers/pinctrl/vt8500/pinctrl-wmt.c index 70d986e04afb..ab63104e8dc9 100644 --- a/trunk/drivers/pinctrl/vt8500/pinctrl-wmt.c +++ b/trunk/drivers/pinctrl/vt8500/pinctrl-wmt.c @@ -609,7 +609,8 @@ int wmt_pinctrl_probe(struct platform_device *pdev, return 0; fail_range: - if (gpiochip_remove(&data->gpio_chip)) + err = gpiochip_remove(&data->gpio_chip); + if (err) dev_err(&pdev->dev, "failed to remove gpio chip\n"); fail_gpio: pinctrl_unregister(data->pctl_dev); diff --git a/trunk/drivers/platform/x86/hp-wmi.c b/trunk/drivers/platform/x86/hp-wmi.c index d111c8687f9b..8df0c5a21be2 100644 --- a/trunk/drivers/platform/x86/hp-wmi.c +++ b/trunk/drivers/platform/x86/hp-wmi.c @@ -703,7 +703,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device) } rfkill_init_sw_state(gps_rfkill, hp_wmi_get_sw_state(HPWMI_GPS)); - rfkill_set_hw_state(gps_rfkill, + rfkill_set_hw_state(bluetooth_rfkill, hp_wmi_get_hw_state(HPWMI_GPS)); err = rfkill_register(gps_rfkill); if (err) diff --git a/trunk/drivers/power/Kconfig b/trunk/drivers/power/Kconfig index 7b8979c63f48..0d0b5d7d19d0 100644 --- a/trunk/drivers/power/Kconfig +++ b/trunk/drivers/power/Kconfig @@ -152,7 +152,6 @@ config BATTERY_SBS config BATTERY_BQ27x00 tristate "BQ27x00 battery driver" - depends on I2C || I2C=n help Say Y here to enable support for batteries with BQ27x00 (I2C/HDQ) chips. @@ -285,7 +284,6 @@ config CHARGER_LP8788 tristate "TI LP8788 charger driver" depends on MFD_LP8788 depends on LP8788_ADC - depends on IIO help Say Y to enable support for the LP8788 linear charger. 
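[Editorial note] Several probe hunks in this series (pinctrl-exynos5440, pinctrl-samsung and pinctrl-xway above, and the pwm drivers below) reinstate an explicit NULL check on platform_get_resource() before the resource is handed to devm_ioremap_resource(). A minimal sketch of that probe pattern follows; foo_probe() is a hypothetical stand-in for the individual drivers, not code from any of the patched files.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/*
 * Illustrative sketch only ("foo" is a hypothetical driver): the pattern
 * the hunks in this series restore -- validate the result of
 * platform_get_resource() before passing it to devm_ioremap_resource().
 */
static int foo_probe(struct platform_device *pdev)
{
	struct resource *r;
	void __iomem *base;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no memory resource defined\n");
		return -ENODEV;
	}

	base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* device-specific setup would continue here */
	return 0;
}

The explicit check is arguably redundant in later kernels, since devm_ioremap_resource() itself rejects a NULL resource and prints its own error, which is why the newer code being reverted here had dropped it.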
diff --git a/trunk/drivers/power/pm2301_charger.c b/trunk/drivers/power/pm2301_charger.c index fef56e2041b3..a44175139bbf 100644 --- a/trunk/drivers/power/pm2301_charger.c +++ b/trunk/drivers/power/pm2301_charger.c @@ -1269,5 +1269,5 @@ module_exit(pm2xxx_charger_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Rajkumar kasirajan, Olivier Launay"); -MODULE_ALIAS("i2c:pm2xxx-charger"); +MODULE_ALIAS("platform:pm2xxx-charger"); MODULE_DESCRIPTION("PM2xxx charger management driver"); diff --git a/trunk/drivers/power/wm831x_backup.c b/trunk/drivers/power/wm831x_backup.c index 56fb509f4be0..58cbb009b74f 100644 --- a/trunk/drivers/power/wm831x_backup.c +++ b/trunk/drivers/power/wm831x_backup.c @@ -207,6 +207,7 @@ static int wm831x_backup_remove(struct platform_device *pdev) struct wm831x_backup *devdata = platform_get_drvdata(pdev); power_supply_unregister(&devdata->backup); + kfree(devdata->backup.name); return 0; } diff --git a/trunk/drivers/ptp/ptp_pch.c b/trunk/drivers/ptp/ptp_pch.c index 71a2559278d7..bea94510ad2d 100644 --- a/trunk/drivers/ptp/ptp_pch.c +++ b/trunk/drivers/ptp/ptp_pch.c @@ -628,10 +628,9 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id) chip->caps = ptp_pch_caps; chip->ptp_clock = ptp_clock_register(&chip->caps, &pdev->dev); - if (IS_ERR(chip->ptp_clock)) { - ret = PTR_ERR(chip->ptp_clock); - goto err_ptp_clock_reg; - } + + if (IS_ERR(chip->ptp_clock)) + return PTR_ERR(chip->ptp_clock); spin_lock_init(&chip->register_lock); @@ -670,7 +669,6 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_req_irq: ptp_clock_unregister(chip->ptp_clock); -err_ptp_clock_reg: iounmap(chip->regs); chip->regs = NULL; diff --git a/trunk/drivers/pwm/pwm-imx.c b/trunk/drivers/pwm/pwm-imx.c index c938bae18812..ec287989eafc 100644 --- a/trunk/drivers/pwm/pwm-imx.c +++ b/trunk/drivers/pwm/pwm-imx.c @@ -265,6 +265,11 @@ static int imx_pwm_probe(struct platform_device *pdev) imx->chip.npwm = 1; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (r == NULL) { + dev_err(&pdev->dev, "no memory resource defined\n"); + return -ENODEV; + } + imx->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(imx->mmio_base)) return PTR_ERR(imx->mmio_base); diff --git a/trunk/drivers/pwm/pwm-puv3.c b/trunk/drivers/pwm/pwm-puv3.c index ed6007b27585..d1eb499fb15d 100644 --- a/trunk/drivers/pwm/pwm-puv3.c +++ b/trunk/drivers/pwm/pwm-puv3.c @@ -117,6 +117,11 @@ static int pwm_probe(struct platform_device *pdev) return PTR_ERR(puv3->clk); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (r == NULL) { + dev_err(&pdev->dev, "no memory resource defined\n"); + return -ENODEV; + } + puv3->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(puv3->base)) return PTR_ERR(puv3->base); diff --git a/trunk/drivers/pwm/pwm-pxa.c b/trunk/drivers/pwm/pwm-pxa.c index dc9717551d39..dee6ab552a0a 100644 --- a/trunk/drivers/pwm/pwm-pxa.c +++ b/trunk/drivers/pwm/pwm-pxa.c @@ -147,6 +147,11 @@ static int pwm_probe(struct platform_device *pdev) pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 
2 : 1; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (r == NULL) { + dev_err(&pdev->dev, "no memory resource defined\n"); + return -ENODEV; + } + pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(pwm->mmio_base)) return PTR_ERR(pwm->mmio_base); diff --git a/trunk/drivers/pwm/pwm-tegra.c b/trunk/drivers/pwm/pwm-tegra.c index a5402933001f..3d75f4a88f98 100644 --- a/trunk/drivers/pwm/pwm-tegra.c +++ b/trunk/drivers/pwm/pwm-tegra.c @@ -181,6 +181,11 @@ static int tegra_pwm_probe(struct platform_device *pdev) pwm->dev = &pdev->dev; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "no memory resources defined\n"); + return -ENODEV; + } + pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(pwm->mmio_base)) return PTR_ERR(pwm->mmio_base); diff --git a/trunk/drivers/pwm/pwm-tiecap.c b/trunk/drivers/pwm/pwm-tiecap.c index 72ca42dfa733..0d65fb2e02c7 100644 --- a/trunk/drivers/pwm/pwm-tiecap.c +++ b/trunk/drivers/pwm/pwm-tiecap.c @@ -240,6 +240,11 @@ static int ecap_pwm_probe(struct platform_device *pdev) pc->chip.npwm = 1; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "no memory resource defined\n"); + return -ENODEV; + } + pc->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(pc->mmio_base)) return PTR_ERR(pc->mmio_base); diff --git a/trunk/drivers/pwm/pwm-tiehrpwm.c b/trunk/drivers/pwm/pwm-tiehrpwm.c index 48a485c2e422..6a217596942f 100644 --- a/trunk/drivers/pwm/pwm-tiehrpwm.c +++ b/trunk/drivers/pwm/pwm-tiehrpwm.c @@ -471,6 +471,11 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev) pc->chip.npwm = NUM_PWM_CHANNEL; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "no memory resource defined\n"); + return -ENODEV; + } + pc->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(pc->mmio_base)) return PTR_ERR(pc->mmio_base); diff --git a/trunk/drivers/pwm/pwm-tipwmss.c b/trunk/drivers/pwm/pwm-tipwmss.c index 3b119bc2c3c6..c9c3d3a1e0eb 100644 --- a/trunk/drivers/pwm/pwm-tipwmss.c +++ b/trunk/drivers/pwm/pwm-tipwmss.c @@ -70,6 +70,11 @@ static int pwmss_probe(struct platform_device *pdev) mutex_init(&info->pwmss_lock); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "no memory resource defined\n"); + return -ENODEV; + } + info->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(info->mmio_base)) return PTR_ERR(info->mmio_base); diff --git a/trunk/drivers/pwm/pwm-vt8500.c b/trunk/drivers/pwm/pwm-vt8500.c index 323125abf3f4..69effd19afc7 100644 --- a/trunk/drivers/pwm/pwm-vt8500.c +++ b/trunk/drivers/pwm/pwm-vt8500.c @@ -230,6 +230,11 @@ static int vt8500_pwm_probe(struct platform_device *pdev) } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (r == NULL) { + dev_err(&pdev->dev, "no memory resource defined\n"); + return -ENODEV; + } + chip->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(chip->base)) return PTR_ERR(chip->base); diff --git a/trunk/drivers/rapidio/Kconfig b/trunk/drivers/rapidio/Kconfig index 5ab056494bbe..6194d35ebb97 100644 --- a/trunk/drivers/rapidio/Kconfig +++ b/trunk/drivers/rapidio/Kconfig @@ -47,24 +47,4 @@ config RAPIDIO_DEBUG If you are unsure about this, say N here. -choice - prompt "Enumeration method" - depends on RAPIDIO - default RAPIDIO_ENUM_BASIC - help - There are different enumeration and discovery mechanisms offered - for RapidIO subsystem. 
You may select single built-in method or - or any number of methods to be built as modules. - Selecting a built-in method disables use of loadable methods. - - If unsure, select Basic built-in. - -config RAPIDIO_ENUM_BASIC - tristate "Basic" - help - This option includes basic RapidIO fabric enumeration and discovery - mechanism similar to one described in RapidIO specification Annex 1. - -endchoice - source "drivers/rapidio/switches/Kconfig" diff --git a/trunk/drivers/rapidio/Makefile b/trunk/drivers/rapidio/Makefile index 3036702ffe8b..ec3fb8121004 100644 --- a/trunk/drivers/rapidio/Makefile +++ b/trunk/drivers/rapidio/Makefile @@ -1,8 +1,7 @@ # # Makefile for RapidIO interconnect services # -obj-y += rio.o rio-access.o rio-driver.o rio-sysfs.o -obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o +obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o obj-$(CONFIG_RAPIDIO) += switches/ obj-$(CONFIG_RAPIDIO) += devices/ diff --git a/trunk/drivers/rapidio/devices/tsi721.c b/trunk/drivers/rapidio/devices/tsi721.c index a8b2c23a7ef4..6faba406b6e9 100644 --- a/trunk/drivers/rapidio/devices/tsi721.c +++ b/trunk/drivers/rapidio/devices/tsi721.c @@ -471,10 +471,6 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr) u32 intval; u32 ch_inte; - /* For MSI mode disable all device-level interrupts */ - if (priv->flags & TSI721_USING_MSI) - iowrite32(0, priv->regs + TSI721_DEV_INTE); - dev_int = ioread32(priv->regs + TSI721_DEV_INT); if (!dev_int) return IRQ_NONE; @@ -564,14 +560,6 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr) } } #endif - - /* For MSI mode re-enable device-level interrupts */ - if (priv->flags & TSI721_USING_MSI) { - dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | - TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH; - iowrite32(dev_int, priv->regs + TSI721_DEV_INTE); - } - return IRQ_HANDLED; } diff --git a/trunk/drivers/rapidio/rio-driver.c b/trunk/drivers/rapidio/rio-driver.c index a0c875563d76..0f4a53bdaa3c 100644 --- a/trunk/drivers/rapidio/rio-driver.c +++ b/trunk/drivers/rapidio/rio-driver.c @@ -164,13 +164,6 @@ void rio_unregister_driver(struct rio_driver *rdrv) driver_unregister(&rdrv->driver); } -void rio_attach_device(struct rio_dev *rdev) -{ - rdev->dev.bus = &rio_bus_type; - rdev->dev.parent = &rio_bus; -} -EXPORT_SYMBOL_GPL(rio_attach_device); - /** * rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure * @dev: the standard device structure to match against @@ -207,7 +200,6 @@ struct bus_type rio_bus_type = { .name = "rapidio", .match = rio_match_bus, .dev_attrs = rio_dev_attrs, - .bus_attrs = rio_bus_attrs, .probe = rio_device_probe, .remove = rio_device_remove, }; diff --git a/trunk/drivers/rapidio/rio-scan.c b/trunk/drivers/rapidio/rio-scan.c index 4c15dbf81087..a965acd3c0e4 100644 --- a/trunk/drivers/rapidio/rio-scan.c +++ b/trunk/drivers/rapidio/rio-scan.c @@ -37,8 +37,12 @@ #include "rio.h" +LIST_HEAD(rio_devices); + static void rio_init_em(struct rio_dev *rdev); +DEFINE_SPINLOCK(rio_global_list_lock); + static int next_destid = 0; static int next_comptag = 1; @@ -322,6 +326,127 @@ static int rio_is_switch(struct rio_dev *rdev) return 0; } +/** + * rio_switch_init - Sets switch operations for a particular vendor switch + * @rdev: RIO device + * @do_enum: Enumeration/Discovery mode flag + * + * Searches the RIO switch ops table for known switch types. If the vid + * and did match a switch table entry, then call switch initialization + * routine to setup switch-specific routines. 
+ */ +static void rio_switch_init(struct rio_dev *rdev, int do_enum) +{ + struct rio_switch_ops *cur = __start_rio_switch_ops; + struct rio_switch_ops *end = __end_rio_switch_ops; + + while (cur < end) { + if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) { + pr_debug("RIO: calling init routine for %s\n", + rio_name(rdev)); + cur->init_hook(rdev, do_enum); + break; + } + cur++; + } + + if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) { + pr_debug("RIO: adding STD routing ops for %s\n", + rio_name(rdev)); + rdev->rswitch->add_entry = rio_std_route_add_entry; + rdev->rswitch->get_entry = rio_std_route_get_entry; + rdev->rswitch->clr_table = rio_std_route_clr_table; + } + + if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry) + printk(KERN_ERR "RIO: missing routing ops for %s\n", + rio_name(rdev)); +} + +/** + * rio_add_device- Adds a RIO device to the device model + * @rdev: RIO device + * + * Adds the RIO device to the global device list and adds the RIO + * device to the RIO device list. Creates the generic sysfs nodes + * for an RIO device. + */ +static int rio_add_device(struct rio_dev *rdev) +{ + int err; + + err = device_add(&rdev->dev); + if (err) + return err; + + spin_lock(&rio_global_list_lock); + list_add_tail(&rdev->global_list, &rio_devices); + spin_unlock(&rio_global_list_lock); + + rio_create_sysfs_dev_files(rdev); + + return 0; +} + +/** + * rio_enable_rx_tx_port - enable input receiver and output transmitter of + * given port + * @port: Master port associated with the RIO network + * @local: local=1 select local port otherwise a far device is reached + * @destid: Destination ID of the device to check host bit + * @hopcount: Number of hops to reach the target + * @port_num: Port (-number on switch) to enable on a far end device + * + * Returns 0 or 1 from on General Control Command and Status Register + * (EXT_PTR+0x3C) + */ +inline int rio_enable_rx_tx_port(struct rio_mport *port, + int local, u16 destid, + u8 hopcount, u8 port_num) { +#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS + u32 regval; + u32 ext_ftr_ptr; + + /* + * enable rx input tx output port + */ + pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = " + "%d, port_num = %d)\n", local, destid, hopcount, port_num); + + ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount); + + if (local) { + rio_local_read_config_32(port, ext_ftr_ptr + + RIO_PORT_N_CTL_CSR(0), + ®val); + } else { + if (rio_mport_read_config_32(port, destid, hopcount, + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), ®val) < 0) + return -EIO; + } + + if (regval & RIO_PORT_N_CTL_P_TYP_SER) { + /* serial */ + regval = regval | RIO_PORT_N_CTL_EN_RX_SER + | RIO_PORT_N_CTL_EN_TX_SER; + } else { + /* parallel */ + regval = regval | RIO_PORT_N_CTL_EN_RX_PAR + | RIO_PORT_N_CTL_EN_TX_PAR; + } + + if (local) { + rio_local_write_config_32(port, ext_ftr_ptr + + RIO_PORT_N_CTL_CSR(0), regval); + } else { + if (rio_mport_write_config_32(port, destid, hopcount, + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0) + return -EIO; + } +#endif + return 0; +} + /** * rio_setup_device- Allocates and sets up a RIO device * @net: RIO network @@ -462,7 +587,8 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, rdev->destid); } - rio_attach_device(rdev); + rdev->dev.bus = &rio_bus_type; + rdev->dev.parent = &rio_bus; device_initialize(&rdev->dev); rdev->dev.release = rio_release_dev; @@ -1134,30 +1260,19 @@ static void rio_pw_enable(struct rio_mport *port, int enable) /** * rio_enum_mport- Start enumeration through a 
master port * @mport: Master port to send transactions - * @flags: Enumeration control flags * * Starts the enumeration process. If somebody has enumerated our * master port device, then give up. If not and we have an active * link, then start recursive peer enumeration. Returns %0 if * enumeration succeeds or %-EBUSY if enumeration fails. */ -int rio_enum_mport(struct rio_mport *mport, u32 flags) +int rio_enum_mport(struct rio_mport *mport) { struct rio_net *net = NULL; int rc = 0; printk(KERN_INFO "RIO: enumerate master port %d, %s\n", mport->id, mport->name); - - /* - * To avoid multiple start requests (repeat enumeration is not supported - * by this method) check if enumeration/discovery was performed for this - * mport: if mport was added into the list of mports for a net exit - * with error. - */ - if (mport->nnode.next || mport->nnode.prev) - return -EBUSY; - /* If somebody else enumerated our master port device, bail. */ if (rio_enum_host(mport) < 0) { printk(KERN_INFO @@ -1247,16 +1362,14 @@ static void rio_build_route_tables(struct rio_net *net) /** * rio_disc_mport- Start discovery through a master port * @mport: Master port to send transactions - * @flags: discovery control flags * * Starts the discovery process. If we have an active link, - * then wait for the signal that enumeration is complete (if wait - * is allowed). + * then wait for the signal that enumeration is complete. * When enumeration completion is signaled, start recursive * peer discovery. Returns %0 if discovery succeeds or %-EBUSY * on failure. */ -int rio_disc_mport(struct rio_mport *mport, u32 flags) +int rio_disc_mport(struct rio_mport *mport) { struct rio_net *net = NULL; unsigned long to_end; @@ -1266,11 +1379,6 @@ int rio_disc_mport(struct rio_mport *mport, u32 flags) /* If master port has an active link, allocate net and discover peers */ if (rio_mport_is_active(mport)) { - if (rio_enum_complete(mport)) - goto enum_done; - else if (flags & RIO_SCAN_ENUM_NO_WAIT) - return -EAGAIN; - pr_debug("RIO: wait for enumeration to complete...\n"); to_end = jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ; @@ -1313,41 +1421,3 @@ int rio_disc_mport(struct rio_mport *mport, u32 flags) bail: return -EBUSY; } - -static struct rio_scan rio_scan_ops = { - .enumerate = rio_enum_mport, - .discover = rio_disc_mport, -}; - -static bool scan; -module_param(scan, bool, 0); -MODULE_PARM_DESC(scan, "Start RapidIO network enumeration/discovery " - "(default = 0)"); - -/** - * rio_basic_attach: - * - * When this enumeration/discovery method is loaded as a module this function - * registers its specific enumeration and discover routines for all available - * RapidIO mport devices. The "scan" command line parameter controls ability of - * the module to start RapidIO enumeration/discovery automatically. - * - * Returns 0 for success or -EIO if unable to register itself. - * - * This enumeration/discovery method cannot be unloaded and therefore does not - * provide a matching cleanup_module routine. 
- */ - -static int __init rio_basic_attach(void) -{ - if (rio_register_scan(RIO_MPORT_ANY, &rio_scan_ops)) - return -EIO; - if (scan) - rio_init_mports(); - return 0; -} - -late_initcall(rio_basic_attach); - -MODULE_DESCRIPTION("Basic RapidIO enumeration/discovery"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/rapidio/rio-sysfs.c b/trunk/drivers/rapidio/rio-sysfs.c index 66d4acd5e18f..4dbe360989be 100644 --- a/trunk/drivers/rapidio/rio-sysfs.c +++ b/trunk/drivers/rapidio/rio-sysfs.c @@ -285,48 +285,3 @@ void rio_remove_sysfs_dev_files(struct rio_dev *rdev) rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE); } } - -static ssize_t bus_scan_store(struct bus_type *bus, const char *buf, - size_t count) -{ - long val; - struct rio_mport *port = NULL; - int rc; - - if (kstrtol(buf, 0, &val) < 0) - return -EINVAL; - - if (val == RIO_MPORT_ANY) { - rc = rio_init_mports(); - goto exit; - } - - if (val < 0 || val >= RIO_MAX_MPORTS) - return -EINVAL; - - port = rio_find_mport((int)val); - - if (!port) { - pr_debug("RIO: %s: mport_%d not available\n", - __func__, (int)val); - return -EINVAL; - } - - if (!port->nscan) - return -EINVAL; - - if (port->host_deviceid >= 0) - rc = port->nscan->enumerate(port, 0); - else - rc = port->nscan->discover(port, RIO_SCAN_ENUM_NO_WAIT); -exit: - if (!rc) - rc = count; - - return rc; -} - -struct bus_attribute rio_bus_attrs[] = { - __ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store), - __ATTR_NULL -}; diff --git a/trunk/drivers/rapidio/rio.c b/trunk/drivers/rapidio/rio.c index cb1c08996fbb..d553b5d13722 100644 --- a/trunk/drivers/rapidio/rio.c +++ b/trunk/drivers/rapidio/rio.c @@ -31,11 +31,7 @@ #include "rio.h" -static LIST_HEAD(rio_devices); -static DEFINE_SPINLOCK(rio_global_list_lock); - static LIST_HEAD(rio_mports); -static DEFINE_MUTEX(rio_mport_list_lock); static unsigned char next_portid; static DEFINE_SPINLOCK(rio_mmap_lock); @@ -56,32 +52,6 @@ u16 rio_local_get_device_id(struct rio_mport *port) return (RIO_GET_DID(port->sys_size, result)); } -/** - * rio_add_device- Adds a RIO device to the device model - * @rdev: RIO device - * - * Adds the RIO device to the global device list and adds the RIO - * device to the RIO device list. Creates the generic sysfs nodes - * for an RIO device. - */ -int rio_add_device(struct rio_dev *rdev) -{ - int err; - - err = device_add(&rdev->dev); - if (err) - return err; - - spin_lock(&rio_global_list_lock); - list_add_tail(&rdev->global_list, &rio_devices); - spin_unlock(&rio_global_list_lock); - - rio_create_sysfs_dev_files(rdev); - - return 0; -} -EXPORT_SYMBOL_GPL(rio_add_device); - /** * rio_request_inb_mbox - request inbound mailbox service * @mport: RIO master port from which to allocate the mailbox resource @@ -519,7 +489,6 @@ rio_mport_get_physefb(struct rio_mport *port, int local, return ext_ftr_ptr; } -EXPORT_SYMBOL_GPL(rio_mport_get_physefb); /** * rio_get_comptag - Begin or continue searching for a RIO device by component tag @@ -552,7 +521,6 @@ struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from) spin_unlock(&rio_global_list_lock); return rdev; } -EXPORT_SYMBOL_GPL(rio_get_comptag); /** * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port. 
@@ -577,107 +545,6 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock) regval); return 0; } -EXPORT_SYMBOL_GPL(rio_set_port_lockout); - -/** - * rio_switch_init - Sets switch operations for a particular vendor switch - * @rdev: RIO device - * @do_enum: Enumeration/Discovery mode flag - * - * Searches the RIO switch ops table for known switch types. If the vid - * and did match a switch table entry, then call switch initialization - * routine to setup switch-specific routines. - */ -void rio_switch_init(struct rio_dev *rdev, int do_enum) -{ - struct rio_switch_ops *cur = __start_rio_switch_ops; - struct rio_switch_ops *end = __end_rio_switch_ops; - - while (cur < end) { - if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) { - pr_debug("RIO: calling init routine for %s\n", - rio_name(rdev)); - cur->init_hook(rdev, do_enum); - break; - } - cur++; - } - - if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) { - pr_debug("RIO: adding STD routing ops for %s\n", - rio_name(rdev)); - rdev->rswitch->add_entry = rio_std_route_add_entry; - rdev->rswitch->get_entry = rio_std_route_get_entry; - rdev->rswitch->clr_table = rio_std_route_clr_table; - } - - if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry) - printk(KERN_ERR "RIO: missing routing ops for %s\n", - rio_name(rdev)); -} -EXPORT_SYMBOL_GPL(rio_switch_init); - -/** - * rio_enable_rx_tx_port - enable input receiver and output transmitter of - * given port - * @port: Master port associated with the RIO network - * @local: local=1 select local port otherwise a far device is reached - * @destid: Destination ID of the device to check host bit - * @hopcount: Number of hops to reach the target - * @port_num: Port (-number on switch) to enable on a far end device - * - * Returns 0 or 1 from on General Control Command and Status Register - * (EXT_PTR+0x3C) - */ -int rio_enable_rx_tx_port(struct rio_mport *port, - int local, u16 destid, - u8 hopcount, u8 port_num) -{ -#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS - u32 regval; - u32 ext_ftr_ptr; - - /* - * enable rx input tx output port - */ - pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = " - "%d, port_num = %d)\n", local, destid, hopcount, port_num); - - ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount); - - if (local) { - rio_local_read_config_32(port, ext_ftr_ptr + - RIO_PORT_N_CTL_CSR(0), - ®val); - } else { - if (rio_mport_read_config_32(port, destid, hopcount, - ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), ®val) < 0) - return -EIO; - } - - if (regval & RIO_PORT_N_CTL_P_TYP_SER) { - /* serial */ - regval = regval | RIO_PORT_N_CTL_EN_RX_SER - | RIO_PORT_N_CTL_EN_TX_SER; - } else { - /* parallel */ - regval = regval | RIO_PORT_N_CTL_EN_RX_PAR - | RIO_PORT_N_CTL_EN_TX_PAR; - } - - if (local) { - rio_local_write_config_32(port, ext_ftr_ptr + - RIO_PORT_N_CTL_CSR(0), regval); - } else { - if (rio_mport_write_config_32(port, destid, hopcount, - ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0) - return -EIO; - } -#endif - return 0; -} -EXPORT_SYMBOL_GPL(rio_enable_rx_tx_port); - /** * rio_chk_dev_route - Validate route to the specified device. @@ -743,7 +610,6 @@ rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount) return 0; } -EXPORT_SYMBOL_GPL(rio_mport_chk_dev_access); /** * rio_chk_dev_access - Validate access to the specified device. 
@@ -1075,7 +941,6 @@ rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, return RIO_GET_BLOCK_ID(reg_val); } } -EXPORT_SYMBOL_GPL(rio_mport_get_efb); /** * rio_mport_get_feature - query for devices' extended features @@ -1132,7 +997,6 @@ rio_mport_get_feature(struct rio_mport * port, int local, u16 destid, return 0; } -EXPORT_SYMBOL_GPL(rio_mport_get_feature); /** * rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did @@ -1382,95 +1246,6 @@ EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg); #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ -/** - * rio_find_mport - find RIO mport by its ID - * @mport_id: number (ID) of mport device - * - * Given a RIO mport number, the desired mport is located - * in the global list of mports. If the mport is found, a pointer to its - * data structure is returned. If no mport is found, %NULL is returned. - */ -struct rio_mport *rio_find_mport(int mport_id) -{ - struct rio_mport *port; - - mutex_lock(&rio_mport_list_lock); - list_for_each_entry(port, &rio_mports, node) { - if (port->id == mport_id) - goto found; - } - port = NULL; -found: - mutex_unlock(&rio_mport_list_lock); - - return port; -} - -/** - * rio_register_scan - enumeration/discovery method registration interface - * @mport_id: mport device ID for which fabric scan routine has to be set - * (RIO_MPORT_ANY = set for all available mports) - * @scan_ops: enumeration/discovery control structure - * - * Assigns enumeration or discovery method to the specified mport device (or all - * available mports if RIO_MPORT_ANY is specified). - * Returns error if the mport already has an enumerator attached to it. - * In case of RIO_MPORT_ANY ignores ports with valid scan routines and returns - * an error if was unable to find at least one available mport. - */ -int rio_register_scan(int mport_id, struct rio_scan *scan_ops) -{ - struct rio_mport *port; - int rc = -EBUSY; - - mutex_lock(&rio_mport_list_lock); - list_for_each_entry(port, &rio_mports, node) { - if (port->id == mport_id || mport_id == RIO_MPORT_ANY) { - if (port->nscan && mport_id == RIO_MPORT_ANY) - continue; - else if (port->nscan) - break; - - port->nscan = scan_ops; - rc = 0; - - if (mport_id != RIO_MPORT_ANY) - break; - } - } - mutex_unlock(&rio_mport_list_lock); - - return rc; -} -EXPORT_SYMBOL_GPL(rio_register_scan); - -/** - * rio_unregister_scan - removes enumeration/discovery method from mport - * @mport_id: mport device ID for which fabric scan routine has to be - * unregistered (RIO_MPORT_ANY = set for all available mports) - * - * Removes enumeration or discovery method assigned to the specified mport - * device (or all available mports if RIO_MPORT_ANY is specified). 
- */ -int rio_unregister_scan(int mport_id) -{ - struct rio_mport *port; - - mutex_lock(&rio_mport_list_lock); - list_for_each_entry(port, &rio_mports, node) { - if (port->id == mport_id || mport_id == RIO_MPORT_ANY) { - if (port->nscan) - port->nscan = NULL; - if (mport_id != RIO_MPORT_ANY) - break; - } - } - mutex_unlock(&rio_mport_list_lock); - - return 0; -} -EXPORT_SYMBOL_GPL(rio_unregister_scan); - static void rio_fixup_device(struct rio_dev *dev) { } @@ -1499,7 +1274,7 @@ static void disc_work_handler(struct work_struct *_work) work = container_of(_work, struct rio_disc_work, work); pr_debug("RIO: discovery work for mport %d %s\n", work->mport->id, work->mport->name); - work->mport->nscan->discover(work->mport, 0); + rio_disc_mport(work->mport); } int rio_init_mports(void) @@ -1515,15 +1290,12 @@ int rio_init_mports(void) * First, run enumerations and check if we need to perform discovery * on any of the registered mports. */ - mutex_lock(&rio_mport_list_lock); list_for_each_entry(port, &rio_mports, node) { - if (port->host_deviceid >= 0) { - if (port->nscan) - port->nscan->enumerate(port, 0); - } else + if (port->host_deviceid >= 0) + rio_enum_mport(port); + else n++; } - mutex_unlock(&rio_mport_list_lock); if (!n) goto no_disc; @@ -1550,16 +1322,14 @@ int rio_init_mports(void) } n = 0; - mutex_lock(&rio_mport_list_lock); list_for_each_entry(port, &rio_mports, node) { - if (port->host_deviceid < 0 && port->nscan) { + if (port->host_deviceid < 0) { work[n].mport = port; INIT_WORK(&work[n].work, disc_work_handler); queue_work(rio_wq, &work[n].work); n++; } } - mutex_unlock(&rio_mport_list_lock); flush_workqueue(rio_wq); pr_debug("RIO: destroy discovery workqueue\n"); @@ -1572,6 +1342,8 @@ int rio_init_mports(void) return 0; } +device_initcall_sync(rio_init_mports); + static int hdids[RIO_MAX_MPORTS + 1]; static int rio_get_hdid(int index) @@ -1599,10 +1371,7 @@ int rio_register_mport(struct rio_mport *port) port->id = next_portid++; port->host_deviceid = rio_get_hdid(port->id); - port->nscan = NULL; - mutex_lock(&rio_mport_list_lock); list_add_tail(&port->node, &rio_mports); - mutex_unlock(&rio_mport_list_lock); return 0; } @@ -1617,4 +1386,3 @@ EXPORT_SYMBOL_GPL(rio_request_inb_mbox); EXPORT_SYMBOL_GPL(rio_release_inb_mbox); EXPORT_SYMBOL_GPL(rio_request_outb_mbox); EXPORT_SYMBOL_GPL(rio_release_outb_mbox); -EXPORT_SYMBOL_GPL(rio_init_mports); diff --git a/trunk/drivers/rapidio/rio.h b/trunk/drivers/rapidio/rio.h index c14f864dea5c..b1af414f15e6 100644 --- a/trunk/drivers/rapidio/rio.h +++ b/trunk/drivers/rapidio/rio.h @@ -15,7 +15,6 @@ #include #define RIO_MAX_CHK_RETRY 3 -#define RIO_MPORT_ANY (-1) /* Functions internal to the RIO core code */ @@ -28,6 +27,8 @@ extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount); extern int rio_create_sysfs_dev_files(struct rio_dev *rdev); +extern int rio_enum_mport(struct rio_mport *mport); +extern int rio_disc_mport(struct rio_mport *mport); extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 route_port); @@ -38,18 +39,10 @@ extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table); extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock); extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from); -extern int rio_add_device(struct rio_dev *rdev); -extern void rio_switch_init(struct rio_dev *rdev, int 
do_enum); -extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid, - u8 hopcount, u8 port_num); -extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops); -extern int rio_unregister_scan(int mport_id); -extern void rio_attach_device(struct rio_dev *rdev); -extern struct rio_mport *rio_find_mport(int mport_id); /* Structures internal to the RIO core code */ extern struct device_attribute rio_dev_attrs[]; -extern struct bus_attribute rio_bus_attrs[]; +extern spinlock_t rio_global_list_lock; extern struct rio_switch_ops __start_rio_switch_ops[]; extern struct rio_switch_ops __end_rio_switch_ops[]; diff --git a/trunk/drivers/regulator/core.c b/trunk/drivers/regulator/core.c index 815d6df8bd5f..6e5017841582 100644 --- a/trunk/drivers/regulator/core.c +++ b/trunk/drivers/regulator/core.c @@ -1539,10 +1539,7 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev) } /** - * regulator_ena_gpio_ctrl - balance enable_count of each GPIO and actual GPIO pin control - * @rdev: regulator_dev structure - * @enable: enable GPIO at initial use? - * + * Balance enable_count of each GPIO and actual GPIO pin control. * GPIO is enabled in case of initial use. (enable_count is 0) * GPIO is disabled when it is not shared any more. (enable_count <= 1) */ @@ -2705,7 +2702,7 @@ EXPORT_SYMBOL_GPL(regulator_get_voltage); /** * regulator_set_current_limit - set regulator output current limit * @regulator: regulator source - * @min_uA: Minimum supported current in uA + * @min_uA: Minimuum supported current in uA * @max_uA: Maximum supported current in uA * * Sets current sink to the desired output current. This can be set during diff --git a/trunk/drivers/regulator/dbx500-prcmu.c b/trunk/drivers/regulator/dbx500-prcmu.c index ce89f7848a57..89bd2faaef8c 100644 --- a/trunk/drivers/regulator/dbx500-prcmu.c +++ b/trunk/drivers/regulator/dbx500-prcmu.c @@ -24,6 +24,18 @@ static int power_state_active_cnt; /* will initialize to zero */ static DEFINE_SPINLOCK(power_state_active_lock); +int power_state_active_get(void) +{ + unsigned long flags; + int cnt; + + spin_lock_irqsave(&power_state_active_lock, flags); + cnt = power_state_active_cnt; + spin_unlock_irqrestore(&power_state_active_lock, flags); + + return cnt; +} + void power_state_active_enable(void) { unsigned long flags; @@ -53,18 +65,6 @@ int power_state_active_disable(void) #ifdef CONFIG_REGULATOR_DEBUG -static int power_state_active_get(void) -{ - unsigned long flags; - int cnt; - - spin_lock_irqsave(&power_state_active_lock, flags); - cnt = power_state_active_cnt; - spin_unlock_irqrestore(&power_state_active_lock, flags); - - return cnt; -} - static struct ux500_regulator_debug { struct dentry *dir; struct dentry *status_file; diff --git a/trunk/drivers/regulator/palmas-regulator.c b/trunk/drivers/regulator/palmas-regulator.c index 3ae44ac12a94..92ceed0fc65e 100644 --- a/trunk/drivers/regulator/palmas-regulator.c +++ b/trunk/drivers/regulator/palmas-regulator.c @@ -840,7 +840,7 @@ static int palmas_regulators_probe(struct platform_device *pdev) break; } - if ((id == PALMAS_REG_SMPS6) || (id == PALMAS_REG_SMPS8)) + if ((id == PALMAS_REG_SMPS6) && (id == PALMAS_REG_SMPS8)) ramp_delay_support = true; if (ramp_delay_support) { @@ -878,7 +878,7 @@ static int palmas_regulators_probe(struct platform_device *pdev) pmic->desc[id].vsel_mask = SMPS10_VSEL; pmic->desc[id].enable_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, - PALMAS_SMPS10_CTRL); + PALMAS_SMPS10_STATUS); pmic->desc[id].enable_mask = SMPS10_BOOST_EN; 
pmic->desc[id].min_uV = 3750000; pmic->desc[id].uV_step = 1250000; diff --git a/trunk/drivers/regulator/tps6586x-regulator.c b/trunk/drivers/regulator/tps6586x-regulator.c index 2c9155b66f09..d8fa37d5c734 100644 --- a/trunk/drivers/regulator/tps6586x-regulator.c +++ b/trunk/drivers/regulator/tps6586x-regulator.c @@ -439,7 +439,7 @@ static int tps6586x_regulator_remove(struct platform_device *pdev) static struct platform_driver tps6586x_regulator_driver = { .driver = { - .name = "tps6586x-regulator", + .name = "tps6586x-pmic", .owner = THIS_MODULE, }, .probe = tps6586x_regulator_probe, diff --git a/trunk/drivers/rtc/Kconfig b/trunk/drivers/rtc/Kconfig index b9838130a7b0..0c81915b1997 100644 --- a/trunk/drivers/rtc/Kconfig +++ b/trunk/drivers/rtc/Kconfig @@ -20,6 +20,7 @@ if RTC_CLASS config RTC_HCTOSYS bool "Set system time from RTC on startup and resume" default y + depends on !ALWAYS_USE_PERSISTENT_CLOCK help If you say yes here, the system time (wall clock) will be set using the value read from a specified RTC device. This is useful to avoid @@ -28,6 +29,7 @@ config RTC_HCTOSYS config RTC_SYSTOHC bool "Set the RTC time based on NTP synchronization" default y + depends on !ALWAYS_USE_PERSISTENT_CLOCK help If you say yes here, the system time (wall clock) will be stored in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11 diff --git a/trunk/drivers/rtc/rtc-at91rm9200.c b/trunk/drivers/rtc/rtc-at91rm9200.c index f296f3f7db9b..0eab77b22340 100644 --- a/trunk/drivers/rtc/rtc-at91rm9200.c +++ b/trunk/drivers/rtc/rtc-at91rm9200.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -43,65 +42,10 @@ #define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */ -struct at91_rtc_config { - bool use_shadow_imr; -}; - -static const struct at91_rtc_config *at91_rtc_config; static DECLARE_COMPLETION(at91_rtc_updated); static unsigned int at91_alarm_year = AT91_RTC_EPOCH; static void __iomem *at91_rtc_regs; static int irq; -static DEFINE_SPINLOCK(at91_rtc_lock); -static u32 at91_rtc_shadow_imr; - -static void at91_rtc_write_ier(u32 mask) -{ - unsigned long flags; - - spin_lock_irqsave(&at91_rtc_lock, flags); - at91_rtc_shadow_imr |= mask; - at91_rtc_write(AT91_RTC_IER, mask); - spin_unlock_irqrestore(&at91_rtc_lock, flags); -} - -static void at91_rtc_write_idr(u32 mask) -{ - unsigned long flags; - - spin_lock_irqsave(&at91_rtc_lock, flags); - at91_rtc_write(AT91_RTC_IDR, mask); - /* - * Register read back (of any RTC-register) needed to make sure - * IDR-register write has reached the peripheral before updating - * shadow mask. - * - * Note that there is still a possibility that the mask is updated - * before interrupts have actually been disabled in hardware. The only - * way to be certain would be to poll the IMR-register, which is is - * the very register we are trying to emulate. The register read back - * is a reasonable heuristic. 
- */ - at91_rtc_read(AT91_RTC_SR); - at91_rtc_shadow_imr &= ~mask; - spin_unlock_irqrestore(&at91_rtc_lock, flags); -} - -static u32 at91_rtc_read_imr(void) -{ - unsigned long flags; - u32 mask; - - if (at91_rtc_config->use_shadow_imr) { - spin_lock_irqsave(&at91_rtc_lock, flags); - mask = at91_rtc_shadow_imr; - spin_unlock_irqrestore(&at91_rtc_lock, flags); - } else { - mask = at91_rtc_read(AT91_RTC_IMR); - } - - return mask; -} /* * Decode time/date into rtc_time structure @@ -166,9 +110,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm) cr = at91_rtc_read(AT91_RTC_CR); at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); - at91_rtc_write_ier(AT91_RTC_ACKUPD); + at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD); wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ - at91_rtc_write_idr(AT91_RTC_ACKUPD); + at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); at91_rtc_write(AT91_RTC_TIMR, bin2bcd(tm->tm_sec) << 0 @@ -200,7 +144,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); tm->tm_year = at91_alarm_year - 1900; - alrm->enabled = (at91_rtc_read_imr() & AT91_RTC_ALARM) + alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM) ? 1 : 0; dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, @@ -225,7 +169,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) tm.tm_min = alrm->time.tm_min; tm.tm_sec = alrm->time.tm_sec; - at91_rtc_write_idr(AT91_RTC_ALARM); + at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); at91_rtc_write(AT91_RTC_TIMALR, bin2bcd(tm.tm_sec) << 0 | bin2bcd(tm.tm_min) << 8 @@ -238,7 +182,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) if (alrm->enabled) { at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); - at91_rtc_write_ier(AT91_RTC_ALARM); + at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); } dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, @@ -254,9 +198,9 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) if (enabled) { at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); - at91_rtc_write_ier(AT91_RTC_ALARM); + at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); } else - at91_rtc_write_idr(AT91_RTC_ALARM); + at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); return 0; } @@ -265,7 +209,7 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) */ static int at91_rtc_proc(struct device *dev, struct seq_file *seq) { - unsigned long imr = at91_rtc_read_imr(); + unsigned long imr = at91_rtc_read(AT91_RTC_IMR); seq_printf(seq, "update_IRQ\t: %s\n", (imr & AT91_RTC_ACKUPD) ? "yes" : "no"); @@ -285,7 +229,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) unsigned int rtsr; unsigned long events = 0; - rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr(); + rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR); if (rtsr) { /* this interrupt is shared! Is it ours? 
*/ if (rtsr & AT91_RTC_ALARM) events |= (RTC_AF | RTC_IRQF); @@ -306,43 +250,6 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) return IRQ_NONE; /* not handled */ } -static const struct at91_rtc_config at91rm9200_config = { -}; - -static const struct at91_rtc_config at91sam9x5_config = { - .use_shadow_imr = true, -}; - -#ifdef CONFIG_OF -static const struct of_device_id at91_rtc_dt_ids[] = { - { - .compatible = "atmel,at91rm9200-rtc", - .data = &at91rm9200_config, - }, { - .compatible = "atmel,at91sam9x5-rtc", - .data = &at91sam9x5_config, - }, { - /* sentinel */ - } -}; -MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids); -#endif - -static const struct at91_rtc_config * -at91_rtc_get_config(struct platform_device *pdev) -{ - const struct of_device_id *match; - - if (pdev->dev.of_node) { - match = of_match_node(at91_rtc_dt_ids, pdev->dev.of_node); - if (!match) - return NULL; - return (const struct at91_rtc_config *)match->data; - } - - return &at91rm9200_config; -} - static const struct rtc_class_ops at91_rtc_ops = { .read_time = at91_rtc_readtime, .set_time = at91_rtc_settime, @@ -361,10 +268,6 @@ static int __init at91_rtc_probe(struct platform_device *pdev) struct resource *regs; int ret = 0; - at91_rtc_config = at91_rtc_get_config(pdev); - if (!at91_rtc_config) - return -ENODEV; - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) { dev_err(&pdev->dev, "no mmio resource defined\n"); @@ -387,7 +290,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev) at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */ /* Disable all interrupts */ - at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM | + at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | AT91_RTC_SECEV | AT91_RTC_TIMEV | AT91_RTC_CALEV); @@ -432,7 +335,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev) struct rtc_device *rtc = platform_get_drvdata(pdev); /* Disable all interrupts */ - at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM | + at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | AT91_RTC_SECEV | AT91_RTC_TIMEV | AT91_RTC_CALEV); free_irq(irq, pdev); @@ -455,13 +358,13 @@ static int at91_rtc_suspend(struct device *dev) /* this IRQ is shared with DBGU and other hardware which isn't * necessarily doing PM like we are... 
*/ - at91_rtc_imr = at91_rtc_read_imr() + at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR) & (AT91_RTC_ALARM|AT91_RTC_SECEV); if (at91_rtc_imr) { if (device_may_wakeup(dev)) enable_irq_wake(irq); else - at91_rtc_write_idr(at91_rtc_imr); + at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr); } return 0; } @@ -472,7 +375,7 @@ static int at91_rtc_resume(struct device *dev) if (device_may_wakeup(dev)) disable_irq_wake(irq); else - at91_rtc_write_ier(at91_rtc_imr); + at91_rtc_write(AT91_RTC_IER, at91_rtc_imr); } return 0; } @@ -480,6 +383,12 @@ static int at91_rtc_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume); +static const struct of_device_id at91_rtc_dt_ids[] = { + { .compatible = "atmel,at91rm9200-rtc" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids); + static struct platform_driver at91_rtc_driver = { .remove = __exit_p(at91_rtc_remove), .driver = { diff --git a/trunk/drivers/rtc/rtc-cmos.c b/trunk/drivers/rtc/rtc-cmos.c index f1cb706445c7..cc5bea9c4b1c 100644 --- a/trunk/drivers/rtc/rtc-cmos.c +++ b/trunk/drivers/rtc/rtc-cmos.c @@ -854,9 +854,6 @@ static int cmos_resume(struct device *dev) } spin_lock_irq(&rtc_lock); - if (device_may_wakeup(dev)) - hpet_rtc_timer_init(); - do { CMOS_WRITE(tmp, RTC_CONTROL); hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK); @@ -872,6 +869,7 @@ static int cmos_resume(struct device *dev) rtc_update_irq(cmos->rtc, 1, mask); tmp &= ~RTC_AIE; hpet_mask_rtc_irq_bit(RTC_AIE); + hpet_rtc_timer_init(); } while (mask & RTC_AIE); spin_unlock_irq(&rtc_lock); } diff --git a/trunk/drivers/rtc/rtc-max8998.c b/trunk/drivers/rtc/rtc-max8998.c index d5af7baa48b5..48b6612fae7f 100644 --- a/trunk/drivers/rtc/rtc-max8998.c +++ b/trunk/drivers/rtc/rtc-max8998.c @@ -285,7 +285,7 @@ static int max8998_rtc_probe(struct platform_device *pdev) info->irq, ret); dev_info(&pdev->dev, "RTC CHIP NAME: %s\n", pdev->id_entry->name); - if (pdata && pdata->rtc_delay) { + if (pdata->rtc_delay) { info->lp3974_bug_workaround = true; dev_warn(&pdev->dev, "LP3974 with RTC REGERR option." 
" RTC updates will be extremely slow.\n"); diff --git a/trunk/drivers/rtc/rtc-nuc900.c b/trunk/drivers/rtc/rtc-nuc900.c index d592e2fe43f7..f5dfb6e5e7d9 100644 --- a/trunk/drivers/rtc/rtc-nuc900.c +++ b/trunk/drivers/rtc/rtc-nuc900.c @@ -234,6 +234,11 @@ static int __init nuc900_rtc_probe(struct platform_device *pdev) return -ENOMEM; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "platform_get_resource failed\n"); + return -ENXIO; + } + nuc900_rtc->rtc_reg = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(nuc900_rtc->rtc_reg)) return PTR_ERR(nuc900_rtc->rtc_reg); diff --git a/trunk/drivers/rtc/rtc-omap.c b/trunk/drivers/rtc/rtc-omap.c index b0ba3fc991ea..4e1bdb832e37 100644 --- a/trunk/drivers/rtc/rtc-omap.c +++ b/trunk/drivers/rtc/rtc-omap.c @@ -347,6 +347,11 @@ static int __init omap_rtc_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + pr_debug("%s: RTC resource data missing\n", pdev->name); + return -ENOENT; + } + rtc_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(rtc_base)) return PTR_ERR(rtc_base); diff --git a/trunk/drivers/rtc/rtc-pl031.c b/trunk/drivers/rtc/rtc-pl031.c index 0f0609b1aa2c..8900ea784817 100644 --- a/trunk/drivers/rtc/rtc-pl031.c +++ b/trunk/drivers/rtc/rtc-pl031.c @@ -306,7 +306,7 @@ static int pl031_remove(struct amba_device *adev) struct pl031_local *ldata = dev_get_drvdata(&adev->dev); amba_set_drvdata(adev, NULL); - free_irq(adev->irq[0], ldata); + free_irq(adev->irq[0], ldata->rtc); rtc_device_unregister(ldata->rtc); iounmap(ldata->base); kfree(ldata); diff --git a/trunk/drivers/rtc/rtc-s3c.c b/trunk/drivers/rtc/rtc-s3c.c index 0b495e8b8e66..14040b22888d 100644 --- a/trunk/drivers/rtc/rtc-s3c.c +++ b/trunk/drivers/rtc/rtc-s3c.c @@ -477,6 +477,11 @@ static int s3c_rtc_probe(struct platform_device *pdev) /* get the memory region */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&pdev->dev, "failed to get memory region resource\n"); + return -ENOENT; + } + s3c_rtc_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(s3c_rtc_base)) return PTR_ERR(s3c_rtc_base); diff --git a/trunk/drivers/rtc/rtc-tegra.c b/trunk/drivers/rtc/rtc-tegra.c index 76af92ad5a8a..a34315d25478 100644 --- a/trunk/drivers/rtc/rtc-tegra.c +++ b/trunk/drivers/rtc/rtc-tegra.c @@ -322,6 +322,12 @@ static int __init tegra_rtc_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, + "Unable to allocate resources for device.\n"); + return -EBUSY; + } + info->rtc_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(info->rtc_base)) return PTR_ERR(info->rtc_base); diff --git a/trunk/drivers/rtc/rtc-tps6586x.c b/trunk/drivers/rtc/rtc-tps6586x.c index 426901cef14f..459c2ffc95a6 100644 --- a/trunk/drivers/rtc/rtc-tps6586x.c +++ b/trunk/drivers/rtc/rtc-tps6586x.c @@ -273,8 +273,6 @@ static int tps6586x_rtc_probe(struct platform_device *pdev) return ret; } - device_init_wakeup(&pdev->dev, 1); - platform_set_drvdata(pdev, rtc); rtc->rtc = devm_rtc_device_register(&pdev->dev, dev_name(&pdev->dev), &tps6586x_rtc_ops, THIS_MODULE); @@ -294,6 +292,7 @@ static int tps6586x_rtc_probe(struct platform_device *pdev) goto fail_rtc_register; } disable_irq(rtc->irq); + device_set_wakeup_capable(&pdev->dev, 1); return 0; fail_rtc_register: diff --git a/trunk/drivers/rtc/rtc-twl.c b/trunk/drivers/rtc/rtc-twl.c index b2eab34f38d9..8751a5240c99 100644 --- 
a/trunk/drivers/rtc/rtc-twl.c +++ b/trunk/drivers/rtc/rtc-twl.c @@ -524,7 +524,6 @@ static int twl_rtc_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, rtc); - device_init_wakeup(&pdev->dev, 1); return 0; out2: diff --git a/trunk/drivers/s390/block/dasd.c b/trunk/drivers/s390/block/dasd.c index d72a9216ee2e..4361d9772c42 100644 --- a/trunk/drivers/s390/block/dasd.c +++ b/trunk/drivers/s390/block/dasd.c @@ -3440,16 +3440,8 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) device->path_data.opm &= ~eventlpm; device->path_data.ppm &= ~eventlpm; device->path_data.npm &= ~eventlpm; - if (oldopm && !device->path_data.opm) { - dev_warn(&device->cdev->dev, - "No verified channel paths remain " - "for the device\n"); - DBF_DEV_EVENT(DBF_WARNING, device, - "%s", "last verified path gone"); - dasd_eer_write(device, NULL, DASD_EER_NOPATH); - dasd_device_set_stop_bits(device, - DASD_STOPPED_DC_WAIT); - } + if (oldopm && !device->path_data.opm) + dasd_generic_last_path_gone(device); } if (path_event[chp] & PE_PATH_AVAILABLE) { device->path_data.opm &= ~eventlpm; diff --git a/trunk/drivers/s390/block/xpram.c b/trunk/drivers/s390/block/xpram.c index 464dd29d06c0..690c3338a8ae 100644 --- a/trunk/drivers/s390/block/xpram.c +++ b/trunk/drivers/s390/block/xpram.c @@ -343,7 +343,6 @@ static int __init xpram_setup_blkdev(void) put_disk(xpram_disks[i]); goto out; } - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]); blk_queue_make_request(xpram_queues[i], xpram_make_request); blk_queue_logical_block_size(xpram_queues[i], 4096); } diff --git a/trunk/drivers/s390/cio/chp.c b/trunk/drivers/s390/cio/chp.c index 6c440d4349d4..21fabc6d5a9c 100644 --- a/trunk/drivers/s390/cio/chp.c +++ b/trunk/drivers/s390/cio/chp.c @@ -352,48 +352,12 @@ static ssize_t chp_shared_show(struct device *dev, static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); -static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct channel_path *chp = to_channelpath(dev); - ssize_t rc; - - mutex_lock(&chp->lock); - if (chp->desc_fmt1.flags & 0x10) - rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid); - else - rc = 0; - mutex_unlock(&chp->lock); - - return rc; -} -static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL); - -static ssize_t chp_chid_external_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct channel_path *chp = to_channelpath(dev); - ssize_t rc; - - mutex_lock(&chp->lock); - if (chp->desc_fmt1.flags & 0x10) - rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 
1 : 0); - else - rc = 0; - mutex_unlock(&chp->lock); - - return rc; -} -static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL); - static struct attribute *chp_attrs[] = { &dev_attr_status.attr, &dev_attr_configure.attr, &dev_attr_type.attr, &dev_attr_cmg.attr, &dev_attr_shared.attr, - &dev_attr_chid.attr, - &dev_attr_chid_external.attr, NULL, }; static struct attribute_group chp_attr_group = { diff --git a/trunk/drivers/s390/cio/chsc.h b/trunk/drivers/s390/cio/chsc.h index e7ef2a683b8f..349d5fc47196 100644 --- a/trunk/drivers/s390/cio/chsc.h +++ b/trunk/drivers/s390/cio/chsc.h @@ -43,9 +43,7 @@ struct channel_path_desc_fmt1 { u8 chpid; u32:24; u8 chpp; - u32 unused[2]; - u16 chid; - u32:16; + u32 unused[3]; u16 mdc; u16:13; u8 r:1; diff --git a/trunk/drivers/s390/net/netiucv.c b/trunk/drivers/s390/net/netiucv.c index 9ca3996f65b2..4ffa66c87ea5 100644 --- a/trunk/drivers/s390/net/netiucv.c +++ b/trunk/drivers/s390/net/netiucv.c @@ -2040,7 +2040,6 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata) netiucv_setup_netdevice); if (!dev) return NULL; - rtnl_lock(); if (dev_alloc_name(dev, dev->name) < 0) goto out_netdev; @@ -2062,7 +2061,6 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata) out_fsm: kfree_fsm(privptr->fsm); out_netdev: - rtnl_unlock(); free_netdev(dev); return NULL; } @@ -2102,7 +2100,6 @@ static ssize_t conn_write(struct device_driver *drv, rc = netiucv_register_device(dev); if (rc) { - rtnl_unlock(); IUCV_DBF_TEXT_(setup, 2, "ret %d from netiucv_register_device\n", rc); goto out_free_ndev; @@ -2112,8 +2109,7 @@ static ssize_t conn_write(struct device_driver *drv, priv = netdev_priv(dev); SET_NETDEV_DEV(dev, priv->dev); - rc = register_netdevice(dev); - rtnl_unlock(); + rc = register_netdev(dev); if (rc) goto out_unreg; diff --git a/trunk/drivers/scsi/bfa/bfad_debugfs.c b/trunk/drivers/scsi/bfa/bfad_debugfs.c index b63d534192e3..439c012be763 100644 --- a/trunk/drivers/scsi/bfa/bfad_debugfs.c +++ b/trunk/drivers/scsi/bfa/bfad_debugfs.c @@ -186,7 +186,7 @@ bfad_debugfs_lseek(struct file *file, loff_t offset, int orig) file->f_pos += offset; break; case 2: - file->f_pos = debug->buffer_len + offset; + file->f_pos = debug->buffer_len - offset; break; default: return -EINVAL; diff --git a/trunk/drivers/scsi/fcoe/fcoe.c b/trunk/drivers/scsi/fcoe/fcoe.c index 32ae6c67ea3a..292b24f9bf93 100644 --- a/trunk/drivers/scsi/fcoe/fcoe.c +++ b/trunk/drivers/scsi/fcoe/fcoe.c @@ -1656,12 +1656,9 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN && fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) { - /* must set skb->dev before calling vlan_put_tag */ + skb->vlan_tci = VLAN_TAG_PRESENT | + vlan_dev_vlan_id(fcoe->netdev); skb->dev = fcoe->realdev; - skb = __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), - vlan_dev_vlan_id(fcoe->netdev)); - if (!skb) - return -ENOMEM; } else skb->dev = fcoe->netdev; diff --git a/trunk/drivers/scsi/fcoe/fcoe_ctlr.c b/trunk/drivers/scsi/fcoe/fcoe_ctlr.c index 795843dde8ec..cd743c545ce9 100644 --- a/trunk/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/trunk/drivers/scsi/fcoe/fcoe_ctlr.c @@ -1548,6 +1548,9 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip) { struct fcoe_fcf *fcf; struct fcoe_fcf *best = fip->sel_fcf; + struct fcoe_fcf *first; + + first = list_first_entry(&fip->fcfs, struct fcoe_fcf, list); list_for_each_entry(fcf, &fip->fcfs, list) { LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx " @@ -1565,15 +1568,17 
@@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip) "" : "un"); continue; } - if (!best || fcf->pri < best->pri || best->flogi_sent) - best = fcf; - if (fcf->fabric_name != best->fabric_name || - fcf->vfid != best->vfid || - fcf->fc_map != best->fc_map) { + if (fcf->fabric_name != first->fabric_name || + fcf->vfid != first->vfid || + fcf->fc_map != first->fc_map) { LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, " "or FC-MAP\n"); return NULL; } + if (fcf->flogi_sent) + continue; + if (!best || fcf->pri < best->pri || best->flogi_sent) + best = fcf; } fip->sel_fcf = best; if (best) { diff --git a/trunk/drivers/scsi/fnic/fnic_debugfs.c b/trunk/drivers/scsi/fnic/fnic_debugfs.c index 85e1ffd0e5c5..adc1f7f471f5 100644 --- a/trunk/drivers/scsi/fnic/fnic_debugfs.c +++ b/trunk/drivers/scsi/fnic/fnic_debugfs.c @@ -174,7 +174,7 @@ static loff_t fnic_trace_debugfs_lseek(struct file *file, pos = file->f_pos + offset; break; case 2: - pos = fnic_dbg_prt->buffer_len + offset; + pos = fnic_dbg_prt->buffer_len - offset; } return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ? -EINVAL : (file->f_pos = pos); diff --git a/trunk/drivers/scsi/ipr.c b/trunk/drivers/scsi/ipr.c index 6c4cedb44c07..82a3c1ec8706 100644 --- a/trunk/drivers/scsi/ipr.c +++ b/trunk/drivers/scsi/ipr.c @@ -8980,6 +8980,19 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) if (!ioa_cfg->res_entries) goto out; + if (ioa_cfg->sis64) { + ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) * + BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL); + ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) * + BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL); + ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) * + BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL); + + if (!ioa_cfg->target_ids || !ioa_cfg->array_ids + || !ioa_cfg->vset_ids) + goto out_free_res_entries; + } + for (i = 0; i < ioa_cfg->max_devs_supported; i++) { list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; @@ -9076,6 +9089,9 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); out_free_res_entries: kfree(ioa_cfg->res_entries); + kfree(ioa_cfg->target_ids); + kfree(ioa_cfg->array_ids); + kfree(ioa_cfg->vset_ids); goto out; } diff --git a/trunk/drivers/scsi/ipr.h b/trunk/drivers/scsi/ipr.h index 07a85ce41782..a1fb840596ef 100644 --- a/trunk/drivers/scsi/ipr.h +++ b/trunk/drivers/scsi/ipr.h @@ -1440,9 +1440,9 @@ struct ipr_ioa_cfg { /* * Bitmaps for SIS64 generated target values */ - unsigned long target_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)]; - unsigned long array_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)]; - unsigned long vset_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)]; + unsigned long *target_ids; + unsigned long *array_ids; + unsigned long *vset_ids; u16 type; /* CCIN of the card */ diff --git a/trunk/drivers/scsi/libfc/fc_exch.c b/trunk/drivers/scsi/libfc/fc_exch.c index 8b928c67e4b9..c772d8d27159 100644 --- a/trunk/drivers/scsi/libfc/fc_exch.c +++ b/trunk/drivers/scsi/libfc/fc_exch.c @@ -463,7 +463,13 @@ static void fc_exch_delete(struct fc_exch *ep) fc_exch_release(ep); /* drop hold for exch in mp */ } -static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp, +/** + * fc_seq_send() - Send a frame using existing sequence/exchange pair + * @lport: The local port that the exchange will be sent on + * @sp: The sequence to be sent + * @fp: The frame to be sent on the exchange + */ +static int fc_seq_send(struct fc_lport *lport, struct 
fc_seq *sp, struct fc_frame *fp) { struct fc_exch *ep; @@ -473,7 +479,7 @@ static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp, u8 fh_type = fh->fh_type; ep = fc_seq_exch(sp); - WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT)); + WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT); f_ctl = ntoh24(fh->fh_f_ctl); fc_exch_setup_hdr(ep, fp, f_ctl); @@ -496,34 +502,17 @@ static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp, error = lport->tt.frame_send(lport, fp); if (fh_type == FC_TYPE_BLS) - goto out; + return error; /* * Update the exchange and sequence flags, * assuming all frames for the sequence have been sent. * We can only be called to send once for each sequence. */ + spin_lock_bh(&ep->ex_lock); ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */ if (f_ctl & FC_FC_SEQ_INIT) ep->esb_stat &= ~ESB_ST_SEQ_INIT; -out: - return error; -} - -/** - * fc_seq_send() - Send a frame using existing sequence/exchange pair - * @lport: The local port that the exchange will be sent on - * @sp: The sequence to be sent - * @fp: The frame to be sent on the exchange - */ -static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, - struct fc_frame *fp) -{ - struct fc_exch *ep; - int error; - ep = fc_seq_exch(sp); - spin_lock_bh(&ep->ex_lock); - error = fc_seq_send_locked(lport, sp, fp); spin_unlock_bh(&ep->ex_lock); return error; } @@ -640,7 +629,7 @@ static int fc_exch_abort_locked(struct fc_exch *ep, if (fp) { fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid, FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); - error = fc_seq_send_locked(ep->lp, sp, fp); + error = fc_seq_send(ep->lp, sp, fp); } else error = -ENOBUFS; return error; @@ -1143,7 +1132,7 @@ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp, f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT; f_ctl |= ep->f_ctl; fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0); - fc_seq_send_locked(ep->lp, sp, fp); + fc_seq_send(ep->lp, sp, fp); } /** @@ -1318,8 +1307,8 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp) ap->ba_low_seq_cnt = htons(sp->cnt); } sp = fc_seq_start_next_locked(sp); - fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS); spin_unlock_bh(&ep->ex_lock); + fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS); fc_frame_free(rx_fp); return; diff --git a/trunk/drivers/scsi/libfc/fc_rport.c b/trunk/drivers/scsi/libfc/fc_rport.c index 6bbb9447b75d..d518d17e940f 100644 --- a/trunk/drivers/scsi/libfc/fc_rport.c +++ b/trunk/drivers/scsi/libfc/fc_rport.c @@ -1962,7 +1962,7 @@ static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len, rdata->flags |= FC_RP_FLAGS_RETRY; rdata->supported_classes = FC_COS_CLASS3; - if (!(lport->service_params & FCP_SPPF_INIT_FCN)) + if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR)) return 0; spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR; diff --git a/trunk/drivers/scsi/lpfc/lpfc_debugfs.c b/trunk/drivers/scsi/lpfc/lpfc_debugfs.c index f525ecb7a9c6..f63f5ff7f274 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/trunk/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1178,7 +1178,7 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence) pos = file->f_pos + off; break; case 2: - pos = debug->len + off; + pos = debug->len - off; } return (pos < 0 || pos > debug->len) ? 
-EINVAL : (file->f_pos = pos); } diff --git a/trunk/drivers/scsi/qla2xxx/qla_inline.h b/trunk/drivers/scsi/qla2xxx/qla_inline.h index 0a5c8951cebb..98ab921070d2 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_inline.h +++ b/trunk/drivers/scsi/qla2xxx/qla_inline.h @@ -278,14 +278,3 @@ qla2x00_do_host_ramp_up(scsi_qla_host_t *vha) set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags); } - -static inline void -qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status) -{ - if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && - (status & MBX_INTERRUPT) && ha->flags.mbox_int) { - set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); - complete(&ha->mbx_intr_comp); - } -} diff --git a/trunk/drivers/scsi/qla2xxx/qla_isr.c b/trunk/drivers/scsi/qla2xxx/qla_isr.c index d2a4c75e5b8f..259d9205d876 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_isr.c +++ b/trunk/drivers/scsi/qla2xxx/qla_isr.c @@ -104,9 +104,14 @@ qla2100_intr_handler(int irq, void *dev_id) RD_REG_WORD(®->hccr); } } - qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && + (status & MBX_INTERRUPT) && ha->flags.mbox_int) { + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + complete(&ha->mbx_intr_comp); + } + return (IRQ_HANDLED); } @@ -216,9 +221,14 @@ qla2300_intr_handler(int irq, void *dev_id) WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD_RELAXED(®->hccr); } - qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && + (status & MBX_INTERRUPT) && ha->flags.mbox_int) { + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + complete(&ha->mbx_intr_comp); + } + return (IRQ_HANDLED); } @@ -2603,9 +2613,14 @@ qla24xx_intr_handler(int irq, void *dev_id) if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) ndelay(3500); } - qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && + (status & MBX_INTERRUPT) && ha->flags.mbox_int) { + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + complete(&ha->mbx_intr_comp); + } + return IRQ_HANDLED; } @@ -2748,9 +2763,13 @@ qla24xx_msix_default(int irq, void *dev_id) } WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); } while (0); - qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && + (status & MBX_INTERRUPT) && ha->flags.mbox_int) { + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + complete(&ha->mbx_intr_comp); + } return IRQ_HANDLED; } diff --git a/trunk/drivers/scsi/qla2xxx/qla_mbx.c b/trunk/drivers/scsi/qla2xxx/qla_mbx.c index 3587ec267fa6..9e5d89db7272 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_mbx.c +++ b/trunk/drivers/scsi/qla2xxx/qla_mbx.c @@ -179,6 +179,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ); + clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); + } else { ql_dbg(ql_dbg_mbx, vha, 0x1011, "Cmd=%x Polling Mode.\n", command); diff --git a/trunk/drivers/scsi/qla2xxx/qla_mr.c b/trunk/drivers/scsi/qla2xxx/qla_mr.c index a6df55838365..937fed8cb038 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_mr.c +++ b/trunk/drivers/scsi/qla2xxx/qla_mr.c @@ -148,6 +148,9 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp) spin_unlock_irqrestore(&ha->hardware_lock, flags); wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * 
HZ); + + clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); + } else { ql_dbg(ql_dbg_mbx, vha, 0x112c, "Cmd=%x Polling Mode.\n", command); @@ -2931,10 +2934,13 @@ qlafx00_intr_handler(int irq, void *dev_id) QLAFX00_CLR_INTR_REG(ha, clr_intr); QLAFX00_RD_INTR_REG(ha); } - - qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && + (status & MBX_INTERRUPT) && ha->flags.mbox_int) { + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + complete(&ha->mbx_intr_comp); + } return IRQ_HANDLED; } diff --git a/trunk/drivers/scsi/qla2xxx/qla_nx.c b/trunk/drivers/scsi/qla2xxx/qla_nx.c index cce0cd0d7ec4..10754f518303 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_nx.c +++ b/trunk/drivers/scsi/qla2xxx/qla_nx.c @@ -2074,6 +2074,9 @@ qla82xx_intr_handler(int irq, void *dev_id) } WRT_REG_DWORD(®->host_int, 0); } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (!ha->flags.msi_enabled) + qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); #ifdef QL_DEBUG_LEVEL_17 if (!irq && ha->flags.eeh_busy) @@ -2082,12 +2085,11 @@ qla82xx_intr_handler(int irq, void *dev_id) status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); #endif - qla2x00_handle_mbx_completion(ha, status); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - - if (!ha->flags.msi_enabled) - qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); - + if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && + (status & MBX_INTERRUPT) && ha->flags.mbox_int) { + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + complete(&ha->mbx_intr_comp); + } return IRQ_HANDLED; } @@ -2147,6 +2149,8 @@ qla82xx_msix_default(int irq, void *dev_id) WRT_REG_DWORD(®->host_int, 0); } while (0); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + #ifdef QL_DEBUG_LEVEL_17 if (!irq && ha->flags.eeh_busy) ql_log(ql_log_warn, vha, 0x5044, @@ -2154,9 +2158,11 @@ qla82xx_msix_default(int irq, void *dev_id) status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); #endif - qla2x00_handle_mbx_completion(ha, status); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - + if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && + (status & MBX_INTERRUPT) && ha->flags.mbox_int) { + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + complete(&ha->mbx_intr_comp); + } return IRQ_HANDLED; } @@ -3339,7 +3345,7 @@ void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha) ha->flags.mbox_busy = 0; ql_log(ql_log_warn, vha, 0x6010, "Doing premature completion of mbx command.\n"); - if (test_and_clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) + if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) complete(&ha->mbx_intr_comp); } } diff --git a/trunk/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/trunk/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 66b0b26a1381..d182c96e17ea 100644 --- a/trunk/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/trunk/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -688,12 +688,8 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen * for qla_tgt_xmit_response LLD code */ - if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { - se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT; - se_cmd->residual_count = 0; - } se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; - se_cmd->residual_count += se_cmd->data_length; + se_cmd->residual_count = se_cmd->data_length; cmd->bufflen = 0; } @@ -1374,7 +1370,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) dump_stack(); return; } - target_wait_for_sess_cmds(se_sess); + target_wait_for_sess_cmds(se_sess, 0); 
transport_deregister_session_configfs(sess->se_sess); transport_deregister_session(sess->se_sess); diff --git a/trunk/drivers/scsi/scsi_proc.c b/trunk/drivers/scsi/scsi_proc.c index 86f0c5d5c116..db66357211ed 100644 --- a/trunk/drivers/scsi/scsi_proc.c +++ b/trunk/drivers/scsi/scsi_proc.c @@ -84,7 +84,6 @@ static int proc_scsi_host_open(struct inode *inode, struct file *file) static const struct file_operations proc_scsi_fops = { .open = proc_scsi_host_open, - .release = single_release, .read = seq_read, .llseek = seq_lseek, .write = proc_scsi_host_write diff --git a/trunk/drivers/spi/spi-atmel.c b/trunk/drivers/spi/spi-atmel.c index 380387a47b1d..787bd2c22bca 100644 --- a/trunk/drivers/spi/spi-atmel.c +++ b/trunk/drivers/spi/spi-atmel.c @@ -526,17 +526,13 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master, } if (xfer->tx_buf) - if (xfer->bits_per_word > 8) - spi_writel(as, TDR, *(u16 *)(xfer->tx_buf)); - else - spi_writel(as, TDR, *(u8 *)(xfer->tx_buf)); + spi_writel(as, TDR, *(u8 *)(xfer->tx_buf)); else spi_writel(as, TDR, 0); dev_dbg(master->dev.parent, - " start pio xfer %p: len %u tx %p rx %p bitpw %d\n", - xfer, xfer->len, xfer->tx_buf, xfer->rx_buf, - xfer->bits_per_word); + " start pio xfer %p: len %u tx %p rx %p\n", + xfer, xfer->len, xfer->tx_buf, xfer->rx_buf); /* Enable relevant interrupts */ spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES)); @@ -954,39 +950,21 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer) { u8 *txp; u8 *rxp; - u16 *txp16; - u16 *rxp16; unsigned long xfer_pos = xfer->len - as->current_remaining_bytes; if (xfer->rx_buf) { - if (xfer->bits_per_word > 8) { - rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos); - *rxp16 = spi_readl(as, RDR); - } else { - rxp = ((u8 *)xfer->rx_buf) + xfer_pos; - *rxp = spi_readl(as, RDR); - } + rxp = ((u8 *)xfer->rx_buf) + xfer_pos; + *rxp = spi_readl(as, RDR); } else { spi_readl(as, RDR); } - if (xfer->bits_per_word > 8) { - as->current_remaining_bytes -= 2; - if (as->current_remaining_bytes < 0) - as->current_remaining_bytes = 0; - } else { - as->current_remaining_bytes--; - } + + as->current_remaining_bytes--; if (as->current_remaining_bytes) { if (xfer->tx_buf) { - if (xfer->bits_per_word > 8) { - txp16 = (u16 *)(((u8 *)xfer->tx_buf) - + xfer_pos + 2); - spi_writel(as, TDR, *txp16); - } else { - txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1; - spi_writel(as, TDR, *txp); - } + txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1; + spi_writel(as, TDR, *txp); } else { spi_writel(as, TDR, 0); } @@ -1400,16 +1378,9 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) } } - if (xfer->bits_per_word > 8) { - if (xfer->len % 2) { - dev_dbg(&spi->dev, "buffer len should be 16 bits aligned\n"); - return -EINVAL; - } - } - /* FIXME implement these protocol options!! 
*/ - if (xfer->speed_hz < spi->max_speed_hz) { - dev_dbg(&spi->dev, "can't change speed in transfer\n"); + if (xfer->speed_hz) { + dev_dbg(&spi->dev, "no protocol options yet\n"); return -ENOPROTOOPT; } diff --git a/trunk/drivers/spi/spi-davinci.c b/trunk/drivers/spi/spi-davinci.c index 50b13c9b1ab6..2e8f24a1fb95 100644 --- a/trunk/drivers/spi/spi-davinci.c +++ b/trunk/drivers/spi/spi-davinci.c @@ -784,7 +784,7 @@ static const struct of_device_id davinci_spi_of_match[] = { }, { }, }; -MODULE_DEVICE_TABLE(of, davinci_spi_of_match); +MODULE_DEVICE_TABLE(of, davini_spi_of_match); /** * spi_davinci_get_pdata - Get platform data from DTS binding diff --git a/trunk/drivers/spi/spi-pxa2xx-dma.c b/trunk/drivers/spi/spi-pxa2xx-dma.c index 6427600b5bbe..c735c5a008a2 100644 --- a/trunk/drivers/spi/spi-pxa2xx-dma.c +++ b/trunk/drivers/spi/spi-pxa2xx-dma.c @@ -59,7 +59,7 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data, int ret; sg_free_table(sgt); - ret = sg_alloc_table(sgt, nents, GFP_ATOMIC); + ret = sg_alloc_table(sgt, nents, GFP_KERNEL); if (ret) return ret; } diff --git a/trunk/drivers/spi/spi-pxa2xx.c b/trunk/drivers/spi/spi-pxa2xx.c index 48b396fced0a..f5d84d6f8222 100644 --- a/trunk/drivers/spi/spi-pxa2xx.c +++ b/trunk/drivers/spi/spi-pxa2xx.c @@ -1075,7 +1075,7 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev) acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev)) return NULL; - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + pdata = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL); if (!pdata) { dev_err(&pdev->dev, "failed to allocate memory for platform data\n"); diff --git a/trunk/drivers/spi/spi-s3c64xx.c b/trunk/drivers/spi/spi-s3c64xx.c index 71cc3e6ef47c..5000586cb98d 100644 --- a/trunk/drivers/spi/spi-s3c64xx.c +++ b/trunk/drivers/spi/spi-s3c64xx.c @@ -444,7 +444,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) } ret = pm_runtime_get_sync(&sdd->pdev->dev); - if (ret < 0) { + if (ret != 0) { dev_err(dev, "Failed to enable device: %d\n", ret); goto out_tx; } diff --git a/trunk/drivers/spi/spi-sh-hspi.c b/trunk/drivers/spi/spi-sh-hspi.c index eab593eaaafa..60cfae51c713 100644 --- a/trunk/drivers/spi/spi-sh-hspi.c +++ b/trunk/drivers/spi/spi-sh-hspi.c @@ -89,7 +89,7 @@ static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val) if ((mask & hspi_read(hspi, SPSR)) == val) return 0; - udelay(10); + msleep(20); } dev_err(hspi->dev, "timeout\n"); diff --git a/trunk/drivers/spi/spi-tegra20-sflash.c b/trunk/drivers/spi/spi-tegra20-sflash.c index 09df8e22dba0..d65c000efe35 100644 --- a/trunk/drivers/spi/spi-tegra20-sflash.c +++ b/trunk/drivers/spi/spi-tegra20-sflash.c @@ -489,6 +489,11 @@ static int tegra_sflash_probe(struct platform_device *pdev) tegra_sflash_parse_dt(tsd); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "No IO memory resource\n"); + ret = -ENODEV; + goto exit_free_master; + } tsd->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(tsd->base)) { ret = PTR_ERR(tsd->base); diff --git a/trunk/drivers/spi/spi-topcliff-pch.c b/trunk/drivers/spi/spi-topcliff-pch.c index 637d728fbeb5..35f60bd252dd 100644 --- a/trunk/drivers/spi/spi-topcliff-pch.c +++ b/trunk/drivers/spi/spi-topcliff-pch.c @@ -1487,7 +1487,7 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev) return 0; err_spi_register_master: - free_irq(board_dat->pdev->irq, data); + free_irq(board_dat->pdev->irq, board_dat); err_request_irq: pch_spi_free_resources(board_dat, data); 
err_spi_get_resources: @@ -1667,7 +1667,6 @@ static int pch_spi_probe(struct pci_dev *pdev, pd_dev = platform_device_alloc("pch-spi", i); if (!pd_dev) { dev_err(&pdev->dev, "platform_device_alloc failed\n"); - retval = -ENOMEM; goto err_platform_device; } pd_dev_save->pd_save[i] = pd_dev; diff --git a/trunk/drivers/spi/spi-xilinx.c b/trunk/drivers/spi/spi-xilinx.c index 34d18dcfa0db..e1d769607425 100644 --- a/trunk/drivers/spi/spi-xilinx.c +++ b/trunk/drivers/spi/spi-xilinx.c @@ -267,6 +267,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) { struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); u32 ipif_ier; + u16 cr; /* We get here with transmitter inhibited */ @@ -275,6 +276,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) xspi->remaining_bytes = t->len; INIT_COMPLETION(xspi->done); + xilinx_spi_fill_tx_fifo(xspi); /* Enable the transmit empty interrupt, which we use to determine * progress on the transmission. @@ -283,41 +285,12 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY, xspi->regs + XIPIF_V123B_IIER_OFFSET); - for (;;) { - u16 cr; - u8 sr; - - xilinx_spi_fill_tx_fifo(xspi); - - /* Start the transfer by not inhibiting the transmitter any - * longer - */ - cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & - ~XSPI_CR_TRANS_INHIBIT; - xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); - - wait_for_completion(&xspi->done); - - /* A transmit has just completed. Process received data and - * check for more data to transmit. Always inhibit the - * transmitter while the Isr refills the transmit register/FIFO, - * or make sure it is stopped if we're done. - */ - cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); - xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, - xspi->regs + XSPI_CR_OFFSET); - - /* Read out all the data from the Rx FIFO */ - sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); - while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) { - xspi->rx_fn(xspi); - sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); - } + /* Start the transfer by not inhibiting the transmitter any longer */ + cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & + ~XSPI_CR_TRANS_INHIBIT; + xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); - /* See if there is more data to send */ - if (!xspi->remaining_bytes > 0) - break; - } + wait_for_completion(&xspi->done); /* Disable the transmit empty interrupt */ xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET); @@ -341,7 +314,38 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET); if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */ - complete(&xspi->done); + u16 cr; + u8 sr; + + /* A transmit has just completed. Process received data and + * check for more data to transmit. Always inhibit the + * transmitter while the Isr refills the transmit register/FIFO, + * or make sure it is stopped if we're done. 
+ */ + cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); + xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, + xspi->regs + XSPI_CR_OFFSET); + + /* Read out all the data from the Rx FIFO */ + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); + while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) { + xspi->rx_fn(xspi); + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); + } + + /* See if there is more data to send */ + if (xspi->remaining_bytes > 0) { + xilinx_spi_fill_tx_fifo(xspi); + /* Start the transfer by not inhibiting the + * transmitter any longer + */ + xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); + } else { + /* No more data to send. + * Indicate the transfer is completed. + */ + complete(&xspi->done); + } } return IRQ_HANDLED; diff --git a/trunk/drivers/spi/spi.c b/trunk/drivers/spi/spi.c index 32b7bb111eb6..163fd802b7ac 100644 --- a/trunk/drivers/spi/spi.c +++ b/trunk/drivers/spi/spi.c @@ -334,7 +334,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master) spi->dev.parent = &master->dev; spi->dev.bus = &spi_bus_type; spi->dev.release = spidev_release; - spi->cs_gpio = -ENOENT; + spi->cs_gpio = -EINVAL; device_initialize(&spi->dev); return spi; } @@ -1067,11 +1067,8 @@ static int of_spi_register_master(struct spi_master *master) nb = of_gpio_named_count(np, "cs-gpios"); master->num_chipselect = max(nb, (int)master->num_chipselect); - /* Return error only for an incorrectly formed cs-gpios property */ - if (nb == 0 || nb == -ENOENT) + if (nb < 1) return 0; - else if (nb < 0) - return nb; cs = devm_kzalloc(&master->dev, sizeof(int) * master->num_chipselect, @@ -1082,7 +1079,7 @@ static int of_spi_register_master(struct spi_master *master) return -ENOMEM; for (i = 0; i < master->num_chipselect; i++) - cs[i] = -ENOENT; + cs[i] = -EINVAL; for (i = 0; i < nb; i++) cs[i] = of_get_named_gpio(np, "cs-gpios", i); diff --git a/trunk/drivers/staging/Kconfig b/trunk/drivers/staging/Kconfig index aefe820a8005..4e8a1794f50a 100644 --- a/trunk/drivers/staging/Kconfig +++ b/trunk/drivers/staging/Kconfig @@ -72,10 +72,10 @@ source "drivers/staging/sep/Kconfig" source "drivers/staging/iio/Kconfig" -source "drivers/staging/zsmalloc/Kconfig" - source "drivers/staging/zram/Kconfig" +source "drivers/staging/zsmalloc/Kconfig" + source "drivers/staging/wlags49_h2/Kconfig" source "drivers/staging/wlags49_h25/Kconfig" diff --git a/trunk/drivers/staging/android/alarm-dev.c b/trunk/drivers/staging/android/alarm-dev.c index 6dc27dac679d..ceb1c643753d 100644 --- a/trunk/drivers/staging/android/alarm-dev.c +++ b/trunk/drivers/staging/android/alarm-dev.c @@ -264,8 +264,6 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } rv = alarm_do_ioctl(file, cmd, &ts); - if (rv) - return rv; switch (ANDROID_ALARM_BASE_CMD(cmd)) { case ANDROID_ALARM_GET_TIME(0): @@ -274,7 +272,7 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; } - return 0; + return rv; } #ifdef CONFIG_COMPAT static long alarm_compat_ioctl(struct file *file, unsigned int cmd, @@ -297,8 +295,6 @@ static long alarm_compat_ioctl(struct file *file, unsigned int cmd, } rv = alarm_do_ioctl(file, cmd, &ts); - if (rv) - return rv; switch (ANDROID_ALARM_BASE_CMD(cmd)) { case ANDROID_ALARM_GET_TIME(0): /* NOTE: we modified cmd above */ @@ -307,7 +303,7 @@ static long alarm_compat_ioctl(struct file *file, unsigned int cmd, break; } - return 0; + return rv; } #endif diff --git a/trunk/drivers/staging/android/logger.c b/trunk/drivers/staging/android/logger.c index 9bd874789ce5..b040200a5a55 100644 --- 
a/trunk/drivers/staging/android/logger.c +++ b/trunk/drivers/staging/android/logger.c @@ -242,7 +242,7 @@ static ssize_t do_read_log_to_user(struct logger_log *log, * 'log->buffer' which contains the first entry readable by 'euid' */ static size_t get_next_entry_by_uid(struct logger_log *log, - size_t off, kuid_t euid) + size_t off, uid_t euid) { while (off != log->w_off) { struct logger_entry *entry; @@ -251,7 +251,7 @@ static size_t get_next_entry_by_uid(struct logger_log *log, entry = get_entry_header(log, off, &scratch); - if (uid_eq(entry->euid, euid)) + if (entry->euid == euid) return off; next_len = sizeof(struct logger_entry) + entry->len; diff --git a/trunk/drivers/staging/android/logger.h b/trunk/drivers/staging/android/logger.h index 70af7d805dff..cc6bbd99c8e0 100644 --- a/trunk/drivers/staging/android/logger.h +++ b/trunk/drivers/staging/android/logger.h @@ -66,7 +66,7 @@ struct logger_entry { __s32 tid; __s32 sec; __s32 nsec; - kuid_t euid; + uid_t euid; char msg[0]; }; diff --git a/trunk/drivers/staging/comedi/Kconfig b/trunk/drivers/staging/comedi/Kconfig index 87e852a0ef49..7871579bb83d 100644 --- a/trunk/drivers/staging/comedi/Kconfig +++ b/trunk/drivers/staging/comedi/Kconfig @@ -981,7 +981,6 @@ config COMEDI_ME_DAQ config COMEDI_NI_6527 tristate "NI 6527 support" - depends on HAS_DMA select COMEDI_MITE ---help--- Enable support for the National Instruments 6527 PCI card @@ -991,7 +990,6 @@ config COMEDI_NI_6527 config COMEDI_NI_65XX tristate "NI 65xx static dio PCI card support" - depends on HAS_DMA select COMEDI_MITE ---help--- Enable support for National Instruments 65xx static dio boards. @@ -1005,7 +1003,6 @@ config COMEDI_NI_65XX config COMEDI_NI_660X tristate "NI 660x counter/timer PCI card support" - depends on HAS_DMA select COMEDI_NI_TIOCMD ---help--- Enable support for National Instruments PCI-6601 (ni_660x), PCI-6602, @@ -1016,7 +1013,6 @@ config COMEDI_NI_660X config COMEDI_NI_670X tristate "NI 670x PCI card support" - depends on HAS_DMA select COMEDI_MITE ---help--- Enable support for National Instruments PCI-6703 and PCI-6704 @@ -1026,7 +1022,6 @@ config COMEDI_NI_670X config COMEDI_NI_LABPC_PCI tristate "NI Lab-PC PCI-1200 support" - depends on HAS_DMA select COMEDI_NI_LABPC select COMEDI_MITE ---help--- @@ -1037,7 +1032,6 @@ config COMEDI_NI_LABPC_PCI config COMEDI_NI_PCIDIO tristate "NI PCI-DIO32HS, PCI-6533, PCI-6534 support" - depends on HAS_DMA select COMEDI_MITE select COMEDI_8255 ---help--- @@ -1049,7 +1043,6 @@ config COMEDI_NI_PCIDIO config COMEDI_NI_PCIMIO tristate "NI PCI-MIO-E series and M series support" - depends on HAS_DMA select COMEDI_NI_TIOCMD select COMEDI_8255 select COMEDI_FC @@ -1102,12 +1095,10 @@ config COMEDI_SSV_DNP called ssv_dnp. 
config COMEDI_MITE - depends on HAS_DMA tristate config COMEDI_NI_TIOCMD tristate - depends on HAS_DMA select COMEDI_NI_TIO select COMEDI_MITE diff --git a/trunk/drivers/staging/comedi/comedi_buf.c b/trunk/drivers/staging/comedi/comedi_buf.c index d4be0e68509b..ca709901fb3e 100644 --- a/trunk/drivers/staging/comedi/comedi_buf.c +++ b/trunk/drivers/staging/comedi/comedi_buf.c @@ -51,12 +51,10 @@ static void __comedi_buf_free(struct comedi_device *dev, clear_bit(PG_reserved, &(virt_to_page(buf->virt_addr)->flags)); if (s->async_dma_dir != DMA_NONE) { -#ifdef CONFIG_HAS_DMA dma_free_coherent(dev->hw_dev, PAGE_SIZE, buf->virt_addr, buf->dma_addr); -#endif } else { free_page((unsigned long)buf->virt_addr); } @@ -76,12 +74,6 @@ static void __comedi_buf_alloc(struct comedi_device *dev, struct comedi_buf_page *buf; unsigned i; - if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) { - dev_err(dev->class_dev, - "dma buffer allocation not supported\n"); - return; - } - async->buf_page_list = vzalloc(sizeof(*buf) * n_pages); if (async->buf_page_list) pages = vmalloc(sizeof(struct page *) * n_pages); @@ -92,15 +84,11 @@ static void __comedi_buf_alloc(struct comedi_device *dev, for (i = 0; i < n_pages; i++) { buf = &async->buf_page_list[i]; if (s->async_dma_dir != DMA_NONE) -#ifdef CONFIG_HAS_DMA buf->virt_addr = dma_alloc_coherent(dev->hw_dev, PAGE_SIZE, &buf->dma_addr, GFP_KERNEL | __GFP_COMP); -#else - break; -#endif else buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL); if (!buf->virt_addr) diff --git a/trunk/drivers/staging/comedi/comedi_fops.c b/trunk/drivers/staging/comedi/comedi_fops.c index 924c54c9c31f..00f2547024ec 100644 --- a/trunk/drivers/staging/comedi/comedi_fops.c +++ b/trunk/drivers/staging/comedi/comedi_fops.c @@ -246,6 +246,9 @@ static int resize_async_buffer(struct comedi_device *dev, return -EBUSY; } + if (!async->prealloc_buf) + return -EINVAL; + /* make sure buffer is an integral number of pages * (we round up) */ new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK; diff --git a/trunk/drivers/staging/comedi/drivers/ni_labpc.c b/trunk/drivers/staging/comedi/drivers/ni_labpc.c index 77a7bb632580..3d978f34d212 100644 --- a/trunk/drivers/staging/comedi/drivers/ni_labpc.c +++ b/trunk/drivers/staging/comedi/drivers/ni_labpc.c @@ -976,7 +976,8 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) /* clear flip-flop to make sure 2-byte registers for * count and address get set correctly */ clear_dma_ff(devpriv->dma_chan); - set_dma_addr(devpriv->dma_chan, devpriv->dma_addr); + set_dma_addr(devpriv->dma_chan, + virt_to_bus(devpriv->dma_buffer)); /* set appropriate size of transfer */ devpriv->dma_transfer_size = labpc_suggest_transfer_size(cmd); if (cmd->stop_src == TRIG_COUNT && @@ -1088,7 +1089,7 @@ static void labpc_drain_dma(struct comedi_device *dev) devpriv->count -= num_points; /* set address and count for next transfer */ - set_dma_addr(devpriv->dma_chan, devpriv->dma_addr); + set_dma_addr(devpriv->dma_chan, virt_to_bus(devpriv->dma_buffer)); set_dma_count(devpriv->dma_chan, leftover * sample_size); release_dma_lock(flags); @@ -1740,9 +1741,6 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it) unsigned long dma_flags; devpriv->dma_chan = dma_chan; - devpriv->dma_addr = - virt_to_bus(devpriv->dma_buffer); - dma_flags = claim_dma_lock(); disable_dma(devpriv->dma_chan); set_dma_mode(devpriv->dma_chan, DMA_MODE_READ); diff --git a/trunk/drivers/staging/comedi/drivers/ni_labpc.h 
b/trunk/drivers/staging/comedi/drivers/ni_labpc.h index 4b691f5a9965..615f16f271c0 100644 --- a/trunk/drivers/staging/comedi/drivers/ni_labpc.h +++ b/trunk/drivers/staging/comedi/drivers/ni_labpc.h @@ -82,7 +82,6 @@ struct labpc_private { unsigned int divisor_b1; unsigned int dma_chan; /* dma channel to use */ u16 *dma_buffer; /* buffer ai will dma into */ - phys_addr_t dma_addr; /* transfer size in bytes for current transfer */ unsigned int dma_transfer_size; /* we are using dma/fifo-half-full/etc. */ diff --git a/trunk/drivers/staging/comedi/drivers/ni_mio_common.c b/trunk/drivers/staging/comedi/drivers/ni_mio_common.c index 8c5dee9b3b05..a46d579016d9 100644 --- a/trunk/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/trunk/drivers/staging/comedi/drivers/ni_mio_common.c @@ -310,11 +310,9 @@ static int ni_gpct_insn_read(struct comedi_device *dev, static int ni_gpct_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); -#ifdef PCIDMA static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int ni_gpct_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); -#endif static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static void handle_gpct_interrupt(struct comedi_device *dev, @@ -4619,7 +4617,9 @@ static int ni_E_init(struct comedi_device *dev) for (j = 0; j < NUM_GPCT; ++j) { s = &dev->subdevices[NI_GPCT_SUBDEV(j)]; s->type = COMEDI_SUBD_COUNTER; - s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL; + s->subdev_flags = + SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL | SDF_CMD_READ + /* | SDF_CMD_WRITE */ ; s->n_chan = 3; if (board->reg_type & ni_reg_m_series_mask) s->maxdata = 0xffffffff; @@ -4628,14 +4628,11 @@ static int ni_E_init(struct comedi_device *dev) s->insn_read = &ni_gpct_insn_read; s->insn_write = &ni_gpct_insn_write; s->insn_config = &ni_gpct_insn_config; -#ifdef PCIDMA - s->subdev_flags |= SDF_CMD_READ /* | SDF_CMD_WRITE */; s->do_cmd = &ni_gpct_cmd; s->len_chanlist = 1; s->do_cmdtest = &ni_gpct_cmdtest; s->cancel = &ni_gpct_cancel; s->async_dma_dir = DMA_BIDIRECTIONAL; -#endif s->private = &devpriv->counter_dev->counters[j]; devpriv->counter_dev->counters[j].chip_index = 0; @@ -5219,10 +5216,10 @@ static int ni_gpct_insn_write(struct comedi_device *dev, return ni_tio_winsn(counter, insn, data); } -#ifdef PCIDMA static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { int retval; +#ifdef PCIDMA struct ni_gpct *counter = s->private; /* const struct comedi_cmd *cmd = &s->async->cmd; */ @@ -5236,20 +5233,23 @@ static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s) ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL); ni_e_series_enable_second_irq(dev, counter->counter_index, 1); retval = ni_tio_cmd(counter, s->async); +#else + retval = -ENOTSUPP; +#endif return retval; } -#endif -#ifdef PCIDMA static int ni_gpct_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { +#ifdef PCIDMA struct ni_gpct *counter = s->private; return ni_tio_cmdtest(counter, cmd); +#else return -ENOTSUPP; -} #endif +} static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { diff --git a/trunk/drivers/staging/dwc2/Kconfig b/trunk/drivers/staging/dwc2/Kconfig index d15d9d58e5ac..f0b4739c65a1 100644 --- a/trunk/drivers/staging/dwc2/Kconfig +++ b/trunk/drivers/staging/dwc2/Kconfig @@ -2,6 +2,7 @@ config USB_DWC2 tristate "DesignWare USB2 
DRD Core Support" depends on USB depends on VIRT_TO_BUS + select USB_OTG_UTILS help Say Y or M here if your system has a Dual Role HighSpeed USB controller based on the DesignWare HSOTG IP Core. @@ -38,7 +39,6 @@ config USB_DWC2_TRACK_MISSED_SOFS bool "Enable Missed SOF Tracking" help Say Y here to enable logging of missed SOF events to the dmesg log. - WARNING: This feature is still experimental. If in doubt, say N. config USB_DWC2_DEBUG_PERIODIC diff --git a/trunk/drivers/staging/dwc2/hcd.c b/trunk/drivers/staging/dwc2/hcd.c index 8551ccedf037..827ab781ae9b 100644 --- a/trunk/drivers/staging/dwc2/hcd.c +++ b/trunk/drivers/staging/dwc2/hcd.c @@ -2804,8 +2804,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq, /* Set device flags indicating whether the HCD supports DMA */ if (hsotg->core_params->dma_enable > 0) { - if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0) - dev_warn(hsotg->dev, "can't set DMA mask\n"); + if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(31)) < 0) + dev_warn(hsotg->dev, + "can't enable workaround for >2GB RAM\n"); if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(31)) < 0) dev_warn(hsotg->dev, "can't enable workaround for >2GB RAM\n"); diff --git a/trunk/drivers/staging/dwc2/hcd_intr.c b/trunk/drivers/staging/dwc2/hcd_intr.c index e24062f0a49e..6e5dbed6ccec 100644 --- a/trunk/drivers/staging/dwc2/hcd_intr.c +++ b/trunk/drivers/staging/dwc2/hcd_intr.c @@ -56,6 +56,8 @@ static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg) { #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS +#warning Compiling code to track missed SOFs + u16 curr_frame_number = hsotg->frame_number; if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) { diff --git a/trunk/drivers/staging/dwc2/platform.c b/trunk/drivers/staging/dwc2/platform.c index 44cce2fa6361..1f3d581a1078 100644 --- a/trunk/drivers/staging/dwc2/platform.c +++ b/trunk/drivers/staging/dwc2/platform.c @@ -95,14 +95,6 @@ static int dwc2_driver_probe(struct platform_device *dev) hsotg->dev = &dev->dev; - /* - * Use reasonable defaults so platforms don't have to provide these. 
- */ - if (!dev->dev.dma_mask) - dev->dev.dma_mask = &dev->dev.coherent_dma_mask; - if (!dev->dev.coherent_dma_mask) - dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - irq = platform_get_irq(dev, 0); if (irq < 0) { dev_err(&dev->dev, "missing IRQ resource\n"); @@ -110,6 +102,11 @@ static int dwc2_driver_probe(struct platform_device *dev) } res = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&dev->dev, "missing memory base resource\n"); + return -EINVAL; + } + hsotg->regs = devm_ioremap_resource(&dev->dev, res); if (IS_ERR(hsotg->regs)) return PTR_ERR(hsotg->regs); diff --git a/trunk/drivers/staging/gdm72xx/Kconfig b/trunk/drivers/staging/gdm72xx/Kconfig index 69059138de4a..3c18efe31365 100644 --- a/trunk/drivers/staging/gdm72xx/Kconfig +++ b/trunk/drivers/staging/gdm72xx/Kconfig @@ -39,7 +39,7 @@ if WIMAX_GDM72XX_USB config WIMAX_GDM72XX_USB_PM bool "Enable power managerment support" - depends on PM_RUNTIME + depends on USB_SUSPEND endif # WIMAX_GDM72XX_USB diff --git a/trunk/drivers/staging/iio/adc/mxs-lradc.c b/trunk/drivers/staging/iio/adc/mxs-lradc.c index 163c638e4095..2856b8fd44ad 100644 --- a/trunk/drivers/staging/iio/adc/mxs-lradc.c +++ b/trunk/drivers/staging/iio/adc/mxs-lradc.c @@ -690,6 +690,7 @@ static void mxs_lradc_trigger_remove(struct iio_dev *iio) static int mxs_lradc_buffer_preenable(struct iio_dev *iio) { struct mxs_lradc *lradc = iio_priv(iio); + struct iio_buffer *buffer = iio->buffer; int ret = 0, chan, ofs = 0; unsigned long enable = 0; uint32_t ctrl4_set = 0; @@ -697,7 +698,7 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio) uint32_t ctrl1_irq = 0; const uint32_t chan_value = LRADC_CH_ACCUMULATE | ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET); - const int len = bitmap_weight(iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS); + const int len = bitmap_weight(buffer->scan_mask, LRADC_MAX_TOTAL_CHANS); if (!len) return -EINVAL; @@ -724,7 +725,7 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio) lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR); writel(0xff, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR); - for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) { + for_each_set_bit(chan, buffer->scan_mask, LRADC_MAX_TOTAL_CHANS) { ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs); ctrl4_clr |= LRADC_CTRL4_LRADCSELECT_MASK(ofs); ctrl1_irq |= LRADC_CTRL1_LRADC_IRQ_EN(ofs); diff --git a/trunk/drivers/staging/iio/light/tsl2x7x_core.c b/trunk/drivers/staging/iio/light/tsl2x7x_core.c index c99f890cc6c6..d060f2572512 100644 --- a/trunk/drivers/staging/iio/light/tsl2x7x_core.c +++ b/trunk/drivers/staging/iio/light/tsl2x7x_core.c @@ -1869,7 +1869,6 @@ static int tsl2x7x_probe(struct i2c_client *clientp, dev_info(&chip->client->dev, "%s: i2c device found does not match expected id\n", __func__); - ret = -EINVAL; goto fail1; } @@ -1908,7 +1907,7 @@ static int tsl2x7x_probe(struct i2c_client *clientp, if (ret) { dev_err(&clientp->dev, "%s: irq request failed", __func__); - goto fail1; + goto fail2; } } @@ -1921,17 +1920,17 @@ static int tsl2x7x_probe(struct i2c_client *clientp, if (ret) { dev_err(&clientp->dev, "%s: iio registration failed\n", __func__); - goto fail2; + goto fail1; } dev_info(&clientp->dev, "%s Light sensor found.\n", id->name); return 0; -fail2: +fail1: if (clientp->irq) free_irq(clientp->irq, indio_dev); -fail1: +fail2: iio_device_free(indio_dev); return ret; diff --git a/trunk/drivers/staging/imx-drm/Kconfig b/trunk/drivers/staging/imx-drm/Kconfig index ef699f753186..8c9e40390f42 
100644 --- a/trunk/drivers/staging/imx-drm/Kconfig +++ b/trunk/drivers/staging/imx-drm/Kconfig @@ -1,7 +1,6 @@ config DRM_IMX tristate "DRM Support for Freescale i.MX" select DRM_KMS_HELPER - select VIDEOMODE_HELPERS select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) @@ -20,12 +19,10 @@ config DRM_IMX_FB_HELPER config DRM_IMX_PARALLEL_DISPLAY tristate "Support for parallel displays" depends on DRM_IMX - select VIDEOMODE_HELPERS config DRM_IMX_TVE tristate "Support for TV and VGA displays" depends on DRM_IMX - select REGMAP_MMIO help Choose this to enable the internal Television Encoder (TVe) found on i.MX53 processors. @@ -33,7 +30,6 @@ config DRM_IMX_TVE config DRM_IMX_IPUV3_CORE tristate "IPUv3 core support" depends on DRM_IMX - depends on RESET_CONTROLLER help Choose this if you have a i.MX5/6 system and want to use the IPU. This option only enables IPU base @@ -42,6 +38,5 @@ config DRM_IMX_IPUV3_CORE config DRM_IMX_IPUV3 tristate "DRM Support for i.MX IPUv3" depends on DRM_IMX - depends on DRM_IMX_IPUV3_CORE help Choose this if you have a i.MX5 or i.MX6 processor. diff --git a/trunk/drivers/staging/imx-drm/imx-tve.c b/trunk/drivers/staging/imx-drm/imx-tve.c index 03892de9bd7e..ac1634464407 100644 --- a/trunk/drivers/staging/imx-drm/imx-tve.c +++ b/trunk/drivers/staging/imx-drm/imx-tve.c @@ -670,9 +670,7 @@ static int imx_tve_probe(struct platform_device *pdev) tve->dac_reg = devm_regulator_get(&pdev->dev, "dac"); if (!IS_ERR(tve->dac_reg)) { regulator_set_voltage(tve->dac_reg, 2750000, 2750000); - ret = regulator_enable(tve->dac_reg); - if (ret) - return ret; + regulator_enable(tve->dac_reg); } tve->clk = devm_clk_get(&pdev->dev, "tve"); diff --git a/trunk/drivers/staging/imx-drm/ipuv3-crtc.c b/trunk/drivers/staging/imx-drm/ipuv3-crtc.c index ff5c63350932..ea61c869110f 100644 --- a/trunk/drivers/staging/imx-drm/ipuv3-crtc.c +++ b/trunk/drivers/staging/imx-drm/ipuv3-crtc.c @@ -316,14 +316,31 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc, static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc) { + struct drm_pending_vblank_event *e; + struct timeval now; unsigned long flags; struct drm_device *drm = ipu_crtc->base.dev; spin_lock_irqsave(&drm->event_lock, flags); - if (ipu_crtc->page_flip_event) - drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event); + + e = ipu_crtc->page_flip_event; + if (!e) { + spin_unlock_irqrestore(&drm->event_lock, flags); + return; + } + + do_gettimeofday(&now); + e->event.sequence = 0; + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; ipu_crtc->page_flip_event = NULL; + imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); + + list_add_tail(&e->base.link, &e->base.file_priv->event_list); + + wake_up_interruptible(&e->base.file_priv->event_wait); + spin_unlock_irqrestore(&drm->event_lock, flags); } diff --git a/trunk/drivers/staging/media/davinci_vpfe/Kconfig b/trunk/drivers/staging/media/davinci_vpfe/Kconfig index 12f321dd2399..2e4a28b018e8 100644 --- a/trunk/drivers/staging/media/davinci_vpfe/Kconfig +++ b/trunk/drivers/staging/media/davinci_vpfe/Kconfig @@ -1,6 +1,6 @@ config VIDEO_DM365_VPFE tristate "DM365 VPFE Media Controller Capture Driver" - depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF + depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_VPFE_CAPTURE select VIDEOBUF2_DMA_CONTIG help Support for DM365 VPFE based Media Controller Capture driver. 
diff --git a/trunk/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/trunk/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c index d8ce20d2fbda..b88e1ddce229 100644 --- a/trunk/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c +++ b/trunk/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c @@ -639,8 +639,7 @@ static int vpfe_probe(struct platform_device *pdev) if (ret) goto probe_free_dev_mem; - ret = vpfe_initialize_modules(vpfe_dev, pdev); - if (ret) + if (vpfe_initialize_modules(vpfe_dev, pdev)) goto probe_disable_clock; vpfe_dev->media_dev.dev = vpfe_dev->pdev; @@ -664,8 +663,7 @@ static int vpfe_probe(struct platform_device *pdev) /* set the driver data in platform device */ platform_set_drvdata(pdev, vpfe_dev); /* register subdevs/entities */ - ret = vpfe_register_entities(vpfe_dev); - if (ret) + if (vpfe_register_entities(vpfe_dev)) goto probe_out_v4l2_unregister; ret = vpfe_attach_irq(vpfe_dev); diff --git a/trunk/drivers/staging/media/solo6x10/Kconfig b/trunk/drivers/staging/media/solo6x10/Kconfig index 34f3b6d02d2a..ec32776ff547 100644 --- a/trunk/drivers/staging/media/solo6x10/Kconfig +++ b/trunk/drivers/staging/media/solo6x10/Kconfig @@ -1,11 +1,9 @@ config SOLO6X10 tristate "Softlogic 6x10 MPEG codec cards" depends on PCI && VIDEO_DEV && SND && I2C - depends on FONTS select VIDEOBUF2_DMA_SG select VIDEOBUF2_DMA_CONTIG select SND_PCM - select FONT_8x16 ---help--- This driver supports the Softlogic based MPEG-4 and h.264 codec cards. diff --git a/trunk/drivers/staging/nvec/nvec.c b/trunk/drivers/staging/nvec/nvec.c index 197c393c4ca7..a88959f9a07a 100644 --- a/trunk/drivers/staging/nvec/nvec.c +++ b/trunk/drivers/staging/nvec/nvec.c @@ -123,20 +123,6 @@ int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb, } EXPORT_SYMBOL_GPL(nvec_register_notifier); -/** - * nvec_unregister_notifier - Unregister a notifier with nvec - * @nvec: A &struct nvec_chip - * @nb: The notifier block to unregister - * - * Unregisters a notifier with @nvec. The notifier will be removed from the - * atomic notifier chain. 
- */ -int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&nvec->notifier_list, nb); -} -EXPORT_SYMBOL_GPL(nvec_unregister_notifier); - /** * nvec_status_notifier - The final notifier * @@ -199,7 +185,7 @@ static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec, * * Free the given message */ -void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg) +inline void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg) { if (msg != &nvec->tx_scratch) dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool); @@ -814,6 +800,11 @@ static int tegra_nvec_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "no mem resource?\n"); + return -ENODEV; + } + base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); @@ -824,7 +815,7 @@ static int tegra_nvec_probe(struct platform_device *pdev) return -ENODEV; } - i2c_clk = devm_clk_get(&pdev->dev, "div-clk"); + i2c_clk = clk_get(&pdev->dev, "div-clk"); if (IS_ERR(i2c_clk)) { dev_err(nvec->dev, "failed to get controller clock\n"); return -ENODEV; @@ -911,11 +902,8 @@ static int tegra_nvec_remove(struct platform_device *pdev) nvec_toggle_global_events(nvec, false); mfd_remove_devices(nvec->dev); - nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier); cancel_work_sync(&nvec->rx_work); cancel_work_sync(&nvec->tx_work); - /* FIXME: needs check wether nvec is responsible for power off */ - pm_power_off = NULL; return 0; } diff --git a/trunk/drivers/staging/nvec/nvec.h b/trunk/drivers/staging/nvec/nvec.h index 2b1316d87470..b7a14bc0ab91 100644 --- a/trunk/drivers/staging/nvec/nvec.h +++ b/trunk/drivers/staging/nvec/nvec.h @@ -197,8 +197,9 @@ extern int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb, unsigned int events); -extern int nvec_unregister_notifier(struct nvec_chip *dev, - struct notifier_block *nb); +extern int nvec_unregister_notifier(struct device *dev, + struct notifier_block *nb, + unsigned int events); extern void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg); diff --git a/trunk/drivers/staging/nvec/nvec_kbd.c b/trunk/drivers/staging/nvec/nvec_kbd.c index a0ec52a4114f..7445ce6422bb 100644 --- a/trunk/drivers/staging/nvec/nvec_kbd.c +++ b/trunk/drivers/staging/nvec/nvec_kbd.c @@ -169,15 +169,8 @@ static int nvec_kbd_probe(struct platform_device *pdev) static int nvec_kbd_remove(struct platform_device *pdev) { - struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent); - char disable_kbd[] = { NVEC_KBD, DISABLE_KBD }, - uncnfg_wake_key_reporting[] = { NVEC_KBD, CNFG_WAKE_KEY_REPORTING, - false }; - nvec_write_async(nvec, uncnfg_wake_key_reporting, 3); - nvec_write_async(nvec, disable_kbd, 2); - nvec_unregister_notifier(nvec, &keys_dev.notifier); - input_unregister_device(keys_dev.input); + input_free_device(keys_dev.input); return 0; } @@ -195,5 +188,4 @@ module_platform_driver(nvec_kbd_driver); MODULE_AUTHOR("Marc Dietrich "); MODULE_DESCRIPTION("NVEC keyboard driver"); -MODULE_ALIAS("platform:nvec-kbd"); MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/staging/nvec/nvec_power.c b/trunk/drivers/staging/nvec/nvec_power.c index aacfcd6954a3..296f7b9a8c8c 100644 --- a/trunk/drivers/staging/nvec/nvec_power.c +++ b/trunk/drivers/staging/nvec/nvec_power.c @@ -414,7 +414,6 @@ static int nvec_power_remove(struct platform_device *pdev) struct nvec_power *power = platform_get_drvdata(pdev); 
cancel_delayed_work_sync(&power->poller); - nvec_unregister_notifier(power->nvec, &power->notifier); switch (pdev->id) { case AC: power_supply_unregister(&nvec_psy); diff --git a/trunk/drivers/staging/nvec/nvec_ps2.c b/trunk/drivers/staging/nvec/nvec_ps2.c index 06dbb02085a9..aff6b9b9f9aa 100644 --- a/trunk/drivers/staging/nvec/nvec_ps2.c +++ b/trunk/drivers/staging/nvec/nvec_ps2.c @@ -106,7 +106,7 @@ static int nvec_mouse_probe(struct platform_device *pdev) struct serio *ser_dev; char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 }; - ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL); + ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL); if (ser_dev == NULL) return -ENOMEM; @@ -133,11 +133,6 @@ static int nvec_mouse_probe(struct platform_device *pdev) static int nvec_mouse_remove(struct platform_device *pdev) { - struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent); - - ps2_sendcommand(ps2_dev.ser_dev, DISABLE_MOUSE); - ps2_stopstreaming(ps2_dev.ser_dev); - nvec_unregister_notifier(nvec, &ps2_dev.notifier); serio_unregister_port(ps2_dev.ser_dev); return 0; @@ -184,5 +179,4 @@ module_platform_driver(nvec_mouse_driver); MODULE_DESCRIPTION("NVEC mouse driver"); MODULE_AUTHOR("Marc Dietrich "); -MODULE_ALIAS("platform:nvec-mouse"); MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/staging/sep/Kconfig b/trunk/drivers/staging/sep/Kconfig index aab945a316ea..185b676d858a 100644 --- a/trunk/drivers/staging/sep/Kconfig +++ b/trunk/drivers/staging/sep/Kconfig @@ -1,6 +1,6 @@ config DX_SEP tristate "Discretix SEP driver" - depends on PCI && CRYPTO + depends on PCI help Discretix SEP driver; used for the security processor subsystem on board the Intel Mobile Internet Device and adds SEP availability diff --git a/trunk/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/trunk/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c index 386362c9964f..fe667dde43ce 100644 --- a/trunk/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c +++ b/trunk/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c @@ -1087,11 +1087,7 @@ static int synaptics_rmi4_resume(struct device *dev) unsigned char intr_status; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); - retval = regulator_enable(rmi4_data->regulator); - if (retval) { - dev_err(dev, "Regulator enable failed (%d)\n", retval); - return retval; - } + regulator_enable(rmi4_data->regulator); enable_irq(rmi4_data->i2c_client->irq); rmi4_data->touch_stopped = false; diff --git a/trunk/drivers/staging/vt6656/hostap.c b/trunk/drivers/staging/vt6656/hostap.c index c699a3058b39..f4f1bf7a30fd 100644 --- a/trunk/drivers/staging/vt6656/hostap.c +++ b/trunk/drivers/staging/vt6656/hostap.c @@ -133,7 +133,7 @@ static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked) DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", pDevice->dev->name, pDevice->apdev->name); } - free_netdev(pDevice->apdev); + kfree(pDevice->apdev); pDevice->apdev = NULL; pDevice->bEnable8021x = false; pDevice->bEnableHostWEP = false; diff --git a/trunk/drivers/staging/vt6656/iwctl.c b/trunk/drivers/staging/vt6656/iwctl.c index d0cf7d8a20e5..c335808211ee 100644 --- a/trunk/drivers/staging/vt6656/iwctl.c +++ b/trunk/drivers/staging/vt6656/iwctl.c @@ -1345,12 +1345,9 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info, return rc; } - spin_lock_irq(&pDevice->lock); - if (wrq->disabled) { pDevice->ePSMode = WMAC_POWER_CAM; PSvDisablePowerSaving(pDevice); - spin_unlock_irq(&pDevice->lock); return rc; } if ((wrq->flags & 
IW_POWER_TYPE) == IW_POWER_TIMEOUT) { @@ -1361,9 +1358,6 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info, pDevice->ePSMode = WMAC_POWER_FAST; PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval); } - - spin_unlock_irq(&pDevice->lock); - switch (wrq->flags & IW_POWER_MODE) { case IW_POWER_UNICAST_R: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_UNICAST_R \n"); diff --git a/trunk/drivers/staging/zcache/ramster.h b/trunk/drivers/staging/zcache/ramster.h index a858666eae68..e1f91d5a0f6a 100644 --- a/trunk/drivers/staging/zcache/ramster.h +++ b/trunk/drivers/staging/zcache/ramster.h @@ -11,6 +11,10 @@ #ifndef _ZCACHE_RAMSTER_H_ #define _ZCACHE_RAMSTER_H_ +#ifdef CONFIG_RAMSTER_MODULE +#define CONFIG_RAMSTER +#endif + #ifdef CONFIG_RAMSTER #include "ramster/ramster.h" #else diff --git a/trunk/drivers/staging/zcache/ramster/debug.c b/trunk/drivers/staging/zcache/ramster/debug.c index 5b26ee977c2f..327e4f0d98e1 100644 --- a/trunk/drivers/staging/zcache/ramster/debug.c +++ b/trunk/drivers/staging/zcache/ramster/debug.c @@ -1,8 +1,6 @@ #include #include "debug.h" -ssize_t ramster_foreign_eph_pages; -ssize_t ramster_foreign_pers_pages; #ifdef CONFIG_DEBUG_FS #include diff --git a/trunk/drivers/staging/zcache/ramster/ramster-howto.txt b/trunk/drivers/staging/zcache/ramster/ramster-howto.txt deleted file mode 100644 index 7b1ee3bbfdd5..000000000000 --- a/trunk/drivers/staging/zcache/ramster/ramster-howto.txt +++ /dev/null @@ -1,366 +0,0 @@ - RAMSTER HOW-TO - -Author: Dan Magenheimer -Ramster maintainer: Konrad Wilk - -This is a HOWTO document for ramster which, as of this writing, is in -the kernel as a subdirectory of zcache in drivers/staging, called ramster. -(Zcache can be built with or without ramster functionality.) If enabled -and properly configured, ramster allows memory capacity load balancing -across multiple machines in a cluster. Further, the ramster code serves -as an example of asynchronous access for zcache (as well as cleancache and -frontswap) that may prove useful for future transcendent memory -implementations, such as KVM and NVRAM. While ramster works today on -any network connection that supports kernel sockets, its features may -become more interesting on future high-speed fabrics/interconnects. - -Ramster requires both kernel and userland support. The userland support, -called ramster-tools, is known to work with EL6-based distros, but is a -set of poorly-hacked slightly-modified cluster tools based on ocfs2, which -includes an init file, a config file, and a userland binary that interfaces -to the kernel. This state of userland support reflects the abysmal userland -skills of this suitably-embarrassed author; any help/patches to turn -ramster-tools into more distributable rpms/debs useful for a wider range -of distros would be appreciated. The source RPM that can be used as a -starting point is available at: - http://oss.oracle.com/projects/tmem/files/RAMster/ - -As a result of this author's ignorance, userland setup described in this -HOWTO assumes an EL6 distro and is described in EL6 syntax. Apologies -if this offends anyone! - -Kernel support has only been tested on x86_64. Systems with an active -ocfs2 filesystem should work, but since ramster leverages a lot of -code from ocfs2, there may be latent issues. A kernel configuration that -includes CONFIG_OCFS2_FS should build OK, and should certainly run OK -if no ocfs2 filesystem is mounted. 
- -This HOWTO demonstrates memory capacity load balancing for a two-node -cluster, where one node called the "local" node becomes overcommitted -and the other node called the "remote" node provides additional RAM -capacity for use by the local node. Ramster is capable of more complex -topologies; see the last section titled "ADVANCED RAMSTER TOPOLOGIES". - -If you find any terms in this HOWTO unfamiliar or don't understand the -motivation for ramster, the following LWN reading is recommended: --- Transcendent Memory in a Nutshell (lwn.net/Articles/454795) --- The future calculus of memory management (lwn.net/Articles/475681) -And since ramster is built on top of zcache, this article may be helpful: --- In-kernel memory compression (lwn.net/Articles/545244) - -Now that you've memorized the contents of those articles, let's get started! - -A. PRELIMINARY - -1) Install two x86_64 Linux systems that are known to work when - upgraded to a recent upstream Linux kernel version. - -On each system: - -2) Configure, build and install, then boot Linux, just to ensure it - can be done with an unmodified upstream kernel. Confirm you booted - the upstream kernel with "uname -a". - -3) If you plan to do any performance testing or unless you plan to - test only swapping, the "WasActive" patch is also highly recommended. - (Search lkml.org for WasActive, apply the patch, rebuild your kernel.) - For a demo or simple testing, the patch can be ignored. - -4) Install ramster-tools as root. An x86_64 rpm for EL6-based systems - can be found at: - http://oss.oracle.com/projects/tmem/files/RAMster/ - (Sorry but for now, non-EL6 users must recreate ramster-tools on - their own from source. See above.) - -5) Ensure that debugfs is mounted at each boot. Examples below assume it - is mounted at /sys/kernel/debug. - -B. BUILDING RAMSTER INTO THE KERNEL - -Do the following on each system: - -1) Using the kernel configuration mechanism of your choice, change - your config to include: - - CONFIG_CLEANCACHE=y - CONFIG_FRONTSWAP=y - CONFIG_STAGING=y - CONFIG_CONFIGFS_FS=y # NOTE: MUST BE y, not m - CONFIG_ZCACHE=y - CONFIG_RAMSTER=y - - For a linux-3.10 or later kernel, you should also set: - - CONFIG_ZCACHE_DEBUG=y - CONFIG_RAMSTER_DEBUG=y - - Before building the kernel please doublecheck your kernel config - file to ensure all of the settings are correct. - -2) Build this kernel and change your boot file (e.g. /etc/grub.conf) - so that the new kernel will boot. - -3) Add "zcache" and "ramster" as kernel boot parameters for the new kernel. - -4) Reboot each system approximately simultaneously. - -5) Check dmesg to ensure there are some messages from ramster, prefixed - by "ramster:" - - # dmesg | grep ramster - - You should also see a lot of files in: - - # ls /sys/kernel/debug/zcache - # ls /sys/kernel/debug/ramster - - These are mostly counters for various zcache and ramster activities. - You should also see files in: - - # ls /sys/kernel/mm/ramster - - These are sysfs files that control ramster as we shall see. - - Ramster now will act as a single-system zcache on each system - but doesn't yet know anything about the cluster so can't yet do - anything remotely. - -C. CONFIGURING THE RAMSTER CLUSTER - -This part can be error prone unless you are familiar with clustering -filesystems. We need to describe the cluster in a /etc/ramster.conf -file and the init scripts that parse it are extremely picky about -the syntax. - -1) Create a /etc/ramster.conf file and ensure it is identical on both - systems. 
This file mimics the ocfs2 format and there is a good amount - of documentation that can be searched for ocfs2.conf, but you can use: - - cluster: - name = ramster - node_count = 2 - node: - name = system1 - cluster = ramster - number = 0 - ip_address = my.ip.ad.r1 - ip_port = 7777 - node: - name = system2 - cluster = ramster - number = 1 - ip_address = my.ip.ad.r2 - ip_port = 7777 - - You must ensure that the "name" field in the file exactly matches - the output of "hostname" on each system; if "hostname" shows a - fully-qualified hostname, ensure the name is fully qualified in - /etc/ramster.conf. Obviously, substitute my.ip.ad.rx with proper - ip addresses. - -2) Enable the ramster service and configure it. If you used the - EL6 ramster-tools, this would be: - - # chkconfig --add ramster - # service ramster configure - - Set "load on boot" to "y", cluster to start is "ramster" (or whatever - name you chose in ramster.conf), heartbeat dead threshold as "500", - network idle timeout as "1000000". Leave the others as default. - -3) Reboot both systems. After reboot, try (assuming EL6 ramster-tools): - - # service ramster status - - You should see "Checking RAMSTER cluster "ramster": Online". If you do - not, something is wrong and ramster will not work. Note that you - should also see that the driver for "configfs" is loaded and mounted, - the driver for ocfs2_dlmfs is not loaded, and some numbers for network - parameters. You will also see "Checking RAMSTER heartbeat: Not active". - That's all OK. - -4) Now you need to start the cluster heartbeat; the cluster is not "up" - until all nodes detect a heartbeat. In a real cluster, heartbeat detection - is done via a cluster filesystem, but ramster doesn't require one. Some - hack-y kernel code in ramster can start the heartbeat for you though if - you tell it what nodes are "up". To enable the heartbeat, do: - - # echo 0 > /sys/kernel/mm/ramster/manual_node_up - # echo 1 > /sys/kernel/mm/ramster/manual_node_up - - This must be done on BOTH nodes and, to avoid timeouts, must be done - approximately concurrently on both nodes. On an EL6 system, it is - convenient to put these lines in /etc/rc.local. To confirm that the - cluster is now up, on both systems do: - - # dmesg | grep ramster - - You should see ramster "Accepted connection" messages in dmesg on both - nodes after this. Note that if you check userland status again with - - # service ramster status - - you will still see "Checking RAMSTER heartbeat: Not active". That's - still OK... the ramster kernel heartbeat hack doesn't communicate to - userland. - -5) You now must tell each node the node to which it should "remotify" pages. - On this two node cluster, we will assume the "local" node, node 0, has - memory overcommitted and will use ramster to utilize RAM capacity on - the "remote node", node 1. To configure this, on node 0, you do: - - # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum - - You should see "ramster: node 1 set as remotification target" in dmesg - on node 0. Again, on EL6, /etc/rc.local is a good place to put this - on node 0 so you don't forget to do it at each boot. - -6) One more step: By default, the ramster code does not "remotify" any - pages; this is primarily for testing purposes, but sometimes it is - useful. 
This may change in the future, but for now, on node 0, you do: - - # echo 1 > /sys/kernel/mm/ramster/pers_remotify_enable - # echo 1 > /sys/kernel/mm/ramster/eph_remotify_enable - - The first enables remotifying swap (persistent, aka frontswap) pages, - the second enables remotifying of page cache (ephemeral, cleancache) - pages. - - On EL6, these lines can also be put in /etc/rc.local (AFTER the - node_up lines), or at the beginning of a script that runs a workload. - -7) Note that most testing has been done with both/all machines booted - roughly simultaneously to avoid cluster timeouts. Ideally, you should - do this too unless you are trying to break ramster rather than just - use it. ;-) - -D. TESTING RAMSTER - -1) Note that ramster has no value unless pages get "remotified". For - swap/frontswap/persistent pages, this doesn't happen unless/until - the workload would cause swapping to occur, at which point pages - are put into frontswap/zcache, and the remotification thread starts - working. To get to the point where the system swaps, you either - need a workload for which the working set exceeds the RAM in the - system; or you need to somehow reduce the amount of RAM one of - the system sees. This latter is easy when testing in a VM, but - harder on physical systems. In some cases, "mem=xxxM" on the - kernel command line restricts memory, but for some values of xxx - the kernel may fail to boot. One may also try creating a fixed - RAMdisk, doing nothing with it, but ensuring that it eats up a fixed - amount of RAM. - -2) To see if ramster is working, on the "remote node", node 1, try: - - # grep . /sys/kernel/debug/ramster/foreign_* - # # note, that is space-dot-space between grep and the pathname - - to monitor the number (and max) ephemeral and persistent pages - that ramster has sent. If these stay at zero, ramster is not working - either because the workload on the local node (node 0) isn't creating - enough memory pressure or because "remotifying" isn't working. On the - local system, node 0, you can watch lots of useful information also. - Try: - - grep . /sys/kernel/debug/zcache/*pageframes* \ - /sys/kernel/debug/zcache/*zbytes* \ - /sys/kernel/debug/zcache/*zpages* \ - /sys/kernel/debug/ramster/*remote* - - Of particular note are the remote_*_pages_succ_get counters. These - show how many disk reads and/or disk writes have been avoided on the - overcommitted local system by storing pages remotely using ramster. - - At the risk of information overload, you can also grep: - - /sys/kernel/debug/cleancache/* and /sys/kernel/debug/frontswap/* - - These show, for example, how many disk reads and/or disk writes have - been avoided by using zcache to optimize RAM on the local system. - - -AUTOMATIC SWAP REPATRIATION - -You may notice that while the systems are idle, the foreign persistent -page count on the remote machine slowly decreases. This is because -ramster implements "frontswap selfshrinking": When possible, swap -pages that have been remotified are slowly repatriated to the local -machine. This is so that local RAM can be used when possible and -so that, in case of remote machine crash, the probability of loss -of data is reduced. - -REBOOTING / POWEROFF - -If a system is shut down while some of its swap pages still reside -on a remote system, the system may lock up during the shutdown -sequence. This will occur if the network is shut down before the -swap mechansim is shut down, which is the default ordering on many -distros. 
To avoid this annoying problem, simply shut off the swap -subsystem before starting the shutdown sequence, e.g.: - - # swapoff -a - # reboot - -Ideally, this swapoff-before-ifdown ordering should be enforced permanently -using shutdown scripts. - -KNOWN PROBLEMS - -1) You may periodically see messages such as: - - ramster_r2net, message length problem - - This is harmless but indicates that a node is sending messages - containing compressed pages that exceed the maximum for zcache - (PAGE_SIZE*15/16). The sender side needs to be fixed. - -2) If you see a "No longer connected to node..." message or a "No connection - established with node X after N seconds", it is possible you may - be in an unrecoverable state. If you are certain all of the - appropriate cluster configuration steps described above have been - performed, try rebooting the two servers concurrently to see if - the cluster starts. - - Note that "Connection to node... shutdown, state 7" is an intermediate - connection state. As long as you later see "Accepted connection", the - intermediate states are harmless. - -3) There are known issues in counting certain values. As a result - you may see periodic warnings from the kernel. Almost always you - will see "ramster: bad accounting for XXX". There are also "WARN_ONCE" - messages. If you see kernel warnings with a tombstone, please report - them. They are harmless but reflect bugs that need to be eventually fixed. - -ADVANCED RAMSTER TOPOLOGIES - -The kernel code for ramster can support up to eight nodes in a cluster, -but no testing has been done with more than three nodes. - -In the example described above, the "remote" node serves as a RAM -overflow for the "local" node. This can be made symmetric by appropriate -settings of the sysfs remote_target_nodenum file. For example, by setting: - - # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum - -on node 0, and - - # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum - -on node 1, each node can serve as a RAM overflow for the other. - -For more than two nodes, a "RAM server" can be configured. For a -three node system, set: - - # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum - -on node 1, and - - # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum - -on node 2. Then node 0 is a RAM server for node 1 and node 2. - -In this implementation of ramster, any remote node is potentially a single -point of failure (SPOF). Though the probability of failure is reduced -by automatic swap repatriation (see above), a proposed future enhancement -to ramster improves high-availability for the cluster by sending a copy -of each page of date to two other nodes. Patches welcome! diff --git a/trunk/drivers/staging/zcache/ramster/ramster.c b/trunk/drivers/staging/zcache/ramster/ramster.c index a937ce1fa27a..b18b887db79f 100644 --- a/trunk/drivers/staging/zcache/ramster/ramster.c +++ b/trunk/drivers/staging/zcache/ramster/ramster.c @@ -66,6 +66,8 @@ static int ramster_remote_target_nodenum __read_mostly = -1; /* Used by this code. */ long ramster_flnodes; +ssize_t ramster_foreign_eph_pages; +ssize_t ramster_foreign_pers_pages; /* FIXME frontswap selfshrinking knobs in debugfs? 
*/ static LIST_HEAD(ramster_rem_op_list); @@ -397,18 +399,14 @@ void ramster_count_foreign_pages(bool eph, int count) inc_ramster_foreign_eph_pages(); } else { dec_ramster_foreign_eph_pages(); -#ifdef CONFIG_RAMSTER_DEBUG WARN_ON_ONCE(ramster_foreign_eph_pages < 0); -#endif } } else { if (count > 0) { inc_ramster_foreign_pers_pages(); } else { dec_ramster_foreign_pers_pages(); -#ifdef CONFIG_RAMSTER_DEBUG WARN_ON_ONCE(ramster_foreign_pers_pages < 0); -#endif } } } diff --git a/trunk/drivers/staging/zcache/zcache-main.c b/trunk/drivers/staging/zcache/zcache-main.c index dcceed29d31a..522cb8e55142 100644 --- a/trunk/drivers/staging/zcache/zcache-main.c +++ b/trunk/drivers/staging/zcache/zcache-main.c @@ -1922,15 +1922,15 @@ static int zcache_init(void) #ifdef CONFIG_ZCACHE_MODULE #ifdef CONFIG_RAMSTER -module_param(ramster_enabled, bool, S_IRUGO); +module_param(ramster_enabled, int, S_IRUGO); module_param(disable_frontswap_selfshrink, int, S_IRUGO); #endif -module_param(disable_cleancache, bool, S_IRUGO); -module_param(disable_frontswap, bool, S_IRUGO); +module_param(disable_cleancache, int, S_IRUGO); +module_param(disable_frontswap, int, S_IRUGO); #ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS module_param(frontswap_has_exclusive_gets, bool, S_IRUGO); #endif -module_param(disable_frontswap_ignore_nonactive, bool, S_IRUGO); +module_param(disable_frontswap_ignore_nonactive, int, S_IRUGO); module_param(zcache_comp_name, charp, S_IRUGO); module_init(zcache_init); MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/target/iscsi/iscsi_target.c b/trunk/drivers/target/iscsi/iscsi_target.c index d7705e5824fb..ffbc6a94be52 100644 --- a/trunk/drivers/target/iscsi/iscsi_target.c +++ b/trunk/drivers/target/iscsi/iscsi_target.c @@ -651,7 +651,7 @@ static int iscsit_add_reject( cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); if (!cmd->buf_ptr) { pr_err("Unable to allocate memory for cmd->buf_ptr\n"); - iscsit_free_cmd(cmd, false); + iscsit_release_cmd(cmd); return -1; } @@ -697,7 +697,7 @@ int iscsit_add_reject_from_cmd( cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); if (!cmd->buf_ptr) { pr_err("Unable to allocate memory for cmd->buf_ptr\n"); - iscsit_free_cmd(cmd, false); + iscsit_release_cmd(cmd); return -1; } @@ -1250,7 +1250,7 @@ static u32 iscsit_do_crypto_hash_sg( static void iscsit_do_crypto_hash_buf( struct hash_desc *hash, - const void *buf, + unsigned char *buf, u32 payload_length, u32 padding, u8 *pad_bytes, @@ -1743,7 +1743,7 @@ int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd, return 0; out: if (cmd) - iscsit_free_cmd(cmd, false); + iscsit_release_cmd(cmd); ping_out: kfree(ping_data); return ret; @@ -2251,7 +2251,7 @@ iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) { pr_err("Received logout request on connection that" " is not in logged in state, ignoring request.\n"); - iscsit_free_cmd(cmd, false); + iscsit_release_cmd(cmd); return 0; } @@ -2524,8 +2524,9 @@ static int iscsit_send_conn_drop_async_message( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, + (unsigned char *)hdr, ISCSI_HDR_LEN, + 0, NULL, (u8 *)header_digest); cmd->tx_size += ISCSI_CRC_LEN; pr_debug("Attaching CRC32C HeaderDigest to" @@ -2661,8 +2662,9 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn) if 
(conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, + (unsigned char *)cmd->pdu, ISCSI_HDR_LEN, + 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -2839,8 +2841,9 @@ iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn) if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0], - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, + (unsigned char *)&cmd->pdu[0], ISCSI_HDR_LEN, + 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -2897,8 +2900,9 @@ static int iscsit_send_unsolicited_nopin( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, + (unsigned char *)hdr, ISCSI_HDR_LEN, + 0, NULL, (u8 *)header_digest); tx_size += ISCSI_CRC_LEN; pr_debug("Attaching CRC32C HeaderDigest to" @@ -2945,8 +2949,9 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn) if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, + (unsigned char *)hdr, ISCSI_HDR_LEN, + 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3035,8 +3040,9 @@ static int iscsit_send_r2t( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, + (unsigned char *)hdr, ISCSI_HDR_LEN, + 0, NULL, (u8 *)header_digest); cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3250,8 +3256,9 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn) if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, + (unsigned char *)cmd->pdu, ISCSI_HDR_LEN, + 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3322,8 +3329,9 @@ iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, + (unsigned char *)hdr, ISCSI_HDR_LEN, + 0, NULL, (u8 *)header_digest); cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3496,8 +3504,9 @@ static int iscsit_send_text_rsp( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, + (unsigned char *)hdr, ISCSI_HDR_LEN, + 0, NULL, (u8 *)header_digest); iov[0].iov_len += 
ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3548,11 +3557,11 @@ static int iscsit_send_reject( struct iscsi_cmd *cmd, struct iscsi_conn *conn) { - struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0]; + u32 iov_count = 0, tx_size = 0; + struct iscsi_reject *hdr; struct kvec *iov; - u32 iov_count = 0, tx_size; - iscsit_build_reject(cmd, conn, hdr); + iscsit_build_reject(cmd, conn, (struct iscsi_reject *)&cmd->pdu[0]); iov = &cmd->iov_misc[0]; iov[iov_count].iov_base = cmd->pdu; @@ -3565,8 +3574,9 @@ static int iscsit_send_reject( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, + (unsigned char *)hdr, ISCSI_HDR_LEN, + 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3575,8 +3585,9 @@ static int iscsit_send_reject( } if (conn->conn_ops->DataDigest) { - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr, - ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, + (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN, + 0, NULL, (u8 *)&cmd->data_crc); iov[iov_count].iov_base = &cmd->data_crc; iov[iov_count++].iov_len = ISCSI_CRC_LEN; @@ -3665,7 +3676,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); - iscsit_free_cmd(cmd, false); + iscsit_free_cmd(cmd); break; case ISTATE_SEND_NOPIN_WANT_RESPONSE: iscsit_mod_nopin_response_timer(conn); @@ -4122,7 +4133,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) iscsit_increment_maxcmdsn(cmd, sess); - iscsit_free_cmd(cmd, true); + iscsit_free_cmd(cmd); spin_lock_bh(&conn->cmd_lock); } diff --git a/trunk/drivers/target/iscsi/iscsi_target_configfs.c b/trunk/drivers/target/iscsi/iscsi_target_configfs.c index 8d8b3ff68490..13e9e715ad2e 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_configfs.c +++ b/trunk/drivers/target/iscsi/iscsi_target_configfs.c @@ -155,7 +155,7 @@ static ssize_t lio_target_np_store_iser( struct iscsi_tpg_np *tpg_np_iser = NULL; char *endptr; u32 op; - int rc = 0; + int rc; op = simple_strtoul(page, &endptr, 0); if ((op != 1) && (op != 0)) { @@ -174,32 +174,31 @@ static ssize_t lio_target_np_store_iser( return -EINVAL; if (op) { - rc = request_module("ib_isert"); - if (rc != 0) { + int rc = request_module("ib_isert"); + if (rc != 0) pr_warn("Unable to request_module for ib_isert\n"); - rc = 0; - } tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, np->np_ip, tpg_np, ISCSI_INFINIBAND); - if (IS_ERR(tpg_np_iser)) { - rc = PTR_ERR(tpg_np_iser); + if (!tpg_np_iser || IS_ERR(tpg_np_iser)) goto out; - } } else { tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND); - if (tpg_np_iser) { - rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); - if (rc < 0) - goto out; - } + if (!tpg_np_iser) + goto out; + + rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); + if (rc < 0) + goto out; } + printk("lio_target_np_store_iser() done, op: %d\n", op); + iscsit_put_tpg(tpg); return count; out: iscsit_put_tpg(tpg); - return rc; + return -EINVAL; } TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR); diff --git a/trunk/drivers/target/iscsi/iscsi_target_erl0.c b/trunk/drivers/target/iscsi/iscsi_target_erl0.c index dcb199da06b9..8e6298cc8839 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_erl0.c +++ 
b/trunk/drivers/target/iscsi/iscsi_target_erl0.c @@ -842,11 +842,11 @@ int iscsit_stop_time2retain_timer(struct iscsi_session *sess) return 0; sess->time2retain_timer_flags |= ISCSI_TF_STOP; - spin_unlock(&se_tpg->session_lock); + spin_unlock_bh(&se_tpg->session_lock); del_timer_sync(&sess->time2retain_timer); - spin_lock(&se_tpg->session_lock); + spin_lock_bh(&se_tpg->session_lock); sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING; pr_debug("Stopped Time2Retain Timer for SID: %u\n", sess->sid); diff --git a/trunk/drivers/target/iscsi/iscsi_target_erl1.c b/trunk/drivers/target/iscsi/iscsi_target_erl1.c index 40d9dbca987b..7816af6cdd12 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_erl1.c +++ b/trunk/drivers/target/iscsi/iscsi_target_erl1.c @@ -823,7 +823,7 @@ static int iscsit_attach_ooo_cmdsn( /* * CmdSN is greater than the tail of the list. */ - if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn)) + if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn) list_add_tail(&ooo_cmdsn->ooo_list, &sess->sess_ooo_cmdsn_list); else { @@ -833,12 +833,11 @@ static int iscsit_attach_ooo_cmdsn( */ list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, ooo_list) { - if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn)) + if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) continue; - /* Insert before this entry */ list_add(&ooo_cmdsn->ooo_list, - ooo_tmp->ooo_list.prev); + &ooo_tmp->ooo_list); break; } } diff --git a/trunk/drivers/target/iscsi/iscsi_target_erl2.c b/trunk/drivers/target/iscsi/iscsi_target_erl2.c index 45a5afd5ea13..ba6091bf93fc 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_erl2.c +++ b/trunk/drivers/target/iscsi/iscsi_target_erl2.c @@ -143,7 +143,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_del(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); - iscsit_free_cmd(cmd, true); + iscsit_free_cmd(cmd); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -165,7 +165,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_del(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); - iscsit_free_cmd(cmd, true); + iscsit_free_cmd(cmd); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -248,7 +248,7 @@ void iscsit_discard_cr_cmds_by_expstatsn( iscsit_remove_cmd_from_connection_recovery(cmd, sess); spin_unlock(&cr->conn_recovery_cmd_lock); - iscsit_free_cmd(cmd, true); + iscsit_free_cmd(cmd); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -302,7 +302,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); - iscsit_free_cmd(cmd, true); + iscsit_free_cmd(cmd); spin_lock_bh(&conn->cmd_lock); } spin_unlock_bh(&conn->cmd_lock); @@ -355,7 +355,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); - iscsit_free_cmd(cmd, true); + iscsit_free_cmd(cmd); spin_lock_bh(&conn->cmd_lock); continue; } @@ -375,7 +375,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) { list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); - iscsit_free_cmd(cmd, true); + iscsit_free_cmd(cmd); spin_lock_bh(&conn->cmd_lock); continue; } diff --git a/trunk/drivers/target/iscsi/iscsi_target_login.c b/trunk/drivers/target/iscsi/iscsi_target_login.c index 3402241be87c..bb5d5c5bce65 
100644 --- a/trunk/drivers/target/iscsi/iscsi_target_login.c +++ b/trunk/drivers/target/iscsi/iscsi_target_login.c @@ -984,6 +984,8 @@ int iscsi_target_setup_login_socket( } np->np_transport = t; + printk("Set np->np_transport to %p -> %s\n", np->np_transport, + np->np_transport->name); return 0; } @@ -1000,6 +1002,7 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) conn->sock = new_sock; conn->login_family = np->np_sockaddr.ss_family; + printk("iSCSI/TCP: Setup conn->sock from new_sock: %p\n", new_sock); if (np->np_sockaddr.ss_family == AF_INET6) { memset(&sock_in6, 0, sizeof(struct sockaddr_in6)); diff --git a/trunk/drivers/target/iscsi/iscsi_target_nego.c b/trunk/drivers/target/iscsi/iscsi_target_nego.c index cd5018ff9cd7..7ad912060e21 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_nego.c +++ b/trunk/drivers/target/iscsi/iscsi_target_nego.c @@ -721,6 +721,9 @@ int iscsi_target_locate_portal( start += strlen(key) + strlen(value) + 2; } + + printk("i_buf: %s, s_buf: %s, t_buf: %s\n", i_buf, s_buf, t_buf); + /* * See 5.3. Login Phase. */ diff --git a/trunk/drivers/target/iscsi/iscsi_target_parameters.c b/trunk/drivers/target/iscsi/iscsi_target_parameters.c index e38222191a33..f690be9e5293 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_parameters.c +++ b/trunk/drivers/target/iscsi/iscsi_target_parameters.c @@ -436,7 +436,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr) /* * Extra parameters for ISER from RFC-5046 */ - param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS, + param = iscsi_set_default_param(pl, RDMAEXTENTIONS, INITIAL_RDMAEXTENTIONS, PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_AND, USE_LEADING_ONLY); if (!param) @@ -529,7 +529,7 @@ int iscsi_set_keys_to_negotiate( SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, OFMARKINT)) { SET_PSTATE_NEGOTIATE(param); - } else if (!strcmp(param->name, RDMAEXTENSIONS)) { + } else if (!strcmp(param->name, RDMAEXTENTIONS)) { if (iser == true) SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { @@ -580,7 +580,7 @@ int iscsi_set_keys_irrelevant_for_discovery( param->state &= ~PSTATE_NEGOTIATE; else if (!strcmp(param->name, OFMARKINT)) param->state &= ~PSTATE_NEGOTIATE; - else if (!strcmp(param->name, RDMAEXTENSIONS)) + else if (!strcmp(param->name, RDMAEXTENTIONS)) param->state &= ~PSTATE_NEGOTIATE; else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) param->state &= ~PSTATE_NEGOTIATE; @@ -758,9 +758,9 @@ static int iscsi_add_notunderstood_response( } INIT_LIST_HEAD(&extra_response->er_list); - strlcpy(extra_response->key, key, sizeof(extra_response->key)); - strlcpy(extra_response->value, NOTUNDERSTOOD, - sizeof(extra_response->value)); + strncpy(extra_response->key, key, strlen(key) + 1); + strncpy(extra_response->value, NOTUNDERSTOOD, + strlen(NOTUNDERSTOOD) + 1); list_add_tail(&extra_response->er_list, ¶m_list->extra_response_list); @@ -1629,6 +1629,8 @@ int iscsi_decode_text_input( if (phase & PHASE_SECURITY) { if (iscsi_check_for_auth_key(key) > 0) { + char *tmpptr = key + strlen(key); + *tmpptr = '='; kfree(tmpbuf); return 1; } @@ -1975,7 +1977,7 @@ void iscsi_set_session_parameters( ops->SessionType = !strcmp(param->value, DISCOVERY); pr_debug("SessionType: %s\n", param->value); - } else if (!strcmp(param->name, RDMAEXTENSIONS)) { + } else if (!strcmp(param->name, RDMAEXTENTIONS)) { ops->RDMAExtensions = !strcmp(param->value, YES); pr_debug("RDMAExtensions: %s\n", 
param->value); diff --git a/trunk/drivers/target/iscsi/iscsi_target_parameters.h b/trunk/drivers/target/iscsi/iscsi_target_parameters.h index a47046a752aa..f31b9c4b83f2 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_parameters.h +++ b/trunk/drivers/target/iscsi/iscsi_target_parameters.h @@ -1,10 +1,8 @@ #ifndef ISCSI_PARAMETERS_H #define ISCSI_PARAMETERS_H -#include - struct iscsi_extra_response { - char key[KEY_MAXLEN]; + char key[64]; char value[32]; struct list_head er_list; } ____cacheline_aligned; @@ -93,7 +91,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *, /* * Parameter names of iSCSI Extentions for RDMA (iSER). See RFC-5046 */ -#define RDMAEXTENSIONS "RDMAExtensions" +#define RDMAEXTENTIONS "RDMAExtensions" #define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength" #define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength" @@ -144,7 +142,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *, /* * Initial values for iSER parameters following RFC-5046 Section 6 */ -#define INITIAL_RDMAEXTENSIONS NO +#define INITIAL_RDMAEXTENTIONS NO #define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144" #define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192" diff --git a/trunk/drivers/target/iscsi/iscsi_target_util.c b/trunk/drivers/target/iscsi/iscsi_target_util.c index 08a3bacef0c5..2cc6c9a3ffb8 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_util.c +++ b/trunk/drivers/target/iscsi/iscsi_target_util.c @@ -676,56 +676,40 @@ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn) void iscsit_release_cmd(struct iscsi_cmd *cmd) { + struct iscsi_conn *conn = cmd->conn; + + iscsit_free_r2ts_from_list(cmd); + iscsit_free_all_datain_reqs(cmd); + kfree(cmd->buf_ptr); kfree(cmd->pdu_list); kfree(cmd->seq_list); kfree(cmd->tmr_req); kfree(cmd->iov_data); - kmem_cache_free(lio_cmd_cache, cmd); -} - -static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, - bool check_queues) -{ - struct iscsi_conn *conn = cmd->conn; - - if (scsi_cmd) { - if (cmd->data_direction == DMA_TO_DEVICE) { - iscsit_stop_dataout_timer(cmd); - iscsit_free_r2ts_from_list(cmd); - } - if (cmd->data_direction == DMA_FROM_DEVICE) - iscsit_free_all_datain_reqs(cmd); - } - - if (conn && check_queues) { + if (conn) { iscsit_remove_cmd_from_immediate_queue(cmd, conn); iscsit_remove_cmd_from_response_queue(cmd, conn); } + + kmem_cache_free(lio_cmd_cache, cmd); } -void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) +void iscsit_free_cmd(struct iscsi_cmd *cmd) { - struct se_cmd *se_cmd = NULL; - int rc; /* * Determine if a struct se_cmd is associated with * this struct iscsi_cmd. */ switch (cmd->iscsi_opcode) { case ISCSI_OP_SCSI_CMD: - se_cmd = &cmd->se_cmd; - __iscsit_free_cmd(cmd, true, shutdown); + if (cmd->data_direction == DMA_TO_DEVICE) + iscsit_stop_dataout_timer(cmd); /* * Fallthrough */ case ISCSI_OP_SCSI_TMFUNC: - rc = transport_generic_free_cmd(&cmd->se_cmd, 1); - if (!rc && shutdown && se_cmd && se_cmd->se_sess) { - __iscsit_free_cmd(cmd, true, shutdown); - target_put_sess_cmd(se_cmd->se_sess, se_cmd); - } + transport_generic_free_cmd(&cmd->se_cmd, 1); break; case ISCSI_OP_REJECT: /* @@ -734,19 +718,11 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) * associated cmd->se_cmd needs to be released. 
*/ if (cmd->se_cmd.se_tfo != NULL) { - se_cmd = &cmd->se_cmd; - __iscsit_free_cmd(cmd, true, shutdown); - - rc = transport_generic_free_cmd(&cmd->se_cmd, 1); - if (!rc && shutdown && se_cmd->se_sess) { - __iscsit_free_cmd(cmd, true, shutdown); - target_put_sess_cmd(se_cmd->se_sess, se_cmd); - } + transport_generic_free_cmd(&cmd->se_cmd, 1); break; } /* Fall-through */ default: - __iscsit_free_cmd(cmd, false, shutdown); cmd->release_cmd(cmd); break; } diff --git a/trunk/drivers/target/iscsi/iscsi_target_util.h b/trunk/drivers/target/iscsi/iscsi_target_util.h index a4422659d049..4f8e01a47081 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_util.h +++ b/trunk/drivers/target/iscsi/iscsi_target_util.h @@ -29,7 +29,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); extern void iscsit_release_cmd(struct iscsi_cmd *); -extern void iscsit_free_cmd(struct iscsi_cmd *, bool); +extern void iscsit_free_cmd(struct iscsi_cmd *); extern int iscsit_check_session_usage_count(struct iscsi_session *); extern void iscsit_dec_session_usage_count(struct iscsi_session *); extern void iscsit_inc_session_usage_count(struct iscsi_session *); diff --git a/trunk/drivers/target/target_core_configfs.c b/trunk/drivers/target/target_core_configfs.c index 4a8bd36d3958..43b7ac6c5b1c 100644 --- a/trunk/drivers/target/target_core_configfs.c +++ b/trunk/drivers/target/target_core_configfs.c @@ -1584,13 +1584,6 @@ static struct target_core_configfs_attribute target_core_attr_dev_udev_path = { .store = target_core_store_dev_udev_path, }; -static ssize_t target_core_show_dev_enable(void *p, char *page) -{ - struct se_device *dev = p; - - return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED)); -} - static ssize_t target_core_store_dev_enable( void *p, const char *page, @@ -1616,8 +1609,8 @@ static ssize_t target_core_store_dev_enable( static struct target_core_configfs_attribute target_core_attr_dev_enable = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "enable", - .ca_mode = S_IRUGO | S_IWUSR }, - .show = target_core_show_dev_enable, + .ca_mode = S_IWUSR }, + .show = NULL, .store = target_core_store_dev_enable, }; diff --git a/trunk/drivers/target/target_core_device.c b/trunk/drivers/target/target_core_device.c index 4630481b6043..2e4d655471bc 100644 --- a/trunk/drivers/target/target_core_device.c +++ b/trunk/drivers/target/target_core_device.c @@ -68,6 +68,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) struct se_dev_entry *deve = se_cmd->se_deve; deve->total_cmds++; + deve->total_bytes += se_cmd->data_length; if ((se_cmd->data_direction == DMA_TO_DEVICE) && (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { @@ -84,6 +85,8 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) else if (se_cmd->data_direction == DMA_FROM_DEVICE) deve->read_bytes += se_cmd->data_length; + deve->deve_cmds++; + se_lun = deve->se_lun; se_cmd->se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; @@ -272,6 +275,17 @@ int core_free_device_list_for_node( return 0; } +void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) +{ + struct se_dev_entry *deve; + unsigned long flags; + + spin_lock_irqsave(&se_nacl->device_list_lock, flags); + deve = se_nacl->device_list[se_cmd->orig_fe_lun]; + deve->deve_cmds--; + spin_unlock_irqrestore(&se_nacl->device_list_lock, flags); +} + void 
core_update_device_list_access( u32 mapped_lun, u32 lun_access, diff --git a/trunk/drivers/target/target_core_file.c b/trunk/drivers/target/target_core_file.c index b11890d85120..58ed683e04ae 100644 --- a/trunk/drivers/target/target_core_file.c +++ b/trunk/drivers/target/target_core_file.c @@ -153,7 +153,10 @@ static int fd_configure_device(struct se_device *dev) struct request_queue *q = bdev_get_queue(inode->i_bdev); unsigned long long dev_size; - fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); + dev->dev_attrib.hw_block_size = + bdev_logical_block_size(inode->i_bdev); + dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); + /* * Determine the number of bytes from i_size_read() minus * one (1) logical sector from underlying struct block_device @@ -200,7 +203,9 @@ static int fd_configure_device(struct se_device *dev) goto fail; } - fd_dev->fd_block_size = FD_BLOCKSIZE; + dev->dev_attrib.hw_block_size = FD_BLOCKSIZE; + dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; + /* * Limit UNMAP emulation to 8k Number of LBAs (NoLB) */ @@ -219,8 +224,8 @@ static int fd_configure_device(struct se_device *dev) dev->dev_attrib.max_write_same_len = 0x1000; } - dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; - dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; + fd_dev->fd_block_size = dev->dev_attrib.hw_block_size; + dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { @@ -694,12 +699,11 @@ static sector_t fd_get_blocks(struct se_device *dev) * to handle underlying block_device resize operations. */ if (S_ISBLK(i->i_mode)) - dev_size = i_size_read(i); + dev_size = (i_size_read(i) - fd_dev->fd_block_size); else dev_size = fd_dev->fd_dev_size; - return div_u64(dev_size - dev->dev_attrib.block_size, - dev->dev_attrib.block_size); + return div_u64(dev_size, dev->dev_attrib.block_size); } static struct sbc_ops fd_sbc_ops = { diff --git a/trunk/drivers/target/target_core_iblock.c b/trunk/drivers/target/target_core_iblock.c index aa1620abec6d..07f5f94634bb 100644 --- a/trunk/drivers/target/target_core_iblock.c +++ b/trunk/drivers/target/target_core_iblock.c @@ -615,8 +615,6 @@ iblock_execute_rw(struct se_cmd *cmd) rw = WRITE_FUA; else if (!(q->flush_flags & REQ_FLUSH)) rw = WRITE_FUA; - else - rw = WRITE; } else { rw = WRITE; } diff --git a/trunk/drivers/target/target_core_internal.h b/trunk/drivers/target/target_core_internal.h index 18d49df4d0ac..853bab60e362 100644 --- a/trunk/drivers/target/target_core_internal.h +++ b/trunk/drivers/target/target_core_internal.h @@ -8,6 +8,7 @@ extern struct t10_alua_lu_gp *default_lu_gp; struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); int core_free_device_list_for_node(struct se_node_acl *, struct se_portal_group *); +void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *); void core_update_device_list_access(u32, u32, struct se_node_acl *); int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *, u32, u32, struct se_node_acl *, struct se_portal_group *); diff --git a/trunk/drivers/target/target_core_rd.c b/trunk/drivers/target/target_core_rd.c index 0921a64b5550..e0b3c379aa14 100644 --- a/trunk/drivers/target/target_core_rd.c +++ b/trunk/drivers/target/target_core_rd.c @@ -291,11 +291,6 @@ rd_execute_rw(struct se_cmd *cmd) u32 src_len; u64 tmp; - if (dev->rd_flags & RDF_NULLIO) { - target_complete_cmd(cmd, SAM_STAT_GOOD); - return 0; - } - tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size; rd_offset = do_div(tmp, PAGE_SIZE); 
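/*
 * In the rd_execute_rw() context just above, do_div(tmp, PAGE_SIZE) divides
 * tmp in place -- tmp becomes the quotient (the page index stored into
 * rd_page on the next line) and the macro returns the remainder (the byte
 * offset within that page). A minimal userspace sketch of the same
 * LBA-to-page mapping, assuming 512-byte logical blocks and 4 KiB pages,
 * with plain 64-bit division standing in for the kernel-only do_div():
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t lba = 12345;                   /* cmd->t_task_lba */
        uint64_t block_size = 512;              /* dev_attrib.block_size */
        uint64_t tmp = lba * block_size;        /* byte offset into the ramdisk */
        uint64_t rd_offset = tmp % 4096;        /* remainder: offset within page */
        uint64_t rd_page = tmp / 4096;          /* quotient: sg page index */

        printf("lba %llu -> page %llu, offset %llu\n",
               (unsigned long long)lba,
               (unsigned long long)rd_page,
               (unsigned long long)rd_offset);
        return 0;
}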
rd_page = tmp; @@ -378,12 +373,11 @@ rd_execute_rw(struct se_cmd *cmd) } enum { - Opt_rd_pages, Opt_rd_nullio, Opt_err + Opt_rd_pages, Opt_err }; static match_table_t tokens = { {Opt_rd_pages, "rd_pages=%d"}, - {Opt_rd_nullio, "rd_nullio=%d"}, {Opt_err, NULL} }; @@ -414,14 +408,6 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev, " Count: %u\n", rd_dev->rd_page_count); rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; break; - case Opt_rd_nullio: - match_int(args, &arg); - if (arg != 1) - break; - - pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg); - rd_dev->rd_flags |= RDF_NULLIO; - break; default: break; } @@ -438,9 +424,8 @@ static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b) ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n", rd_dev->rd_dev_id); bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" - " SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count, - PAGE_SIZE, rd_dev->sg_table_count, - !!(rd_dev->rd_flags & RDF_NULLIO)); + " SG_table_count: %u\n", rd_dev->rd_page_count, + PAGE_SIZE, rd_dev->sg_table_count); return bl; } diff --git a/trunk/drivers/target/target_core_rd.h b/trunk/drivers/target/target_core_rd.h index 1789d1e14395..933b38b6e563 100644 --- a/trunk/drivers/target/target_core_rd.h +++ b/trunk/drivers/target/target_core_rd.h @@ -22,7 +22,6 @@ struct rd_dev_sg_table { } ____cacheline_aligned; #define RDF_HAS_PAGE_COUNT 0x01 -#define RDF_NULLIO 0x02 struct rd_dev { struct se_device dev; diff --git a/trunk/drivers/target/target_core_transport.c b/trunk/drivers/target/target_core_transport.c index 21e315874a54..f8388b4024aa 100644 --- a/trunk/drivers/target/target_core_transport.c +++ b/trunk/drivers/target/target_core_transport.c @@ -65,7 +65,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd); static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev); static int transport_generic_get_mem(struct se_cmd *cmd); -static int transport_put_cmd(struct se_cmd *cmd); +static void transport_put_cmd(struct se_cmd *cmd); static void target_complete_ok_work(struct work_struct *work); int init_se_kmem_caches(void) @@ -221,7 +221,6 @@ struct se_session *transport_init_session(void) INIT_LIST_HEAD(&se_sess->sess_list); INIT_LIST_HEAD(&se_sess->sess_acl_list); INIT_LIST_HEAD(&se_sess->sess_cmd_list); - INIT_LIST_HEAD(&se_sess->sess_wait_list); spin_lock_init(&se_sess->sess_cmd_lock); kref_init(&se_sess->sess_kref); @@ -1944,7 +1943,7 @@ static inline void transport_free_pages(struct se_cmd *cmd) * This routine unconditionally frees a command, and reference counting * or list removal must be done in the caller. */ -static int transport_release_cmd(struct se_cmd *cmd) +static void transport_release_cmd(struct se_cmd *cmd) { BUG_ON(!cmd->se_tfo); @@ -1956,11 +1955,11 @@ static int transport_release_cmd(struct se_cmd *cmd) * If this cmd has been setup with target_get_sess_cmd(), drop * the kref and call ->release_cmd() in kref callback. */ - if (cmd->check_release != 0) - return target_put_sess_cmd(cmd->se_sess, cmd); - + if (cmd->check_release != 0) { + target_put_sess_cmd(cmd->se_sess, cmd); + return; + } cmd->se_tfo->release_cmd(cmd); - return 1; } /** @@ -1969,7 +1968,7 @@ static int transport_release_cmd(struct se_cmd *cmd) * * This routine releases our reference to the command and frees it if possible. 
*/ -static int transport_put_cmd(struct se_cmd *cmd) +static void transport_put_cmd(struct se_cmd *cmd) { unsigned long flags; @@ -1977,7 +1976,7 @@ static int transport_put_cmd(struct se_cmd *cmd) if (atomic_read(&cmd->t_fe_count) && !atomic_dec_and_test(&cmd->t_fe_count)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return 0; + return; } if (cmd->transport_state & CMD_T_DEV_ACTIVE) { @@ -1987,7 +1986,8 @@ static int transport_put_cmd(struct se_cmd *cmd) spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_free_pages(cmd); - return transport_release_cmd(cmd); + transport_release_cmd(cmd); + return; } void *transport_kmap_data_sg(struct se_cmd *cmd) @@ -2152,25 +2152,24 @@ static void transport_write_pending_qf(struct se_cmd *cmd) } } -int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) +void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) { - int ret = 0; - if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) transport_wait_for_tasks(cmd); - ret = transport_release_cmd(cmd); + transport_release_cmd(cmd); } else { if (wait_for_tasks) transport_wait_for_tasks(cmd); + core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); + if (cmd->se_lun) transport_lun_remove_cmd(cmd); - ret = transport_put_cmd(cmd); + transport_put_cmd(cmd); } - return ret; } EXPORT_SYMBOL(transport_generic_free_cmd); @@ -2214,19 +2213,21 @@ static void target_release_cmd_kref(struct kref *kref) { struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); struct se_session *se_sess = se_cmd->se_sess; + unsigned long flags; + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); if (list_empty(&se_cmd->se_cmd_list)) { - spin_unlock(&se_sess->sess_cmd_lock); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); se_cmd->se_tfo->release_cmd(se_cmd); return; } if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { - spin_unlock(&se_sess->sess_cmd_lock); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); complete(&se_cmd->cmd_wait_comp); return; } list_del(&se_cmd->se_cmd_list); - spin_unlock(&se_sess->sess_cmd_lock); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); se_cmd->se_tfo->release_cmd(se_cmd); } @@ -2237,8 +2238,7 @@ static void target_release_cmd_kref(struct kref *kref) */ int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) { - return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, - &se_sess->sess_cmd_lock); + return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); } EXPORT_SYMBOL(target_put_sess_cmd); @@ -2253,14 +2253,11 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) unsigned long flags; spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - if (se_sess->sess_tearing_down) { - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - return; - } + + WARN_ON(se_sess->sess_tearing_down); se_sess->sess_tearing_down = 1; - list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); - list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) + list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) se_cmd->cmd_wait_set = 1; spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); @@ -2269,32 +2266,44 @@ EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); /* target_wait_for_sess_cmds - Wait for outstanding descriptors * @se_sess: session to wait for active I/O + * @wait_for_tasks: Make extra transport_wait_for_tasks call */ -void target_wait_for_sess_cmds(struct se_session *se_sess) +void 
target_wait_for_sess_cmds( + struct se_session *se_sess, + int wait_for_tasks) { struct se_cmd *se_cmd, *tmp_cmd; - unsigned long flags; + bool rc = false; list_for_each_entry_safe(se_cmd, tmp_cmd, - &se_sess->sess_wait_list, se_cmd_list) { + &se_sess->sess_cmd_list, se_cmd_list) { list_del(&se_cmd->se_cmd_list); pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" " %d\n", se_cmd, se_cmd->t_state, se_cmd->se_tfo->get_cmd_state(se_cmd)); - wait_for_completion(&se_cmd->cmd_wait_comp); - pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" - " fabric state: %d\n", se_cmd, se_cmd->t_state, - se_cmd->se_tfo->get_cmd_state(se_cmd)); + if (wait_for_tasks) { + pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d," + " fabric state: %d\n", se_cmd, se_cmd->t_state, + se_cmd->se_tfo->get_cmd_state(se_cmd)); - se_cmd->se_tfo->release_cmd(se_cmd); - } + rc = transport_wait_for_tasks(se_cmd); - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - WARN_ON(!list_empty(&se_sess->sess_cmd_list)); - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d," + " fabric state: %d\n", se_cmd, se_cmd->t_state, + se_cmd->se_tfo->get_cmd_state(se_cmd)); + } + if (!rc) { + wait_for_completion(&se_cmd->cmd_wait_comp); + pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" + " fabric state: %d\n", se_cmd, se_cmd->t_state, + se_cmd->se_tfo->get_cmd_state(se_cmd)); + } + + se_cmd->se_tfo->release_cmd(se_cmd); + } } EXPORT_SYMBOL(target_wait_for_sess_cmds); diff --git a/trunk/drivers/thermal/armada_thermal.c b/trunk/drivers/thermal/armada_thermal.c index 54ffd64ca3f7..5b4d75fd7b49 100644 --- a/trunk/drivers/thermal/armada_thermal.c +++ b/trunk/drivers/thermal/armada_thermal.c @@ -169,11 +169,21 @@ static int armada_thermal_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "Failed to get platform resource\n"); + return -ENODEV; + } + priv->sensor = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->sensor)) return PTR_ERR(priv->sensor); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + dev_err(&pdev->dev, "Failed to get platform resource\n"); + return -ENODEV; + } + priv->control = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->control)) return PTR_ERR(priv->control); diff --git a/trunk/drivers/thermal/dove_thermal.c b/trunk/drivers/thermal/dove_thermal.c index a088d1365ca5..4b15a5f270dc 100644 --- a/trunk/drivers/thermal/dove_thermal.c +++ b/trunk/drivers/thermal/dove_thermal.c @@ -149,6 +149,10 @@ static int dove_thermal_probe(struct platform_device *pdev) return PTR_ERR(priv->sensor); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + dev_err(&pdev->dev, "Failed to get platform resource\n"); + return -ENODEV; + } priv->control = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->control)) return PTR_ERR(priv->control); diff --git a/trunk/drivers/thermal/exynos_thermal.c b/trunk/drivers/thermal/exynos_thermal.c index 788b1ddcac6c..d20ce9e61403 100644 --- a/trunk/drivers/thermal/exynos_thermal.c +++ b/trunk/drivers/thermal/exynos_thermal.c @@ -925,6 +925,11 @@ static int exynos_tmu_probe(struct platform_device *pdev) INIT_WORK(&data->irq_work, exynos_tmu_work); data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!data->mem) { + dev_err(&pdev->dev, "Failed to get platform resource\n"); + return -ENOENT; + } + data->base = devm_ioremap_resource(&pdev->dev, data->mem); 
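/*
 * The armada_thermal, dove_thermal and exynos_thermal hunks above all add the
 * same kind of guard: check the result of platform_get_resource() before
 * handing it to devm_ioremap_resource(). A minimal sketch of that pattern;
 * foo_probe() and the message text are illustrative, and the error code
 * (-ENODEV here, -ENOENT in the exynos hunk) varies between the drivers:
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "Failed to get platform resource\n");
                return -ENODEV;
        }

        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* device-specific setup would continue here */
        return 0;
}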
if (IS_ERR(data->base)) return PTR_ERR(data->base); diff --git a/trunk/drivers/tty/ehv_bytechan.c b/trunk/drivers/tty/ehv_bytechan.c index 9bffcec5ad82..6d0c27cd03da 100644 --- a/trunk/drivers/tty/ehv_bytechan.c +++ b/trunk/drivers/tty/ehv_bytechan.c @@ -859,7 +859,6 @@ static int __init ehv_bc_init(void) */ static void __exit ehv_bc_exit(void) { - platform_driver_unregister(&ehv_bc_tty_driver); tty_unregister_driver(ehv_bc_driver); put_tty_driver(ehv_bc_driver); kfree(bcs); diff --git a/trunk/drivers/tty/mxser.c b/trunk/drivers/tty/mxser.c index 4c4a23674569..71d6eb2c93b1 100644 --- a/trunk/drivers/tty/mxser.c +++ b/trunk/drivers/tty/mxser.c @@ -1618,12 +1618,8 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp) if (ip->type == PORT_16550A) me->fifo[p] = 1; - if (ip->board->chip_flag == MOXA_MUST_MU860_HWID) { - opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2); - opmode &= OP_MODE_MASK; - } else { - opmode = RS232_MODE; - } + opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2); + opmode &= OP_MODE_MASK; me->iftype[p] = opmode; mutex_unlock(&port->mutex); } @@ -1680,9 +1676,6 @@ static int mxser_ioctl(struct tty_struct *tty, int shiftbit; unsigned char val, mask; - if (info->board->chip_flag != MOXA_MUST_MU860_HWID) - return -EFAULT; - p = tty->index % 4; if (cmd == MOXA_SET_OP_MODE) { if (get_user(opmode, (int __user *) argp)) diff --git a/trunk/drivers/tty/n_tty.c b/trunk/drivers/tty/n_tty.c index 6c7fe90ad72d..d655416087b7 100644 --- a/trunk/drivers/tty/n_tty.c +++ b/trunk/drivers/tty/n_tty.c @@ -1573,14 +1573,6 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) ldata->real_raw = 0; } n_tty_set_room(tty); - /* - * Fix tty hang when I_IXON(tty) is cleared, but the tty - * been stopped by STOP_CHAR(tty) before it. 
- */ - if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) { - start_tty(tty); - } - /* The termios change make the tty ready for I/O */ wake_up_interruptible(&tty->write_wait); wake_up_interruptible(&tty->read_wait); diff --git a/trunk/drivers/tty/pty.c b/trunk/drivers/tty/pty.c index abfd99089781..59bfaecc4e14 100644 --- a/trunk/drivers/tty/pty.c +++ b/trunk/drivers/tty/pty.c @@ -244,9 +244,14 @@ static void pty_flush_buffer(struct tty_struct *tty) static int pty_open(struct tty_struct *tty, struct file *filp) { + int retval = -ENODEV; + if (!tty || !tty->link) - return -ENODEV; + goto out; + + set_bit(TTY_IO_ERROR, &tty->flags); + retval = -EIO; if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) goto out; if (test_bit(TTY_PTY_LOCK, &tty->link->flags)) @@ -257,11 +262,9 @@ static int pty_open(struct tty_struct *tty, struct file *filp) clear_bit(TTY_IO_ERROR, &tty->flags); clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); set_bit(TTY_THROTTLED, &tty->flags); - return 0; - + retval = 0; out: - set_bit(TTY_IO_ERROR, &tty->flags); - return -EIO; + return retval; } static void pty_set_termios(struct tty_struct *tty, diff --git a/trunk/drivers/tty/rocket.c b/trunk/drivers/tty/rocket.c index 354564ea47c5..82d35c5a58fd 100644 --- a/trunk/drivers/tty/rocket.c +++ b/trunk/drivers/tty/rocket.c @@ -150,14 +150,12 @@ static Word_t aiop_intr_bits[AIOP_CTL_SIZE] = { AIOP_INTR_BIT_3 }; -#ifdef CONFIG_PCI static Word_t upci_aiop_intr_bits[AIOP_CTL_SIZE] = { UPCI_AIOP_INTR_BIT_0, UPCI_AIOP_INTR_BIT_1, UPCI_AIOP_INTR_BIT_2, UPCI_AIOP_INTR_BIT_3 }; -#endif static Byte_t RData[RDATASIZE] = { 0x00, 0x09, 0xf6, 0x82, @@ -229,6 +227,7 @@ static unsigned long nextLineNumber; static int __init init_ISA(int i); static void rp_wait_until_sent(struct tty_struct *tty, int timeout); static void rp_flush_buffer(struct tty_struct *tty); +static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model); static unsigned char GetLineNumber(int ctrl, int aiop, int ch); static unsigned char SetLineNumber(int ctrl, int aiop, int ch); static void rp_start(struct tty_struct *tty); @@ -242,6 +241,11 @@ static void sDisInterrupts(CHANNEL_T * ChP, Word_t Flags); static void sModemReset(CONTROLLER_T * CtlP, int chan, int on); static void sPCIModemReset(CONTROLLER_T * CtlP, int chan, int on); static int sWriteTxPrioByte(CHANNEL_T * ChP, Byte_t Data); +static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum, + ByteIO_t * AiopIOList, int AiopIOListSize, + WordIO_t ConfigIO, int IRQNum, Byte_t Frequency, + int PeriodicOnly, int altChanRingIndicator, + int UPCIRingInd); static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO, ByteIO_t * AiopIOList, int AiopIOListSize, int IRQNum, Byte_t Frequency, int PeriodicOnly); @@ -1771,145 +1775,6 @@ static DEFINE_PCI_DEVICE_TABLE(rocket_pci_ids) = { }; MODULE_DEVICE_TABLE(pci, rocket_pci_ids); -/* Resets the speaker controller on RocketModem II and III devices */ -static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model) -{ - ByteIO_t addr; - - /* RocketModem II speaker control is at the 8th port location of offset 0x40 */ - if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) { - addr = CtlP->AiopIO[0] + 0x4F; - sOutB(addr, 0); - } - - /* RocketModem III speaker control is at the 1st port location of offset 0x80 */ - if ((model == MODEL_UPCI_RM3_8PORT) - || (model == MODEL_UPCI_RM3_4PORT)) { - addr = CtlP->AiopIO[0] + 0x88; - sOutB(addr, 0); - } -} - -/*************************************************************************** -Function: 
sPCIInitController -Purpose: Initialization of controller global registers and controller - structure. -Call: sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize, - IRQNum,Frequency,PeriodicOnly) - CONTROLLER_T *CtlP; Ptr to controller structure - int CtlNum; Controller number - ByteIO_t *AiopIOList; List of I/O addresses for each AIOP. - This list must be in the order the AIOPs will be found on the - controller. Once an AIOP in the list is not found, it is - assumed that there are no more AIOPs on the controller. - int AiopIOListSize; Number of addresses in AiopIOList - int IRQNum; Interrupt Request number. Can be any of the following: - 0: Disable global interrupts - 3: IRQ 3 - 4: IRQ 4 - 5: IRQ 5 - 9: IRQ 9 - 10: IRQ 10 - 11: IRQ 11 - 12: IRQ 12 - 15: IRQ 15 - Byte_t Frequency: A flag identifying the frequency - of the periodic interrupt, can be any one of the following: - FREQ_DIS - periodic interrupt disabled - FREQ_137HZ - 137 Hertz - FREQ_69HZ - 69 Hertz - FREQ_34HZ - 34 Hertz - FREQ_17HZ - 17 Hertz - FREQ_9HZ - 9 Hertz - FREQ_4HZ - 4 Hertz - If IRQNum is set to 0 the Frequency parameter is - overidden, it is forced to a value of FREQ_DIS. - int PeriodicOnly: 1 if all interrupts except the periodic - interrupt are to be blocked. - 0 is both the periodic interrupt and - other channel interrupts are allowed. - If IRQNum is set to 0 the PeriodicOnly parameter is - overidden, it is forced to a value of 0. -Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller - initialization failed. - -Comments: - If periodic interrupts are to be disabled but AIOP interrupts - are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0. - - If interrupts are to be completely disabled set IRQNum to 0. - - Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an - invalid combination. - - This function performs initialization of global interrupt modes, - but it does not actually enable global interrupts. To enable - and disable global interrupts use functions sEnGlobalInt() and - sDisGlobalInt(). Enabling of global interrupts is normally not - done until all other initializations are complete. - - Even if interrupts are globally enabled, they must also be - individually enabled for each channel that is to generate - interrupts. - -Warnings: No range checking on any of the parameters is done. - - No context switches are allowed while executing this function. - - After this function all AIOPs on the controller are disabled, - they can be enabled with sEnAiop(). 
-*/ -static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum, - ByteIO_t * AiopIOList, int AiopIOListSize, - WordIO_t ConfigIO, int IRQNum, Byte_t Frequency, - int PeriodicOnly, int altChanRingIndicator, - int UPCIRingInd) -{ - int i; - ByteIO_t io; - - CtlP->AltChanRingIndicator = altChanRingIndicator; - CtlP->UPCIRingInd = UPCIRingInd; - CtlP->CtlNum = CtlNum; - CtlP->CtlID = CTLID_0001; /* controller release 1 */ - CtlP->BusType = isPCI; /* controller release 1 */ - - if (ConfigIO) { - CtlP->isUPCI = 1; - CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL; - CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL; - CtlP->AiopIntrBits = upci_aiop_intr_bits; - } else { - CtlP->isUPCI = 0; - CtlP->PCIIO = - (WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC); - CtlP->AiopIntrBits = aiop_intr_bits; - } - - sPCIControllerEOI(CtlP); /* clear EOI if warm init */ - /* Init AIOPs */ - CtlP->NumAiop = 0; - for (i = 0; i < AiopIOListSize; i++) { - io = AiopIOList[i]; - CtlP->AiopIO[i] = (WordIO_t) io; - CtlP->AiopIntChanIO[i] = io + _INT_CHAN; - - CtlP->AiopID[i] = sReadAiopID(io); /* read AIOP ID */ - if (CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */ - break; /* done looking for AIOPs */ - - CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */ - sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE); /* clock prescaler */ - sOutB(io + _INDX_DATA, sClockPrescale); - CtlP->NumAiop++; /* bump count of AIOPs */ - } - - if (CtlP->NumAiop == 0) - return (-1); - else - return (CtlP->NumAiop); -} - /* * Called when a PCI card is found. Retrieves and stores model information, * init's aiopic and serial port hardware. @@ -2654,6 +2519,147 @@ static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO, return (CtlP->NumAiop); } +#ifdef CONFIG_PCI +/*************************************************************************** +Function: sPCIInitController +Purpose: Initialization of controller global registers and controller + structure. +Call: sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize, + IRQNum,Frequency,PeriodicOnly) + CONTROLLER_T *CtlP; Ptr to controller structure + int CtlNum; Controller number + ByteIO_t *AiopIOList; List of I/O addresses for each AIOP. + This list must be in the order the AIOPs will be found on the + controller. Once an AIOP in the list is not found, it is + assumed that there are no more AIOPs on the controller. + int AiopIOListSize; Number of addresses in AiopIOList + int IRQNum; Interrupt Request number. Can be any of the following: + 0: Disable global interrupts + 3: IRQ 3 + 4: IRQ 4 + 5: IRQ 5 + 9: IRQ 9 + 10: IRQ 10 + 11: IRQ 11 + 12: IRQ 12 + 15: IRQ 15 + Byte_t Frequency: A flag identifying the frequency + of the periodic interrupt, can be any one of the following: + FREQ_DIS - periodic interrupt disabled + FREQ_137HZ - 137 Hertz + FREQ_69HZ - 69 Hertz + FREQ_34HZ - 34 Hertz + FREQ_17HZ - 17 Hertz + FREQ_9HZ - 9 Hertz + FREQ_4HZ - 4 Hertz + If IRQNum is set to 0 the Frequency parameter is + overidden, it is forced to a value of FREQ_DIS. + int PeriodicOnly: 1 if all interrupts except the periodic + interrupt are to be blocked. + 0 is both the periodic interrupt and + other channel interrupts are allowed. + If IRQNum is set to 0 the PeriodicOnly parameter is + overidden, it is forced to a value of 0. +Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller + initialization failed. 
+ +Comments: + If periodic interrupts are to be disabled but AIOP interrupts + are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0. + + If interrupts are to be completely disabled set IRQNum to 0. + + Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an + invalid combination. + + This function performs initialization of global interrupt modes, + but it does not actually enable global interrupts. To enable + and disable global interrupts use functions sEnGlobalInt() and + sDisGlobalInt(). Enabling of global interrupts is normally not + done until all other initializations are complete. + + Even if interrupts are globally enabled, they must also be + individually enabled for each channel that is to generate + interrupts. + +Warnings: No range checking on any of the parameters is done. + + No context switches are allowed while executing this function. + + After this function all AIOPs on the controller are disabled, + they can be enabled with sEnAiop(). +*/ +static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum, + ByteIO_t * AiopIOList, int AiopIOListSize, + WordIO_t ConfigIO, int IRQNum, Byte_t Frequency, + int PeriodicOnly, int altChanRingIndicator, + int UPCIRingInd) +{ + int i; + ByteIO_t io; + + CtlP->AltChanRingIndicator = altChanRingIndicator; + CtlP->UPCIRingInd = UPCIRingInd; + CtlP->CtlNum = CtlNum; + CtlP->CtlID = CTLID_0001; /* controller release 1 */ + CtlP->BusType = isPCI; /* controller release 1 */ + + if (ConfigIO) { + CtlP->isUPCI = 1; + CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL; + CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL; + CtlP->AiopIntrBits = upci_aiop_intr_bits; + } else { + CtlP->isUPCI = 0; + CtlP->PCIIO = + (WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC); + CtlP->AiopIntrBits = aiop_intr_bits; + } + + sPCIControllerEOI(CtlP); /* clear EOI if warm init */ + /* Init AIOPs */ + CtlP->NumAiop = 0; + for (i = 0; i < AiopIOListSize; i++) { + io = AiopIOList[i]; + CtlP->AiopIO[i] = (WordIO_t) io; + CtlP->AiopIntChanIO[i] = io + _INT_CHAN; + + CtlP->AiopID[i] = sReadAiopID(io); /* read AIOP ID */ + if (CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */ + break; /* done looking for AIOPs */ + + CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */ + sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE); /* clock prescaler */ + sOutB(io + _INDX_DATA, sClockPrescale); + CtlP->NumAiop++; /* bump count of AIOPs */ + } + + if (CtlP->NumAiop == 0) + return (-1); + else + return (CtlP->NumAiop); +} + +/* Resets the speaker controller on RocketModem II and III devices */ +static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model) +{ + ByteIO_t addr; + + /* RocketModem II speaker control is at the 8th port location of offset 0x40 */ + if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) { + addr = CtlP->AiopIO[0] + 0x4F; + sOutB(addr, 0); + } + + /* RocketModem III speaker control is at the 1st port location of offset 0x80 */ + if ((model == MODEL_UPCI_RM3_8PORT) + || (model == MODEL_UPCI_RM3_4PORT)) { + addr = CtlP->AiopIO[0] + 0x88; + sOutB(addr, 0); + } +} +#endif + /*************************************************************************** Function: sReadAiopID Purpose: Read the AIOP idenfication number directly from an AIOP. 
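The rocket.c hunks above add forward declarations for sPCIInitController() and
rmSpeakerReset() and move their definitions into a block guarded by #ifdef
CONFIG_PCI. A minimal standalone sketch of that forward-declaration plus
config-guard pattern; the function name and the stand-in CONFIG_PCI define are
illustrative only:

#include <stdio.h>

#define CONFIG_PCI 1    /* stand-in for the Kconfig option */

/* Forward declaration keeps callers above the guarded definition compiling. */
static int pci_init_controller(int ctl_num);

int main(void)
{
#ifdef CONFIG_PCI
        printf("AIOPs found: %d\n", pci_init_controller(0));
#else
        printf("built without PCI support\n");
#endif
        return 0;
}

#ifdef CONFIG_PCI
/* Definition compiled only when PCI support is configured in. */
static int pci_init_controller(int ctl_num)
{
        return ctl_num + 1;     /* pretend one AIOP was found */
}
#endif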
diff --git a/trunk/drivers/tty/serial/8250/8250_core.c b/trunk/drivers/tty/serial/8250/8250_core.c index 86c00b1c5583..46528d57be72 100644 --- a/trunk/drivers/tty/serial/8250/8250_core.c +++ b/trunk/drivers/tty/serial/8250/8250_core.c @@ -2755,7 +2755,7 @@ static void __init serial8250_isa_init_ports(void) if (nr_uarts > UART_NR) nr_uarts = UART_NR; - for (i = 0; i < nr_uarts; i++) { + for (i = 0; i < UART_NR; i++) { struct uart_8250_port *up = &serial8250_ports[i]; struct uart_port *port = &up->port; @@ -2916,7 +2916,7 @@ static int __init serial8250_console_setup(struct console *co, char *options) * if so, search for the first available port that does have * console support. */ - if (co->index >= nr_uarts) + if (co->index >= UART_NR) co->index = 0; port = &serial8250_ports[co->index].port; if (!port->iobase && !port->membase) @@ -2957,7 +2957,7 @@ int serial8250_find_port(struct uart_port *p) int line; struct uart_port *port; - for (line = 0; line < nr_uarts; line++) { + for (line = 0; line < UART_NR; line++) { port = &serial8250_ports[line].port; if (uart_match_port(p, port)) return line; @@ -3110,7 +3110,7 @@ static int serial8250_remove(struct platform_device *dev) { int i; - for (i = 0; i < nr_uarts; i++) { + for (i = 0; i < UART_NR; i++) { struct uart_8250_port *up = &serial8250_ports[i]; if (up->port.dev == &dev->dev) @@ -3178,7 +3178,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port * /* * First, find a port entry which matches. */ - for (i = 0; i < nr_uarts; i++) + for (i = 0; i < UART_NR; i++) if (uart_match_port(&serial8250_ports[i].port, port)) return &serial8250_ports[i]; @@ -3187,7 +3187,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port * * free entry. We look for one which hasn't been previously * used (indicated by zero iobase). */ - for (i = 0; i < nr_uarts; i++) + for (i = 0; i < UART_NR; i++) if (serial8250_ports[i].port.type == PORT_UNKNOWN && serial8250_ports[i].port.iobase == 0) return &serial8250_ports[i]; @@ -3196,7 +3196,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port * * That also failed. Last resort is to find any entry which * doesn't have a real port associated with it. 
*/ - for (i = 0; i < nr_uarts; i++) + for (i = 0; i < UART_NR; i++) if (serial8250_ports[i].port.type == PORT_UNKNOWN) return &serial8250_ports[i]; diff --git a/trunk/drivers/tty/serial/8250/8250_dw.c b/trunk/drivers/tty/serial/8250/8250_dw.c index d07b6af3a937..beaa283f5cc6 100644 --- a/trunk/drivers/tty/serial/8250/8250_dw.c +++ b/trunk/drivers/tty/serial/8250/8250_dw.c @@ -338,8 +338,7 @@ static int dw8250_runtime_suspend(struct device *dev) { struct dw8250_data *data = dev_get_drvdata(dev); - if (!IS_ERR(data->clk)) - clk_disable_unprepare(data->clk); + clk_disable_unprepare(data->clk); return 0; } @@ -348,8 +347,7 @@ static int dw8250_runtime_resume(struct device *dev) { struct dw8250_data *data = dev_get_drvdata(dev); - if (!IS_ERR(data->clk)) - clk_prepare_enable(data->clk); + clk_prepare_enable(data->clk); return 0; } @@ -369,7 +367,6 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match); static const struct acpi_device_id dw8250_acpi_match[] = { { "INT33C4", 0 }, { "INT33C5", 0 }, - { "80860F0A", 0 }, { }, }; MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match); diff --git a/trunk/drivers/tty/serial/8250/8250_gsc.c b/trunk/drivers/tty/serial/8250/8250_gsc.c index bb91b4713ebd..097dff9c08ad 100644 --- a/trunk/drivers/tty/serial/8250/8250_gsc.c +++ b/trunk/drivers/tty/serial/8250/8250_gsc.c @@ -30,12 +30,6 @@ static int __init serial_init_chip(struct parisc_device *dev) unsigned long address; int err; -#ifdef CONFIG_64BIT - extern int iosapic_serial_irq(int cellnum); - if (!dev->irq && (dev->id.sversion == 0xad)) - dev->irq = iosapic_serial_irq(dev->mod_index-1); -#endif - if (!dev->irq) { /* We find some unattached serial ports by walking native * busses. These should be silently ignored. Otherwise, @@ -57,8 +51,7 @@ static int __init serial_init_chip(struct parisc_device *dev) memset(&uart, 0, sizeof(uart)); uart.port.iotype = UPIO_MEM; /* 7.272727MHz on Lasi. Assumed the same for Dino, Wax and Timi. */ - uart.port.uartclk = (dev->id.sversion != 0xad) ? - 7272727 : 1843200; + uart.port.uartclk = 7272727; uart.port.mapbase = address; uart.port.membase = ioremap_nocache(address, 16); uart.port.irq = dev->irq; @@ -80,7 +73,6 @@ static struct parisc_device_id serial_tbl[] = { { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 }, { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c }, { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d }, - { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x000ad }, { 0 } }; diff --git a/trunk/drivers/tty/serial/amba-pl011.c b/trunk/drivers/tty/serial/amba-pl011.c index e2774f9ecd59..8ab70a620919 100644 --- a/trunk/drivers/tty/serial/amba-pl011.c +++ b/trunk/drivers/tty/serial/amba-pl011.c @@ -332,7 +332,7 @@ static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port * dmaengine_slave_config(chan, &rx_conf); uap->dmarx.chan = chan; - if (plat && plat->dma_rx_poll_enable) { + if (plat->dma_rx_poll_enable) { /* Set poll rate if specified. 
*/ if (plat->dma_rx_poll_rate) { uap->dmarx.auto_poll_rate = false; diff --git a/trunk/drivers/tty/serial/imx.c b/trunk/drivers/tty/serial/imx.c index 8cdfbd365892..147c9e193595 100644 --- a/trunk/drivers/tty/serial/imx.c +++ b/trunk/drivers/tty/serial/imx.c @@ -761,8 +761,6 @@ static int imx_startup(struct uart_port *port) temp = readl(sport->port.membase + UCR2); temp |= (UCR2_RXEN | UCR2_TXEN); - if (!sport->have_rtscts) - temp |= UCR2_IRTS; writel(temp, sport->port.membase + UCR2); if (USE_IRDA(sport)) { diff --git a/trunk/drivers/tty/serial/mcf.c b/trunk/drivers/tty/serial/mcf.c index 65be0c00c4bf..e956377a38fe 100644 --- a/trunk/drivers/tty/serial/mcf.c +++ b/trunk/drivers/tty/serial/mcf.c @@ -707,10 +707,8 @@ static int __init mcf_init(void) if (rc) return rc; rc = platform_driver_register(&mcf_platform_driver); - if (rc) { - uart_unregister_driver(&mcf_driver); + if (rc) return rc; - } return 0; } diff --git a/trunk/drivers/tty/serial/mpc52xx_uart.c b/trunk/drivers/tty/serial/mpc52xx_uart.c index f51b280f3bf2..018bad922554 100644 --- a/trunk/drivers/tty/serial/mpc52xx_uart.c +++ b/trunk/drivers/tty/serial/mpc52xx_uart.c @@ -1497,23 +1497,18 @@ mpc52xx_uart_init(void) if (psc_ops && psc_ops->fifoc_init) { ret = psc_ops->fifoc_init(); if (ret) - goto err_init; + return ret; } ret = platform_driver_register(&mpc52xx_uart_of_driver); if (ret) { printk(KERN_ERR "%s: platform_driver_register failed (%i)\n", __FILE__, ret); - goto err_reg; + uart_unregister_driver(&mpc52xx_uart_driver); + return ret; } return 0; -err_reg: - if (psc_ops && psc_ops->fifoc_uninit) - psc_ops->fifoc_uninit(); -err_init: - uart_unregister_driver(&mpc52xx_uart_driver); - return ret; } static void __exit diff --git a/trunk/drivers/tty/serial/nwpserial.c b/trunk/drivers/tty/serial/nwpserial.c index 549c70a2a63e..77287c54f331 100644 --- a/trunk/drivers/tty/serial/nwpserial.c +++ b/trunk/drivers/tty/serial/nwpserial.c @@ -199,7 +199,7 @@ static void nwpserial_shutdown(struct uart_port *port) dcr_write(up->dcr_host, UART_IER, up->ier); /* free irq */ - free_irq(up->port.irq, up); + free_irq(up->port.irq, port); } static int nwpserial_verify_port(struct uart_port *port, diff --git a/trunk/drivers/tty/serial/omap-serial.c b/trunk/drivers/tty/serial/omap-serial.c index f0b9f6b52b32..30d4f7a783cd 100644 --- a/trunk/drivers/tty/serial/omap-serial.c +++ b/trunk/drivers/tty/serial/omap-serial.c @@ -202,6 +202,26 @@ static int serial_omap_get_context_loss_count(struct uart_omap_port *up) return pdata->get_context_loss_count(up->dev); } +static void serial_omap_set_forceidle(struct uart_omap_port *up) +{ + struct omap_uart_port_info *pdata = up->dev->platform_data; + + if (!pdata || !pdata->set_forceidle) + return; + + pdata->set_forceidle(up->dev); +} + +static void serial_omap_set_noidle(struct uart_omap_port *up) +{ + struct omap_uart_port_info *pdata = up->dev->platform_data; + + if (!pdata || !pdata->set_noidle) + return; + + pdata->set_noidle(up->dev); +} + static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable) { struct omap_uart_port_info *pdata = up->dev->platform_data; @@ -278,6 +298,8 @@ static void serial_omap_stop_tx(struct uart_port *port) serial_out(up, UART_IER, up->ier); } + serial_omap_set_forceidle(up); + pm_runtime_mark_last_busy(up->dev); pm_runtime_put_autosuspend(up->dev); } @@ -342,6 +364,7 @@ static void serial_omap_start_tx(struct uart_port *port) pm_runtime_get_sync(up->dev); serial_omap_enable_ier_thri(up); + serial_omap_set_noidle(up); pm_runtime_mark_last_busy(up->dev); 
pm_runtime_put_autosuspend(up->dev); } diff --git a/trunk/drivers/tty/serial/samsung.c b/trunk/drivers/tty/serial/samsung.c index 0c8a9fa2be6c..074b9194144f 100644 --- a/trunk/drivers/tty/serial/samsung.c +++ b/trunk/drivers/tty/serial/samsung.c @@ -1166,18 +1166,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, ourport->tx_irq = ret; ourport->clk = clk_get(&platdev->dev, "uart"); - if (IS_ERR(ourport->clk)) { - pr_err("%s: Controller clock not found\n", - dev_name(&platdev->dev)); - return PTR_ERR(ourport->clk); - } - - ret = clk_prepare_enable(ourport->clk); - if (ret) { - pr_err("uart: clock failed to prepare+enable: %d\n", ret); - clk_put(ourport->clk); - return ret; - } /* Keep all interrupts masked and cleared */ if (s3c24xx_serial_has_interrupt_mask(port)) { @@ -1192,7 +1180,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, /* reset the fifos (and setup the uart) */ s3c24xx_serial_resetport(port, cfg); - clk_disable_unprepare(ourport->clk); return 0; } @@ -1816,7 +1803,6 @@ static int __init s3c24xx_serial_modinit(void) static void __exit s3c24xx_serial_modexit(void) { - platform_driver_unregister(&samsung_serial_driver); uart_unregister_driver(&s3c24xx_uart_drv); } diff --git a/trunk/drivers/tty/vt/vt.c b/trunk/drivers/tty/vt/vt.c index 740202d8a5c4..fbd447b390f7 100644 --- a/trunk/drivers/tty/vt/vt.c +++ b/trunk/drivers/tty/vt/vt.c @@ -779,6 +779,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ con_set_default_unimap(vc); vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL); if (!vc->vc_screenbuf) { + tty_port_destroy(&vc->port); kfree(vc); vc_cons[currcons].d = NULL; return -ENOMEM; @@ -985,25 +986,26 @@ static int vt_resize(struct tty_struct *tty, struct winsize *ws) return ret; } -struct vc_data *vc_deallocate(unsigned int currcons) +void vc_deallocate(unsigned int currcons) { - struct vc_data *vc = NULL; - WARN_CONSOLE_UNLOCKED(); if (vc_cons_allocated(currcons)) { - struct vt_notifier_param param; + struct vc_data *vc = vc_cons[currcons].d; + struct vt_notifier_param param = { .vc = vc }; - param.vc = vc = vc_cons[currcons].d; atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, ¶m); vcs_remove_sysfs(currcons); vc->vc_sw->con_deinit(vc); put_pid(vc->vt_pid); module_put(vc->vc_sw->owner); kfree(vc->vc_screenbuf); + if (currcons >= MIN_NR_CONSOLES) { + tty_port_destroy(&vc->port); + kfree(vc); + } vc_cons[currcons].d = NULL; } - return vc; } /* diff --git a/trunk/drivers/tty/vt/vt_ioctl.c b/trunk/drivers/tty/vt/vt_ioctl.c index 2bd78e2ac8ec..98ff1735eafc 100644 --- a/trunk/drivers/tty/vt/vt_ioctl.c +++ b/trunk/drivers/tty/vt/vt_ioctl.c @@ -283,48 +283,6 @@ do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, int perm, struct vc_ return 0; } -/* deallocate a single console, if possible (leave 0) */ -static int vt_disallocate(unsigned int vc_num) -{ - struct vc_data *vc = NULL; - int ret = 0; - - console_lock(); - if (VT_BUSY(vc_num)) - ret = -EBUSY; - else if (vc_num) - vc = vc_deallocate(vc_num); - console_unlock(); - - if (vc && vc_num >= MIN_NR_CONSOLES) { - tty_port_destroy(&vc->port); - kfree(vc); - } - - return ret; -} - -/* deallocate all unused consoles, but leave 0 */ -static void vt_disallocate_all(void) -{ - struct vc_data *vc[MAX_NR_CONSOLES]; - int i; - - console_lock(); - for (i = 1; i < MAX_NR_CONSOLES; i++) - if (!VT_BUSY(i)) - vc[i] = vc_deallocate(i); - else - vc[i] = NULL; - console_unlock(); - - for (i = 1; i < MAX_NR_CONSOLES; i++) { - if (vc[i] && i >= 
MIN_NR_CONSOLES) { - tty_port_destroy(&vc[i]->port); - kfree(vc[i]); - } - } -} /* @@ -811,10 +769,24 @@ int vt_ioctl(struct tty_struct *tty, ret = -ENXIO; break; } - if (arg == 0) - vt_disallocate_all(); - else - ret = vt_disallocate(--arg); + if (arg == 0) { + /* deallocate all unused consoles, but leave 0 */ + console_lock(); + for (i=1; iphy; - if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - if (!pdev->dev.coherent_dma_mask) - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + if (!pdev->dev.dma_mask) { + pdev->dev.dma_mask = devm_kzalloc(&pdev->dev, + sizeof(*pdev->dev.dma_mask), GFP_KERNEL); + if (!pdev->dev.dma_mask) { + ret = -ENOMEM; + dev_err(&pdev->dev, "Failed to alloc dma_mask!\n"); + goto err; + } + *pdev->dev.dma_mask = DMA_BIT_MASK(32); + dma_set_coherent_mask(&pdev->dev, *pdev->dev.dma_mask); + } if (usbmisc_ops && usbmisc_ops->init) { ret = usbmisc_ops->init(&pdev->dev); diff --git a/trunk/drivers/usb/chipidea/core.c b/trunk/drivers/usb/chipidea/core.c index 475c9c114689..450107e5f657 100644 --- a/trunk/drivers/usb/chipidea/core.c +++ b/trunk/drivers/usb/chipidea/core.c @@ -276,9 +276,8 @@ static void ci_role_work(struct work_struct *work) ci_role_stop(ci); ci_role_start(ci, role); + enable_irq(ci->irq); } - - enable_irq(ci->irq); } static irqreturn_t ci_irq(int irq, void *data) @@ -371,6 +370,11 @@ static int ci_hdrc_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "missing resource\n"); + return -ENODEV; + } + base = devm_ioremap_resource(dev, res); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/trunk/drivers/usb/chipidea/udc.c b/trunk/drivers/usb/chipidea/udc.c index b501346484ae..519ead2443c5 100644 --- a/trunk/drivers/usb/chipidea/udc.c +++ b/trunk/drivers/usb/chipidea/udc.c @@ -1678,11 +1678,8 @@ static int udc_start(struct ci13xxx *ci) ci->gadget.ep0 = &ci->ep0in->ep; - if (ci->global_phy) { + if (ci->global_phy) ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2); - if (IS_ERR(ci->transceiver)) - ci->transceiver = NULL; - } if (ci->platdata->flags & CI13XXX_REQUIRE_TRANSCEIVER) { if (ci->transceiver == NULL) { @@ -1697,7 +1694,7 @@ static int udc_start(struct ci13xxx *ci) goto put_transceiver; } - if (ci->transceiver) { + if (!IS_ERR_OR_NULL(ci->transceiver)) { retval = otg_set_peripheral(ci->transceiver->otg, &ci->gadget); if (retval) @@ -1714,7 +1711,7 @@ static int udc_start(struct ci13xxx *ci) return retval; remove_trans: - if (ci->transceiver) { + if (!IS_ERR_OR_NULL(ci->transceiver)) { otg_set_peripheral(ci->transceiver->otg, NULL); if (ci->global_phy) usb_put_phy(ci->transceiver); @@ -1722,7 +1719,7 @@ static int udc_start(struct ci13xxx *ci) dev_err(dev, "error = %i\n", retval); put_transceiver: - if (ci->transceiver && ci->global_phy) + if (!IS_ERR_OR_NULL(ci->transceiver) && ci->global_phy) usb_put_phy(ci->transceiver); destroy_eps: destroy_eps(ci); @@ -1750,7 +1747,7 @@ static void udc_stop(struct ci13xxx *ci) dma_pool_destroy(ci->td_pool); dma_pool_destroy(ci->qh_pool); - if (ci->transceiver) { + if (!IS_ERR_OR_NULL(ci->transceiver)) { otg_set_peripheral(ci->transceiver->otg, NULL); if (ci->global_phy) usb_put_phy(ci->transceiver); diff --git a/trunk/drivers/usb/core/Kconfig b/trunk/drivers/usb/core/Kconfig index db535b0aa172..8772b3659296 100644 --- a/trunk/drivers/usb/core/Kconfig +++ b/trunk/drivers/usb/core/Kconfig @@ -51,7 +51,7 @@ config USB_DYNAMIC_MINORS config USB_OTG bool "OTG support" - depends on PM_RUNTIME + depends on USB_SUSPEND 
default n help The most notable feature of USB OTG is support for a diff --git a/trunk/drivers/usb/core/devio.c b/trunk/drivers/usb/core/devio.c index c88c4fb9459d..caefc800f298 100644 --- a/trunk/drivers/usb/core/devio.c +++ b/trunk/drivers/usb/core/devio.c @@ -1287,13 +1287,9 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, goto error; } for (totlen = u = 0; u < uurb->number_of_packets; u++) { - /* - * arbitrary limit need for USB 3.0 - * bMaxBurst (0~15 allowed, 1~16 packets) - * bmAttributes (bit 1:0, mult 0~2, 1~3 packets) - * sizemax: 1024 * 16 * 3 = 49152 - */ - if (isopkt[u].length > 49152) { + /* arbitrary limit, + * sufficient for USB 2.0 high-bandwidth iso */ + if (isopkt[u].length > 8192) { ret = -EINVAL; goto error; } diff --git a/trunk/drivers/usb/core/quirks.c b/trunk/drivers/usb/core/quirks.c index a63598895077..ab5638d9c707 100644 --- a/trunk/drivers/usb/core/quirks.c +++ b/trunk/drivers/usb/core/quirks.c @@ -88,9 +88,6 @@ static const struct usb_device_id usb_quirk_list[] = { /* Edirol SD-20 */ { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, - /* Alcor Micro Corp. Hub */ - { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME }, - /* appletouch */ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/trunk/drivers/usb/dwc3/Kconfig b/trunk/drivers/usb/dwc3/Kconfig index 757aa18027d0..ea5ee9c21c35 100644 --- a/trunk/drivers/usb/dwc3/Kconfig +++ b/trunk/drivers/usb/dwc3/Kconfig @@ -19,21 +19,21 @@ choice config USB_DWC3_HOST bool "Host only mode" - depends on USB=y || USB=USB_DWC3 + depends on USB help Select this when you want to use DWC3 in host mode only, thereby the gadget feature will be regressed. config USB_DWC3_GADGET bool "Gadget only mode" - depends on USB_GADGET=y || USB_GADGET=USB_DWC3 + depends on USB_GADGET help Select this when you want to use DWC3 in gadget mode only, thereby the host feature will be regressed. config USB_DWC3_DUAL_ROLE bool "Dual Role mode" - depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3)) + depends on (USB && USB_GADGET) help This is the default mode of working of DWC3 controller where both host and gadget features are enabled. diff --git a/trunk/drivers/usb/dwc3/dwc3-exynos.c b/trunk/drivers/usb/dwc3/dwc3-exynos.c index 8ce9d7fd6cfc..a8afe6e26621 100644 --- a/trunk/drivers/usb/dwc3/dwc3-exynos.c +++ b/trunk/drivers/usb/dwc3/dwc3-exynos.c @@ -95,6 +95,8 @@ static int dwc3_exynos_remove_child(struct device *dev, void *unused) return 0; } +static u64 dwc3_exynos_dma_mask = DMA_BIT_MASK(32); + static int dwc3_exynos_probe(struct platform_device *pdev) { struct dwc3_exynos *exynos; @@ -116,9 +118,7 @@ static int dwc3_exynos_probe(struct platform_device *pdev) * Once we move to full device tree support this will vanish off. 
*/ if (!dev->dma_mask) - dev->dma_mask = &dev->coherent_dma_mask; - if (!dev->coherent_dma_mask) - dev->coherent_dma_mask = DMA_BIT_MASK(32); + dev->dma_mask = &dwc3_exynos_dma_mask; platform_set_drvdata(pdev, exynos); @@ -164,9 +164,9 @@ static int dwc3_exynos_remove(struct platform_device *pdev) { struct dwc3_exynos *exynos = platform_get_drvdata(pdev); - device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child); platform_device_unregister(exynos->usb2_phy); platform_device_unregister(exynos->usb3_phy); + device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child); clk_disable_unprepare(exynos->clk); diff --git a/trunk/drivers/usb/dwc3/dwc3-pci.c b/trunk/drivers/usb/dwc3/dwc3-pci.c index eba9e2baf32b..227d4a7acad7 100644 --- a/trunk/drivers/usb/dwc3/dwc3-pci.c +++ b/trunk/drivers/usb/dwc3/dwc3-pci.c @@ -196,9 +196,9 @@ static void dwc3_pci_remove(struct pci_dev *pci) { struct dwc3_pci *glue = pci_get_drvdata(pci); - platform_device_unregister(glue->dwc3); platform_device_unregister(glue->usb2_phy); platform_device_unregister(glue->usb3_phy); + platform_device_unregister(glue->dwc3); pci_set_drvdata(pci, NULL); pci_disable_device(pci); } diff --git a/trunk/drivers/usb/dwc3/gadget.c b/trunk/drivers/usb/dwc3/gadget.c index b5e5b35df49c..2b6e7e001207 100644 --- a/trunk/drivers/usb/dwc3/gadget.c +++ b/trunk/drivers/usb/dwc3/gadget.c @@ -1706,19 +1706,11 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) dep = dwc->eps[epnum]; if (!dep) continue; - /* - * Physical endpoints 0 and 1 are special; they form the - * bi-directional USB endpoint 0. - * - * For those two physical endpoints, we don't allocate a TRB - * pool nor do we add them the endpoints list. Due to that, we - * shouldn't do these two operations otherwise we would end up - * with all sorts of bugs when removing dwc3.ko. - */ - if (epnum != 0 && epnum != 1) { - dwc3_free_trb_pool(dep); + + dwc3_free_trb_pool(dep); + + if (epnum != 0 && epnum != 1) list_del(&dep->endpoint.ep_list); - } kfree(dep); } diff --git a/trunk/drivers/usb/gadget/Kconfig b/trunk/drivers/usb/gadget/Kconfig index f41aa0d0c414..83300d94a893 100644 --- a/trunk/drivers/usb/gadget/Kconfig +++ b/trunk/drivers/usb/gadget/Kconfig @@ -146,6 +146,7 @@ config USB_LPC32XX depends on ARCH_LPC32XX depends on USB_PHY select USB_ISP1301 + select USB_OTG_UTILS help This option selects the USB device controller in the LPC32xx SoC. 
diff --git a/trunk/drivers/usb/gadget/atmel_usba_udc.c b/trunk/drivers/usb/gadget/atmel_usba_udc.c index 5a5128a226f7..f2a970f75bfa 100644 --- a/trunk/drivers/usb/gadget/atmel_usba_udc.c +++ b/trunk/drivers/usb/gadget/atmel_usba_udc.c @@ -1992,6 +1992,8 @@ static int __init usba_udc_probe(struct platform_device *pdev) err_get_hclk: clk_put(pclk); + platform_set_drvdata(pdev, NULL); + return ret; } diff --git a/trunk/drivers/usb/gadget/bcm63xx_udc.c b/trunk/drivers/usb/gadget/bcm63xx_udc.c index fd24cb4540a4..6e6518264c42 100644 --- a/trunk/drivers/usb/gadget/bcm63xx_udc.c +++ b/trunk/drivers/usb/gadget/bcm63xx_udc.c @@ -2334,11 +2334,21 @@ static int bcm63xx_udc_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "error finding USBD resource\n"); + return -ENXIO; + } + udc->usbd_regs = devm_ioremap_resource(dev, res); if (IS_ERR(udc->usbd_regs)) return PTR_ERR(udc->usbd_regs); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + dev_err(dev, "error finding IUDMA resource\n"); + return -ENXIO; + } + udc->iudma_regs = devm_ioremap_resource(dev, res); if (IS_ERR(udc->iudma_regs)) return PTR_ERR(udc->iudma_regs); @@ -2410,6 +2420,7 @@ static int bcm63xx_udc_remove(struct platform_device *pdev) usb_del_gadget_udc(&udc->gadget); BUG_ON(udc->driver); + platform_set_drvdata(pdev, NULL); bcm63xx_uninit_udc_hw(udc); return 0; diff --git a/trunk/drivers/usb/gadget/configfs.c b/trunk/drivers/usb/gadget/configfs.c index 80e7f75a56c7..3d5cfc9c2c78 100644 --- a/trunk/drivers/usb/gadget/configfs.c +++ b/trunk/drivers/usb/gadget/configfs.c @@ -821,10 +821,8 @@ static int configfs_composite_bind(struct usb_gadget *gadget, gi->gstrings[i] = NULL; s = usb_gstrings_attach(&gi->cdev, gi->gstrings, USB_GADGET_FIRST_AVAIL_IDX); - if (IS_ERR(s)) { - ret = PTR_ERR(s); + if (IS_ERR(s)) goto err_comp_cleanup; - } gi->cdev.desc.iManufacturer = s[USB_GADGET_MANUFACTURER_IDX].id; gi->cdev.desc.iProduct = s[USB_GADGET_PRODUCT_IDX].id; @@ -849,10 +847,8 @@ static int configfs_composite_bind(struct usb_gadget *gadget, } cfg->gstrings[i] = NULL; s = usb_gstrings_attach(&gi->cdev, cfg->gstrings, 1); - if (IS_ERR(s)) { - ret = PTR_ERR(s); + if (IS_ERR(s)) goto err_comp_cleanup; - } c->iConfiguration = s[0].id; } diff --git a/trunk/drivers/usb/gadget/dummy_hcd.c b/trunk/drivers/usb/gadget/dummy_hcd.c index c588e8e486e5..a792e322f4f1 100644 --- a/trunk/drivers/usb/gadget/dummy_hcd.c +++ b/trunk/drivers/usb/gadget/dummy_hcd.c @@ -1001,6 +1001,7 @@ static int dummy_udc_remove(struct platform_device *pdev) struct dummy *dum = platform_get_drvdata(pdev); usb_del_gadget_udc(&dum->gadget); + platform_set_drvdata(pdev, NULL); device_remove_file(&dum->gadget.dev, &dev_attr_function); return 0; } @@ -2660,10 +2661,8 @@ static int __init init(void) } for (i = 0; i < mod_data.num; i++) { dum[i] = kzalloc(sizeof(struct dummy), GFP_KERNEL); - if (!dum[i]) { - retval = -ENOMEM; + if (!dum[i]) goto err_add_pdata; - } retval = platform_device_add_data(the_hcd_pdev[i], &dum[i], sizeof(void *)); if (retval) diff --git a/trunk/drivers/usb/gadget/f_ecm.c b/trunk/drivers/usb/gadget/f_ecm.c index abf8a31ae146..d893d6929079 100644 --- a/trunk/drivers/usb/gadget/f_ecm.c +++ b/trunk/drivers/usb/gadget/f_ecm.c @@ -816,7 +816,6 @@ ecm_unbind(struct usb_configuration *c, struct usb_function *f) * @c: the configuration to support the network link * @ethaddr: a buffer in which the ethernet address of the host side * side of the link was recorded - * @dev: eth_dev structure 
* Context: single threaded during gadget setup * * Returns zero on success, else negative errno. diff --git a/trunk/drivers/usb/gadget/f_subset.c b/trunk/drivers/usb/gadget/f_subset.c index 7be04b342494..185d6f5e4e4d 100644 --- a/trunk/drivers/usb/gadget/f_subset.c +++ b/trunk/drivers/usb/gadget/f_subset.c @@ -373,7 +373,6 @@ geth_unbind(struct usb_configuration *c, struct usb_function *f) * @c: the configuration to support the network link * @ethaddr: a buffer in which the ethernet address of the host side * side of the link was recorded - * @dev: eth_dev structure * Context: single threaded during gadget setup * * Returns zero on success, else negative errno. diff --git a/trunk/drivers/usb/gadget/f_uac2.c b/trunk/drivers/usb/gadget/f_uac2.c index 03c1fb686644..c7468b6c07b0 100644 --- a/trunk/drivers/usb/gadget/f_uac2.c +++ b/trunk/drivers/usb/gadget/f_uac2.c @@ -456,6 +456,8 @@ static int snd_uac2_remove(struct platform_device *pdev) { struct snd_card *card = platform_get_drvdata(pdev); + platform_set_drvdata(pdev, NULL); + if (card) return snd_card_free(card); diff --git a/trunk/drivers/usb/gadget/fusb300_udc.c b/trunk/drivers/usb/gadget/fusb300_udc.c index b8632d40f8bf..cec8871b77f9 100644 --- a/trunk/drivers/usb/gadget/fusb300_udc.c +++ b/trunk/drivers/usb/gadget/fusb300_udc.c @@ -1461,10 +1461,8 @@ static int __init fusb300_probe(struct platform_device *pdev) fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep, GFP_KERNEL); - if (fusb300->ep0_req == NULL) { - ret = -ENOMEM; + if (fusb300->ep0_req == NULL) goto clean_up3; - } init_controller(fusb300); ret = usb_add_gadget_udc(&pdev->dev, &fusb300->gadget); diff --git a/trunk/drivers/usb/gadget/imx_udc.c b/trunk/drivers/usb/gadget/imx_udc.c index 9b2d24e4c95f..b5cebd6b0d7a 100644 --- a/trunk/drivers/usb/gadget/imx_udc.c +++ b/trunk/drivers/usb/gadget/imx_udc.c @@ -1511,6 +1511,8 @@ static int __exit imx_udc_remove(struct platform_device *pdev) if (pdata->exit) pdata->exit(&pdev->dev); + platform_set_drvdata(pdev, NULL); + return 0; } diff --git a/trunk/drivers/usb/gadget/m66592-udc.c b/trunk/drivers/usb/gadget/m66592-udc.c index 51cfe72da5bb..866ef0999247 100644 --- a/trunk/drivers/usb/gadget/m66592-udc.c +++ b/trunk/drivers/usb/gadget/m66592-udc.c @@ -1660,10 +1660,8 @@ static int __init m66592_probe(struct platform_device *pdev) m66592->epaddr2ep[0] = &m66592->ep[0]; m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL); - if (m66592->ep0_req == NULL) { - ret = -ENOMEM; + if (m66592->ep0_req == NULL) goto clean_up3; - } m66592->ep0_req->complete = nop_completion; init_controller(m66592); diff --git a/trunk/drivers/usb/gadget/pxa25x_udc.c b/trunk/drivers/usb/gadget/pxa25x_udc.c index 95c531d5aa4f..ef47495dec8f 100644 --- a/trunk/drivers/usb/gadget/pxa25x_udc.c +++ b/trunk/drivers/usb/gadget/pxa25x_udc.c @@ -2236,6 +2236,7 @@ static int __exit pxa25x_udc_remove(struct platform_device *pdev) dev->transceiver = NULL; } + platform_set_drvdata(pdev, NULL); the_controller = NULL; return 0; } diff --git a/trunk/drivers/usb/gadget/r8a66597-udc.c b/trunk/drivers/usb/gadget/r8a66597-udc.c index 7ff7d9cf2061..0b742d171843 100644 --- a/trunk/drivers/usb/gadget/r8a66597-udc.c +++ b/trunk/drivers/usb/gadget/r8a66597-udc.c @@ -1977,10 +1977,8 @@ static int __init r8a66597_probe(struct platform_device *pdev) r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep, GFP_KERNEL); - if (r8a66597->ep0_req == NULL) { - ret = -ENOMEM; + if (r8a66597->ep0_req == NULL) goto clean_up3; - } r8a66597->ep0_req->complete = 
nop_completion; ret = usb_add_gadget_udc(&pdev->dev, &r8a66597->gadget); diff --git a/trunk/drivers/usb/gadget/s3c-hsotg.c b/trunk/drivers/usb/gadget/s3c-hsotg.c index af22f24046b2..a3cdc32115d5 100644 --- a/trunk/drivers/usb/gadget/s3c-hsotg.c +++ b/trunk/drivers/usb/gadget/s3c-hsotg.c @@ -437,7 +437,7 @@ static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg, if (hs_req->req.length == 0) return; - usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in); + usb_gadget_unmap_request(&hsotg->gadget, hs_req, hs_ep->dir_in); } /** diff --git a/trunk/drivers/usb/gadget/s3c2410_udc.c b/trunk/drivers/usb/gadget/s3c2410_udc.c index 09c4f70c93c4..d0e75e1b3ccb 100644 --- a/trunk/drivers/usb/gadget/s3c2410_udc.c +++ b/trunk/drivers/usb/gadget/s3c2410_udc.c @@ -1851,7 +1851,6 @@ static int s3c2410_udc_probe(struct platform_device *pdev) irq = gpio_to_irq(udc_info->vbus_pin); if (irq < 0) { dev_err(dev, "no irq for gpio vbus pin\n"); - retval = irq; goto err_gpio_claim; } @@ -1949,6 +1948,8 @@ static int s3c2410_udc_remove(struct platform_device *pdev) iounmap(base_addr); release_mem_region(rsrc_start, rsrc_len); + platform_set_drvdata(pdev, NULL); + if (!IS_ERR(udc_clock) && udc_clock != NULL) { clk_disable(udc_clock); clk_put(udc_clock); diff --git a/trunk/drivers/usb/gadget/zero.c b/trunk/drivers/usb/gadget/zero.c index 0deb9d6cde26..2cd6262e8b71 100644 --- a/trunk/drivers/usb/gadget/zero.c +++ b/trunk/drivers/usb/gadget/zero.c @@ -284,16 +284,12 @@ static int __init zero_bind(struct usb_composite_dev *cdev) ss_opts->bulk_buflen = gzero_options.bulk_buflen; func_ss = usb_get_function(func_inst_ss); - if (IS_ERR(func_ss)) { - status = PTR_ERR(func_ss); + if (IS_ERR(func_ss)) goto err_put_func_inst_ss; - } func_inst_lb = usb_get_function_instance("Loopback"); - if (IS_ERR(func_inst_lb)) { - status = PTR_ERR(func_inst_lb); + if (IS_ERR(func_inst_lb)) goto err_put_func_ss; - } lb_opts = container_of(func_inst_lb, struct f_lb_opts, func_inst); lb_opts->bulk_buflen = gzero_options.bulk_buflen; diff --git a/trunk/drivers/usb/host/Kconfig b/trunk/drivers/usb/host/Kconfig index 344d5e2f87d7..de94f2699063 100644 --- a/trunk/drivers/usb/host/Kconfig +++ b/trunk/drivers/usb/host/Kconfig @@ -507,7 +507,7 @@ endif # USB_OHCI_HCD config USB_UHCI_HCD tristate "UHCI HCD (most Intel and VIA) support" - depends on PCI || USB_UHCI_SUPPORT_NON_PCI_HC + depends on PCI || SPARC_LEON || ARCH_VT8500 ---help--- The Universal Host Controller Interface is a standard by Intel for accessing the USB hardware in the PC (which is also called the USB @@ -524,19 +524,26 @@ config USB_UHCI_HCD config USB_UHCI_SUPPORT_NON_PCI_HC bool - default y if (SPARC_LEON || USB_UHCI_PLATFORM) + depends on USB_UHCI_HCD + default y if (SPARC_LEON || ARCH_VT8500) config USB_UHCI_PLATFORM - bool + bool "Generic UHCI Platform Driver support" + depends on USB_UHCI_SUPPORT_NON_PCI_HC default y if ARCH_VT8500 + ---help--- + Enable support for generic UHCI platform devices that require no + additional configuration. 
config USB_UHCI_BIG_ENDIAN_MMIO bool - default y if SPARC_LEON + depends on USB_UHCI_SUPPORT_NON_PCI_HC && SPARC_LEON + default y config USB_UHCI_BIG_ENDIAN_DESC bool - default y if SPARC_LEON + depends on USB_UHCI_SUPPORT_NON_PCI_HC && SPARC_LEON + default y config USB_FHCI_HCD tristate "Freescale QE USB Host Controller support" diff --git a/trunk/drivers/usb/host/ehci-atmel.c b/trunk/drivers/usb/host/ehci-atmel.c index 02f4611faa62..66420097c242 100644 --- a/trunk/drivers/usb/host/ehci-atmel.c +++ b/trunk/drivers/usb/host/ehci-atmel.c @@ -63,6 +63,8 @@ static void atmel_stop_ehci(struct platform_device *pdev) /*-------------------------------------------------------------------------*/ +static u64 at91_ehci_dma_mask = DMA_BIT_MASK(32); + static int ehci_atmel_drv_probe(struct platform_device *pdev) { struct usb_hcd *hcd; @@ -91,9 +93,7 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev) * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - if (!pdev->dev.coherent_dma_mask) - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + pdev->dev.dma_mask = &at91_ehci_dma_mask; hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { diff --git a/trunk/drivers/usb/host/ehci-hcd.c b/trunk/drivers/usb/host/ehci-hcd.c index 246e124e6ac5..312fc10da3c7 100644 --- a/trunk/drivers/usb/host/ehci-hcd.c +++ b/trunk/drivers/usb/host/ehci-hcd.c @@ -1286,6 +1286,23 @@ MODULE_LICENSE ("GPL"); #define PLATFORM_DRIVER ehci_hcd_sead3_driver #endif +#if !IS_ENABLED(CONFIG_USB_EHCI_PCI) && \ + !IS_ENABLED(CONFIG_USB_EHCI_HCD_PLATFORM) && \ + !IS_ENABLED(CONFIG_USB_CHIPIDEA_HOST) && \ + !IS_ENABLED(CONFIG_USB_EHCI_MXC) && \ + !IS_ENABLED(CONFIG_USB_EHCI_HCD_OMAP) && \ + !IS_ENABLED(CONFIG_USB_EHCI_HCD_ORION) && \ + !IS_ENABLED(CONFIG_USB_EHCI_HCD_SPEAR) && \ + !IS_ENABLED(CONFIG_USB_EHCI_S5P) && \ + !IS_ENABLED(CONFIG_USB_EHCI_HCD_AT91) && \ + !IS_ENABLED(CONFIG_USB_EHCI_MSM) && \ + !defined(PLATFORM_DRIVER) && \ + !defined(PS3_SYSTEM_BUS_DRIVER) && \ + !defined(OF_PLATFORM_DRIVER) && \ + !defined(XILINX_OF_PLATFORM_DRIVER) +#error "missing bus glue for ehci-hcd" +#endif + static int __init ehci_hcd_init(void) { int retval = 0; diff --git a/trunk/drivers/usb/host/ehci-omap.c b/trunk/drivers/usb/host/ehci-omap.c index 16d7150e8557..3d1491b5f360 100644 --- a/trunk/drivers/usb/host/ehci-omap.c +++ b/trunk/drivers/usb/host/ehci-omap.c @@ -90,6 +90,8 @@ static const struct ehci_driver_overrides ehci_omap_overrides __initdata = { .extra_priv_size = sizeof(struct omap_hcd), }; +static u64 omap_ehci_dma_mask = DMA_BIT_MASK(32); + /** * ehci_hcd_omap_probe - initialize TI-based HCDs * @@ -144,10 +146,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) * Since shared usb code relies on it, set it here for now. * Once we have dma capability bindings this can go away. 
*/ - if (!dev->dma_mask) - dev->dma_mask = &dev->coherent_dma_mask; - if (!dev->coherent_dma_mask) - dev->coherent_dma_mask = DMA_BIT_MASK(32); + if (!pdev->dev.dma_mask) + pdev->dev.dma_mask = &omap_ehci_dma_mask; hcd = usb_create_hcd(&ehci_omap_hc_driver, dev, dev_name(dev)); diff --git a/trunk/drivers/usb/host/ehci-orion.c b/trunk/drivers/usb/host/ehci-orion.c index efbc588b48c5..54c579485150 100644 --- a/trunk/drivers/usb/host/ehci-orion.c +++ b/trunk/drivers/usb/host/ehci-orion.c @@ -137,6 +137,8 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd, } } +static u64 ehci_orion_dma_mask = DMA_BIT_MASK(32); + static int ehci_orion_drv_probe(struct platform_device *pdev) { struct orion_ehci_data *pd = pdev->dev.platform_data; @@ -181,9 +183,7 @@ static int ehci_orion_drv_probe(struct platform_device *pdev) * now. Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - if (!pdev->dev.coherent_dma_mask) - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + pdev->dev.dma_mask = &ehci_orion_dma_mask; if (!request_mem_region(res->start, resource_size(res), ehci_orion_hc_driver.description)) { diff --git a/trunk/drivers/usb/host/ehci-s5p.c b/trunk/drivers/usb/host/ehci-s5p.c index 379037f51a2f..635775278c7f 100644 --- a/trunk/drivers/usb/host/ehci-s5p.c +++ b/trunk/drivers/usb/host/ehci-s5p.c @@ -71,6 +71,8 @@ static void s5p_setup_vbus_gpio(struct platform_device *pdev) dev_err(dev, "can't request ehci vbus gpio %d", gpio); } +static u64 ehci_s5p_dma_mask = DMA_BIT_MASK(32); + static int s5p_ehci_probe(struct platform_device *pdev) { struct s5p_ehci_platdata *pdata = pdev->dev.platform_data; @@ -88,7 +90,7 @@ static int s5p_ehci_probe(struct platform_device *pdev) * Once we move to full device tree support this will vanish off. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + pdev->dev.dma_mask = &ehci_s5p_dma_mask; if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); @@ -105,7 +107,6 @@ static int s5p_ehci_probe(struct platform_device *pdev) if (IS_ERR(phy)) { /* Fallback to pdata */ if (!pdata) { - usb_put_hcd(hcd); dev_warn(&pdev->dev, "no platform data or transceiver defined\n"); return -EPROBE_DEFER; } else { diff --git a/trunk/drivers/usb/host/ehci-sched.c b/trunk/drivers/usb/host/ehci-sched.c index f80d0330d548..acff5b8f6e89 100644 --- a/trunk/drivers/usb/host/ehci-sched.c +++ b/trunk/drivers/usb/host/ehci-sched.c @@ -213,7 +213,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask) } static const unsigned char -max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 }; +max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 }; /* carryover low/fullspeed bandwidth that crosses uframe boundries */ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) @@ -646,10 +646,6 @@ static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) /* reschedule QH iff another request is queued */ if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) { rc = qh_schedule(ehci, qh); - if (rc == 0) { - qh_refresh(ehci, qh); - qh_link_periodic(ehci, qh); - } /* An error here likely indicates handshake failure * or no space left in the schedule. 
Neither fault @@ -657,10 +653,9 @@ static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) * * FIXME kill the now-dysfunctional queued urbs */ - else { + if (rc != 0) ehci_err(ehci, "can't reschedule qh %p, err %d\n", qh, rc); - } } /* maybe turn off periodic schedule */ diff --git a/trunk/drivers/usb/host/ehci-spear.c b/trunk/drivers/usb/host/ehci-spear.c index bd3e5cbc6240..61ecfb3d52f5 100644 --- a/trunk/drivers/usb/host/ehci-spear.c +++ b/trunk/drivers/usb/host/ehci-spear.c @@ -58,6 +58,8 @@ static int ehci_spear_drv_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(ehci_spear_pm_ops, ehci_spear_drv_suspend, ehci_spear_drv_resume); +static u64 spear_ehci_dma_mask = DMA_BIT_MASK(32); + static int spear_ehci_hcd_drv_probe(struct platform_device *pdev) { struct usb_hcd *hcd ; @@ -82,9 +84,7 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev) * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - if (!pdev->dev.coherent_dma_mask) - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + pdev->dev.dma_mask = &spear_ehci_dma_mask; usbh_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(usbh_clk)) { diff --git a/trunk/drivers/usb/host/ehci-tegra.c b/trunk/drivers/usb/host/ehci-tegra.c index 59d111bf44a9..e3eddc31ac83 100644 --- a/trunk/drivers/usb/host/ehci-tegra.c +++ b/trunk/drivers/usb/host/ehci-tegra.c @@ -637,6 +637,8 @@ static void tegra_ehci_set_phcd(struct usb_phy *x, bool enable) writel(val, base + TEGRA_USB_PORTSC1); } +static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32); + static int tegra_ehci_probe(struct platform_device *pdev) { struct resource *res; @@ -659,9 +661,7 @@ static int tegra_ehci_probe(struct platform_device *pdev) * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - if (!pdev->dev.coherent_dma_mask) - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + pdev->dev.dma_mask = &tegra_ehci_dma_mask; setup_vbus_gpio(pdev, pdata); diff --git a/trunk/drivers/usb/host/isp1760-hcd.c b/trunk/drivers/usb/host/isp1760-hcd.c index 2facee53eab1..125e261f5bfc 100644 --- a/trunk/drivers/usb/host/isp1760-hcd.c +++ b/trunk/drivers/usb/host/isp1760-hcd.c @@ -1739,7 +1739,7 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf) int retval = 1; unsigned long flags; - /* if !PM_RUNTIME, root hub timers won't get shut down ... */ + /* if !USB_SUSPEND, root hub timers won't get shut down ... 
*/ if (!HC_IS_RUNNING(hcd->state)) return 0; diff --git a/trunk/drivers/usb/host/isp1760-if.c b/trunk/drivers/usb/host/isp1760-if.c index a13709ee4e5d..bbb791bd7617 100644 --- a/trunk/drivers/usb/host/isp1760-if.c +++ b/trunk/drivers/usb/host/isp1760-if.c @@ -373,10 +373,8 @@ static int isp1760_plat_probe(struct platform_device *pdev) irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!irq_res) { pr_warning("isp1760: IRQ resource not available\n"); - ret = -ENODEV; - goto cleanup; + return -ENODEV; } - irqflags |= irq_res->flags & IRQF_TRIGGER_MASK; if (priv) { diff --git a/trunk/drivers/usb/host/ohci-at91.c b/trunk/drivers/usb/host/ohci-at91.c index 2ee1496dbc1d..a0cb44f0e724 100644 --- a/trunk/drivers/usb/host/ohci-at91.c +++ b/trunk/drivers/usb/host/ohci-at91.c @@ -504,6 +504,8 @@ static const struct of_device_id at91_ohci_dt_ids[] = { MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids); +static u64 at91_ohci_dma_mask = DMA_BIT_MASK(32); + static int ohci_at91_of_init(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -520,9 +522,7 @@ static int ohci_at91_of_init(struct platform_device *pdev) * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - if (!pdev->dev.coherent_dma_mask) - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + pdev->dev.dma_mask = &at91_ohci_dma_mask; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) diff --git a/trunk/drivers/usb/host/ohci-exynos.c b/trunk/drivers/usb/host/ohci-exynos.c index b0b542c14e31..07592c00af26 100644 --- a/trunk/drivers/usb/host/ohci-exynos.c +++ b/trunk/drivers/usb/host/ohci-exynos.c @@ -98,6 +98,8 @@ static const struct hc_driver exynos_ohci_hc_driver = { .start_port_reset = ohci_start_port_reset, }; +static u64 ohci_exynos_dma_mask = DMA_BIT_MASK(32); + static int exynos_ohci_probe(struct platform_device *pdev) { struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data; @@ -115,7 +117,7 @@ static int exynos_ohci_probe(struct platform_device *pdev) * Once we move to full device tree support this will vanish off. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + pdev->dev.dma_mask = &ohci_exynos_dma_mask; if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); diff --git a/trunk/drivers/usb/host/ohci-hcd.c b/trunk/drivers/usb/host/ohci-hcd.c index fc627fd54116..9e6de9586ae4 100644 --- a/trunk/drivers/usb/host/ohci-hcd.c +++ b/trunk/drivers/usb/host/ohci-hcd.c @@ -233,14 +233,14 @@ static int ohci_urb_enqueue ( urb->start_frame = frame; } } else if (ed->type == PIPE_ISOCHRONOUS) { - u16 next = ohci_frame_no(ohci) + 1; + u16 next = ohci_frame_no(ohci) + 2; u16 frame = ed->last_iso + ed->interval; /* Behind the scheduling threshold? */ if (unlikely(tick_before(frame, next))) { /* USB_ISO_ASAP: Round up to the first available slot */ - if (urb->transfer_flags & URB_ISO_ASAP) { + if (urb->transfer_flags & URB_ISO_ASAP) frame += (next - frame + ed->interval - 1) & -ed->interval; @@ -248,25 +248,21 @@ static int ohci_urb_enqueue ( * Not ASAP: Use the next slot in the stream. If * the entire URB falls before the threshold, fail. */ - } else { - if (tick_before(frame + ed->interval * + else if (tick_before(frame + ed->interval * (urb->number_of_packets - 1), next)) { - retval = -EXDEV; - usb_hcd_unlink_urb_from_ep(hcd, urb); - goto fail; - } - - /* - * Some OHCI hardware doesn't handle late TDs - * correctly. 
After retiring them it proceeds - * to the next ED instead of the next TD. - * Therefore we have to omit the late TDs - * entirely. - */ - urb_priv->td_cnt = DIV_ROUND_UP( - (u16) (next - frame), - ed->interval); + retval = -EXDEV; + usb_hcd_unlink_urb_from_ep(hcd, urb); + goto fail; } + + /* + * Some OHCI hardware doesn't handle late TDs + * correctly. After retiring them it proceeds to + * the next ED instead of the next TD. Therefore + * we have to omit the late TDs entirely. + */ + urb_priv->td_cnt = DIV_ROUND_UP(next - frame, + ed->interval); } urb->start_frame = frame; } diff --git a/trunk/drivers/usb/host/ohci-nxp.c b/trunk/drivers/usb/host/ohci-nxp.c index 5d7eb72c5064..f4988fbe78e7 100644 --- a/trunk/drivers/usb/host/ohci-nxp.c +++ b/trunk/drivers/usb/host/ohci-nxp.c @@ -223,7 +223,8 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) isp1301_i2c_client = isp1301_get_client(isp1301_node); if (!isp1301_i2c_client) { - return -EPROBE_DEFER; + ret = -EPROBE_DEFER; + goto out; } pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); @@ -233,7 +234,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) if (usb_disabled()) { dev_err(&pdev->dev, "USB is disabled\n"); ret = -ENODEV; - goto fail_disable; + goto out; } /* Enable AHB slave USB clock, needed for further USB clock control */ @@ -244,19 +245,19 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) if (IS_ERR(usb_pll_clk)) { dev_err(&pdev->dev, "failed to acquire USB PLL\n"); ret = PTR_ERR(usb_pll_clk); - goto fail_pll; + goto out1; } ret = clk_enable(usb_pll_clk); if (ret < 0) { dev_err(&pdev->dev, "failed to start USB PLL\n"); - goto fail_pllen; + goto out2; } ret = clk_set_rate(usb_pll_clk, 48000); if (ret < 0) { dev_err(&pdev->dev, "failed to set USB clock rate\n"); - goto fail_rate; + goto out3; } /* Enable USB device clock */ @@ -264,13 +265,13 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) if (IS_ERR(usb_dev_clk)) { dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n"); ret = PTR_ERR(usb_dev_clk); - goto fail_dev; + goto out4; } ret = clk_enable(usb_dev_clk); if (ret < 0) { dev_err(&pdev->dev, "failed to start USB DEV Clock\n"); - goto fail_deven; + goto out5; } /* Enable USB otg clocks */ @@ -278,7 +279,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) if (IS_ERR(usb_otg_clk)) { dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n"); ret = PTR_ERR(usb_otg_clk); - goto fail_otg; + goto out6; } __raw_writel(__raw_readl(USB_CTRL) | USB_HOST_NEED_CLK_EN, USB_CTRL); @@ -286,7 +287,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) ret = clk_enable(usb_otg_clk); if (ret < 0) { dev_err(&pdev->dev, "failed to start USB DEV Clock\n"); - goto fail_otgen; + goto out7; } isp1301_configure(); @@ -295,14 +296,20 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) if (!hcd) { dev_err(&pdev->dev, "Failed to allocate HC buffer\n"); ret = -ENOMEM; - goto fail_hcd; + goto out8; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "Failed to get MEM resource\n"); + ret = -ENOMEM; + goto out8; + } + hcd->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(hcd->regs)) { ret = PTR_ERR(hcd->regs); - goto fail_resource; + goto out8; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); @@ -310,7 +317,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = -ENXIO; - goto fail_resource; + goto out8; } nxp_start_hc(); @@ -324,24 +331,23 @@ static int 
usb_hcd_nxp_probe(struct platform_device *pdev) return ret; nxp_stop_hc(); -fail_resource: +out8: usb_put_hcd(hcd); -fail_hcd: +out7: clk_disable(usb_otg_clk); -fail_otgen: +out6: clk_put(usb_otg_clk); -fail_otg: +out5: clk_disable(usb_dev_clk); -fail_deven: +out4: clk_put(usb_dev_clk); -fail_dev: -fail_rate: +out3: clk_disable(usb_pll_clk); -fail_pllen: +out2: clk_put(usb_pll_clk); -fail_pll: -fail_disable: +out1: isp1301_i2c_client = NULL; +out: return ret; } diff --git a/trunk/drivers/usb/host/ohci-omap3.c b/trunk/drivers/usb/host/ohci-omap3.c index 8663851c8d8e..ddfc31427bc0 100644 --- a/trunk/drivers/usb/host/ohci-omap3.c +++ b/trunk/drivers/usb/host/ohci-omap3.c @@ -114,6 +114,8 @@ static const struct hc_driver ohci_omap3_hc_driver = { /*-------------------------------------------------------------------------*/ +static u64 omap_ohci_dma_mask = DMA_BIT_MASK(32); + /* * configure so an HC device and id are always provided * always called with process context; sleeping is OK @@ -166,10 +168,8 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev) * Since shared usb code relies on it, set it here for now. * Once we have dma capability bindings this can go away. */ - if (!dev->dma_mask) - dev->dma_mask = &dev->coherent_dma_mask; - if (!dev->coherent_dma_mask) - dev->coherent_dma_mask = DMA_BIT_MASK(32); + if (!pdev->dev.dma_mask) + pdev->dev.dma_mask = &omap_ohci_dma_mask; hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev, dev_name(dev)); diff --git a/trunk/drivers/usb/host/ohci-pxa27x.c b/trunk/drivers/usb/host/ohci-pxa27x.c index 279b2ef17411..efe71f3ca477 100644 --- a/trunk/drivers/usb/host/ohci-pxa27x.c +++ b/trunk/drivers/usb/host/ohci-pxa27x.c @@ -282,6 +282,8 @@ static const struct of_device_id pxa_ohci_dt_ids[] = { MODULE_DEVICE_TABLE(of, pxa_ohci_dt_ids); +static u64 pxa_ohci_dma_mask = DMA_BIT_MASK(32); + static int ohci_pxa_of_init(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -296,9 +298,7 @@ static int ohci_pxa_of_init(struct platform_device *pdev) * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - if (!pdev->dev.coherent_dma_mask) - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + pdev->dev.dma_mask = &pxa_ohci_dma_mask; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) diff --git a/trunk/drivers/usb/host/ohci-spear.c b/trunk/drivers/usb/host/ohci-spear.c index 3e19e0170d11..9020bf0e2eca 100644 --- a/trunk/drivers/usb/host/ohci-spear.c +++ b/trunk/drivers/usb/host/ohci-spear.c @@ -91,6 +91,8 @@ static const struct hc_driver ohci_spear_hc_driver = { .start_port_reset = ohci_start_port_reset, }; +static u64 spear_ohci_dma_mask = DMA_BIT_MASK(32); + static int spear_ohci_hcd_drv_probe(struct platform_device *pdev) { const struct hc_driver *driver = &ohci_spear_hc_driver; @@ -112,9 +114,7 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev) * Once we have dma capability bindings this can go away. 
*/ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - if (!pdev->dev.coherent_dma_mask) - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + pdev->dev.dma_mask = &spear_ohci_dma_mask; usbh_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(usbh_clk)) { diff --git a/trunk/drivers/usb/host/oxu210hp-hcd.c b/trunk/drivers/usb/host/oxu210hp-hcd.c index 0f401dbfaf07..4f0f0339532f 100644 --- a/trunk/drivers/usb/host/oxu210hp-hcd.c +++ b/trunk/drivers/usb/host/oxu210hp-hcd.c @@ -3084,7 +3084,7 @@ static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf) int ports, i, retval = 1; unsigned long flags; - /* if !PM_RUNTIME, root hub timers won't get shut down ... */ + /* if !USB_SUSPEND, root hub timers won't get shut down ... */ if (!HC_IS_RUNNING(hcd->state)) return 0; diff --git a/trunk/drivers/usb/host/sl811-hcd.c b/trunk/drivers/usb/host/sl811-hcd.c index b2ec7fe758dd..ad4483efb6d6 100644 --- a/trunk/drivers/usb/host/sl811-hcd.c +++ b/trunk/drivers/usb/host/sl811-hcd.c @@ -22,7 +22,7 @@ * and usb-storage. * * TODO: - * - usb suspend/resume triggered by sl811 (with PM_RUNTIME) + * - usb suspend/resume triggered by sl811 (with USB_SUSPEND) * - various issues noted in the code * - performance work; use both register banks; ... * - use urb->iso_frame_desc[] with ISO transfers diff --git a/trunk/drivers/usb/host/uhci-hub.c b/trunk/drivers/usb/host/uhci-hub.c index 9189bc984c98..f87bee6d2789 100644 --- a/trunk/drivers/usb/host/uhci-hub.c +++ b/trunk/drivers/usb/host/uhci-hub.c @@ -225,8 +225,7 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf) /* auto-stop if nothing connected for 1 second */ if (any_ports_active(uhci)) uhci->rh_state = UHCI_RH_RUNNING; - else if (time_after_eq(jiffies, uhci->auto_stop_time) && - !uhci->wait_for_hp) + else if (time_after_eq(jiffies, uhci->auto_stop_time)) suspend_rh(uhci, UHCI_RH_AUTO_STOPPED); break; diff --git a/trunk/drivers/usb/host/uhci-platform.c b/trunk/drivers/usb/host/uhci-platform.c index f1db61ada6a8..8c4dace4b14a 100644 --- a/trunk/drivers/usb/host/uhci-platform.c +++ b/trunk/drivers/usb/host/uhci-platform.c @@ -60,6 +60,8 @@ static const struct hc_driver uhci_platform_hc_driver = { .hub_control = uhci_hub_control, }; +static u64 platform_uhci_dma_mask = DMA_BIT_MASK(32); + static int uhci_hcd_platform_probe(struct platform_device *pdev) { struct usb_hcd *hcd; @@ -76,9 +78,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev) * Once we have dma capability bindings this can go away. 
*/ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - if (!pdev->dev.coherent_dma_mask) - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + pdev->dev.dma_mask = &platform_uhci_dma_mask; hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev, pdev->name); diff --git a/trunk/drivers/usb/host/uhci-q.c b/trunk/drivers/usb/host/uhci-q.c index 041c6ddb695c..f0976d8190bc 100644 --- a/trunk/drivers/usb/host/uhci-q.c +++ b/trunk/drivers/usb/host/uhci-q.c @@ -1287,7 +1287,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, return -EINVAL; /* Can't change the period */ } else { - next = uhci->frame_number + 1; + next = uhci->frame_number + 2; /* Find the next unused frame */ if (list_empty(&qh->queue)) { diff --git a/trunk/drivers/usb/host/xhci-mem.c b/trunk/drivers/usb/host/xhci-mem.c index fbf75e57628b..965b539bc474 100644 --- a/trunk/drivers/usb/host/xhci-mem.c +++ b/trunk/drivers/usb/host/xhci-mem.c @@ -1423,17 +1423,15 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep)); /* Set the max packet size and max burst */ - max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); - max_burst = 0; switch (udev->speed) { case USB_SPEED_SUPER: + max_packet = usb_endpoint_maxp(&ep->desc); + ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); /* dig out max burst from ep companion desc */ - max_burst = ep->ss_ep_comp.bMaxBurst; + max_packet = ep->ss_ep_comp.bMaxBurst; + ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet)); break; case USB_SPEED_HIGH: - /* Some devices get this wrong */ - if (usb_endpoint_xfer_bulk(&ep->desc)) - max_packet = 512; /* bits 11:12 specify the number of additional transaction * opportunities per microframe (USB 2.0, section 9.6.6) */ @@ -1441,16 +1439,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, usb_endpoint_xfer_int(&ep->desc)) { max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11; + ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst)); } - break; + /* Fall through */ case USB_SPEED_FULL: case USB_SPEED_LOW: + max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); + ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); break; default: BUG(); } - ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) | - MAX_BURST(max_burst)); max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep); ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload)); @@ -1827,9 +1826,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) } spin_unlock_irqrestore(&xhci->lock, flags); - if (!xhci->rh_bw) - goto no_bw; - num_ports = HCS_MAX_PORTS(xhci->hcs_params1); for (i = 0; i < num_ports; i++) { struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; @@ -1848,7 +1844,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) } } -no_bw: xhci->num_usb2_ports = 0; xhci->num_usb3_ports = 0; xhci->num_active_eps = 0; @@ -2260,9 +2255,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) u32 page_size, temp; int i; - INIT_LIST_HEAD(&xhci->lpm_failed_devs); - INIT_LIST_HEAD(&xhci->cancel_cmd_list); - page_size = xhci_readl(xhci, &xhci->op_regs->page_size); xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size); for (i = 0; i < 16; i++) { @@ -2341,6 +2333,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags); if (!xhci->cmd_ring) goto fail; + INIT_LIST_HEAD(&xhci->cancel_cmd_list); xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring); xhci_dbg(xhci, 
"First segment DMA is 0x%llx\n", (unsigned long long)xhci->cmd_ring->first_seg->dma); @@ -2451,6 +2444,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) if (xhci_setup_port_arrays(xhci, flags)) goto fail; + INIT_LIST_HEAD(&xhci->lpm_failed_devs); + /* Enable USB 3.0 device notifications for function remote wake, which * is necessary for allowing USB 3.0 devices to do remote wakeup from * U3 (device suspend). diff --git a/trunk/drivers/usb/host/xhci-pci.c b/trunk/drivers/usb/host/xhci-pci.c index cc24e39b97d5..1a30c380043c 100644 --- a/trunk/drivers/usb/host/xhci-pci.c +++ b/trunk/drivers/usb/host/xhci-pci.c @@ -221,14 +221,6 @@ static void xhci_pci_remove(struct pci_dev *dev) static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); - struct pci_dev *pdev = to_pci_dev(hcd->self.controller); - - /* - * Systems with the TI redriver that loses port status change events - * need to have the registers polled during D3, so avoid D3cold. - */ - if (xhci_compliance_mode_recovery_timer_quirk_check()) - pdev->no_d3cold = true; return xhci_suspend(xhci); } diff --git a/trunk/drivers/usb/host/xhci.c b/trunk/drivers/usb/host/xhci.c index d8f640b12dd9..b4aa79d154b2 100644 --- a/trunk/drivers/usb/host/xhci.c +++ b/trunk/drivers/usb/host/xhci.c @@ -466,7 +466,7 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci) * Systems: * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820 */ -bool xhci_compliance_mode_recovery_timer_quirk_check(void) +static bool compliance_mode_recovery_timer_quirk_check(void) { const char *dmi_product_name, *dmi_sys_vendor; @@ -517,7 +517,7 @@ int xhci_init(struct usb_hcd *hcd) xhci_dbg(xhci, "Finished xhci_init\n"); /* Initializing Compliance Mode Recovery Data If Needed */ - if (xhci_compliance_mode_recovery_timer_quirk_check()) { + if (compliance_mode_recovery_timer_quirk_check()) { xhci->quirks |= XHCI_COMP_MODE_QUIRK; compliance_mode_recovery_timer_init(xhci); } @@ -956,7 +956,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) struct usb_hcd *hcd = xhci_to_hcd(xhci); struct usb_hcd *secondary_hcd; int retval = 0; - bool comp_timer_running = false; /* Wait a bit if either of the roothubs need to settle from the * transition into bus suspend. @@ -994,13 +993,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) /* If restore operation fails, re-initialize the HC during resume */ if ((temp & STS_SRE) || hibernated) { - - if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && - !(xhci_all_ports_seen_u0(xhci))) { - del_timer_sync(&xhci->comp_mode_recovery_timer); - xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n"); - } - /* Let the USB core know _both_ roothubs lost power. */ usb_root_hub_lost_power(xhci->main_hcd->self.root_hub); usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); @@ -1043,8 +1035,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) retval = xhci_init(hcd->primary_hcd); if (retval) return retval; - comp_timer_running = true; - xhci_dbg(xhci, "Start the primary HCD\n"); retval = xhci_run(hcd->primary_hcd); if (!retval) { @@ -1086,7 +1076,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) * to suffer the Compliance Mode issue again. It doesn't matter if * ports have entered previously to U0 before system's suspension. */ - if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running) + if (xhci->quirks & XHCI_COMP_MODE_QUIRK) compliance_mode_recovery_timer_init(xhci); /* Re-enable port polling. 
*/ diff --git a/trunk/drivers/usb/host/xhci.h b/trunk/drivers/usb/host/xhci.h index 77600cefcaf1..29c978e37135 100644 --- a/trunk/drivers/usb/host/xhci.h +++ b/trunk/drivers/usb/host/xhci.h @@ -1853,7 +1853,4 @@ struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index); -/* xHCI quirks */ -bool xhci_compliance_mode_recovery_timer_quirk_check(void); - #endif /* __LINUX_XHCI_HCD_H */ diff --git a/trunk/drivers/usb/misc/adutux.c b/trunk/drivers/usb/misc/adutux.c index 284b85461410..c793aa6f7a67 100644 --- a/trunk/drivers/usb/misc/adutux.c +++ b/trunk/drivers/usb/misc/adutux.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #ifdef CONFIG_USB_DEBUG static int debug = 5; diff --git a/trunk/drivers/usb/musb/musb_dsps.c b/trunk/drivers/usb/musb/musb_dsps.c index e1b661d04021..3a18e44e9391 100644 --- a/trunk/drivers/usb/musb/musb_dsps.c +++ b/trunk/drivers/usb/musb/musb_dsps.c @@ -560,7 +560,6 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue, u8 id) if (!config) { dev_err(&pdev->dev, "failed to allocate musb hdrc config\n"); - ret = -ENOMEM; goto err2; } diff --git a/trunk/drivers/usb/musb/musb_host.c b/trunk/drivers/usb/musb/musb_host.c index 9d3044bdebe5..8914dec49f01 100644 --- a/trunk/drivers/usb/musb/musb_host.c +++ b/trunk/drivers/usb/musb/musb_host.c @@ -1232,6 +1232,7 @@ void musb_host_tx(struct musb *musb, u8 epnum) void __iomem *mbase = musb->mregs; struct dma_channel *dma; bool transfer_pending = false; + static bool use_sg; musb_ep_select(mbase, epnum); tx_csr = musb_readw(epio, MUSB_TXCSR); @@ -1462,9 +1463,9 @@ void musb_host_tx(struct musb *musb, u8 epnum) * NULL. */ if (!urb->transfer_buffer) - qh->use_sg = true; + use_sg = true; - if (qh->use_sg) { + if (use_sg) { /* sg_miter_start is already done in musb_ep_program */ if (!sg_miter_next(&qh->sg_miter)) { dev_err(musb->controller, "error: sg list empty\n"); @@ -1483,9 +1484,9 @@ void musb_host_tx(struct musb *musb, u8 epnum) qh->segsize = length; - if (qh->use_sg) { + if (use_sg) { if (offset + length >= urb->transfer_buffer_length) - qh->use_sg = false; + use_sg = false; } musb_ep_select(mbase, epnum); @@ -1551,6 +1552,7 @@ void musb_host_rx(struct musb *musb, u8 epnum) bool done = false; u32 status; struct dma_channel *dma; + static bool use_sg; unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; musb_ep_select(mbase, epnum); @@ -1876,12 +1878,12 @@ void musb_host_rx(struct musb *musb, u8 epnum) * NULL. 
*/ if (!urb->transfer_buffer) { - qh->use_sg = true; + use_sg = true; sg_miter_start(&qh->sg_miter, urb->sg, 1, sg_flags); } - if (qh->use_sg) { + if (use_sg) { if (!sg_miter_next(&qh->sg_miter)) { dev_err(musb->controller, "error: sg list empty\n"); sg_miter_stop(&qh->sg_miter); @@ -1911,8 +1913,8 @@ void musb_host_rx(struct musb *musb, u8 epnum) urb->actual_length += xfer_len; qh->offset += xfer_len; if (done) { - if (qh->use_sg) - qh->use_sg = false; + if (use_sg) + use_sg = false; if (urb->status == -EINPROGRESS) urb->status = status; diff --git a/trunk/drivers/usb/musb/musb_host.h b/trunk/drivers/usb/musb/musb_host.h index 738f7eb60df9..5a9c8feec10c 100644 --- a/trunk/drivers/usb/musb/musb_host.h +++ b/trunk/drivers/usb/musb/musb_host.h @@ -74,7 +74,6 @@ struct musb_qh { u16 frame; /* for periodic schedule */ unsigned iso_idx; /* in urb->iso_frame_desc[] */ struct sg_mapping_iter sg_miter; /* for highmem in PIO mode */ - bool use_sg; /* to track urb using sglist */ }; /* map from control or bulk queue head to the first qh on that ring */ diff --git a/trunk/drivers/usb/musb/omap2430.c b/trunk/drivers/usb/musb/omap2430.c index 628b93fe5ccc..3551f1a30c65 100644 --- a/trunk/drivers/usb/musb/omap2430.c +++ b/trunk/drivers/usb/musb/omap2430.c @@ -549,8 +549,7 @@ static int omap2430_probe(struct platform_device *pdev) glue->control_otghs = omap_get_control_dev(); if (IS_ERR(glue->control_otghs)) { dev_vdbg(&pdev->dev, "Failed to get control device\n"); - ret = PTR_ERR(glue->control_otghs); - goto err2; + return -ENODEV; } } else { glue->control_otghs = ERR_PTR(-ENODEV); diff --git a/trunk/drivers/usb/phy/Kconfig b/trunk/drivers/usb/phy/Kconfig index 2311b1e4e43c..371d0e74e909 100644 --- a/trunk/drivers/usb/phy/Kconfig +++ b/trunk/drivers/usb/phy/Kconfig @@ -4,17 +4,11 @@ menuconfig USB_PHY bool "USB Physical Layer drivers" help - Most USB controllers have the physical layer signalling part - (commonly called a PHY) built in. However, dual-role devices - (a.k.a. USB on-the-go) which support being USB master or slave - with the same connector often use an external PHY. + USB controllers (those which are host, device or DRD) need a + device to handle the physical layer signalling, commonly called + a PHY. - The drivers in this submenu add support for such PHY devices. - They are not needed for standard master-only (or the vast - majority of slave-only) USB interfaces. - - If you're not sure if this applies to you, it probably doesn't; - say N here. + The following drivers add support for such PHY devices. if USB_PHY @@ -31,7 +25,7 @@ config AB8500_USB config FSL_USB2_OTG bool "Freescale USB OTG Transceiver Driver" - depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME + depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_SUSPEND select USB_OTG help Enable this to support Freescale USB OTG transceiver. @@ -145,6 +139,7 @@ config USB_ISP1301 tristate "NXP ISP1301 USB transceiver support" depends on USB || USB_GADGET depends on I2C + select USB_OTG_UTILS help Say Y here to add support for the NXP ISP1301 USB transceiver driver. 
This chip is typically used as USB transceiver for USB host, gadget @@ -167,7 +162,7 @@ config USB_MSM_OTG config USB_MV_OTG tristate "Marvell USB OTG support" - depends on USB_EHCI_MV && USB_MV_UDC && PM_RUNTIME + depends on USB_EHCI_MV && USB_MV_UDC && USB_SUSPEND select USB_OTG help Say Y here if you want to build Marvell USB OTG transciever diff --git a/trunk/drivers/usb/phy/phy-ab8500-usb.c b/trunk/drivers/usb/phy/phy-ab8500-usb.c index e5eb1b5a04eb..4acef26a2ef5 100644 --- a/trunk/drivers/usb/phy/phy-ab8500-usb.c +++ b/trunk/drivers/usb/phy/phy-ab8500-usb.c @@ -892,6 +892,8 @@ static int ab8500_usb_remove(struct platform_device *pdev) else if (ab->mode == USB_PERIPHERAL) ab8500_usb_peri_phy_dis(ab); + platform_set_drvdata(pdev, NULL); + return 0; } diff --git a/trunk/drivers/usb/phy/phy-fsl-usb.c b/trunk/drivers/usb/phy/phy-fsl-usb.c index e771bafb9f1d..97b9308507c3 100644 --- a/trunk/drivers/usb/phy/phy-fsl-usb.c +++ b/trunk/drivers/usb/phy/phy-fsl-usb.c @@ -799,7 +799,6 @@ static int fsl_otg_conf(struct platform_device *pdev) /* initialize the otg structure */ fsl_otg_tc->phy.label = DRIVER_DESC; - fsl_otg_tc->phy.dev = &pdev->dev; fsl_otg_tc->phy.set_power = fsl_otg_set_power; fsl_otg_tc->phy.otg->phy = &fsl_otg_tc->phy; diff --git a/trunk/drivers/usb/phy/phy-gpio-vbus-usb.c b/trunk/drivers/usb/phy/phy-gpio-vbus-usb.c index 8443335c2ea0..4c76074e518d 100644 --- a/trunk/drivers/usb/phy/phy-gpio-vbus-usb.c +++ b/trunk/drivers/usb/phy/phy-gpio-vbus-usb.c @@ -266,7 +266,6 @@ static int __init gpio_vbus_probe(struct platform_device *pdev) platform_set_drvdata(pdev, gpio_vbus); gpio_vbus->dev = &pdev->dev; gpio_vbus->phy.label = "gpio-vbus"; - gpio_vbus->phy.dev = gpio_vbus->dev; gpio_vbus->phy.set_power = gpio_vbus_set_power; gpio_vbus->phy.set_suspend = gpio_vbus_set_suspend; gpio_vbus->phy.state = OTG_STATE_UNDEFINED; @@ -344,6 +343,7 @@ static int __init gpio_vbus_probe(struct platform_device *pdev) gpio_free(pdata->gpio_pullup); gpio_free(pdata->gpio_vbus); err_gpio: + platform_set_drvdata(pdev, NULL); kfree(gpio_vbus->phy.otg); kfree(gpio_vbus); return err; @@ -365,6 +365,7 @@ static int __exit gpio_vbus_remove(struct platform_device *pdev) if (gpio_is_valid(pdata->gpio_pullup)) gpio_free(pdata->gpio_pullup); gpio_free(gpio); + platform_set_drvdata(pdev, NULL); kfree(gpio_vbus->phy.otg); kfree(gpio_vbus); diff --git a/trunk/drivers/usb/phy/phy-isp1301.c b/trunk/drivers/usb/phy/phy-isp1301.c index 8a55b37d1a02..225ae6c97eeb 100644 --- a/trunk/drivers/usb/phy/phy-isp1301.c +++ b/trunk/drivers/usb/phy/phy-isp1301.c @@ -102,7 +102,6 @@ static int isp1301_probe(struct i2c_client *client, mutex_init(&isp->mutex); phy = &isp->phy; - phy->dev = &client->dev; phy->label = DRV_NAME; phy->init = isp1301_phy_init; phy->set_vbus = isp1301_phy_set_vbus; diff --git a/trunk/drivers/usb/phy/phy-mv-u3d-usb.c b/trunk/drivers/usb/phy/phy-mv-u3d-usb.c index 1568ea63e338..f7838a43347c 100644 --- a/trunk/drivers/usb/phy/phy-mv-u3d-usb.c +++ b/trunk/drivers/usb/phy/phy-mv-u3d-usb.c @@ -278,6 +278,11 @@ static int mv_u3d_phy_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "missing mem resource\n"); + return -ENODEV; + } + phy_base = devm_ioremap_resource(dev, res); if (IS_ERR(phy_base)) return PTR_ERR(phy_base); diff --git a/trunk/drivers/usb/phy/phy-mv-usb.c b/trunk/drivers/usb/phy/phy-mv-usb.c index 4a6b03c73876..c987bbe27851 100644 --- a/trunk/drivers/usb/phy/phy-mv-usb.c +++ b/trunk/drivers/usb/phy/phy-mv-usb.c @@ -667,6 +667,7 
@@ int mv_otg_remove(struct platform_device *pdev) mv_otg_disable(mvotg); usb_remove_phy(&mvotg->phy); + platform_set_drvdata(pdev, NULL); return 0; } @@ -849,6 +850,8 @@ static int mv_otg_probe(struct platform_device *pdev) flush_workqueue(mvotg->qwork); destroy_workqueue(mvotg->qwork); + platform_set_drvdata(pdev, NULL); + return retval; } diff --git a/trunk/drivers/usb/phy/phy-mxs-usb.c b/trunk/drivers/usb/phy/phy-mxs-usb.c index bd601c537c8d..9d4381e64d51 100644 --- a/trunk/drivers/usb/phy/phy-mxs-usb.c +++ b/trunk/drivers/usb/phy/phy-mxs-usb.c @@ -130,6 +130,11 @@ static int mxs_phy_probe(struct platform_device *pdev) int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "can't get device resources\n"); + return -ENOENT; + } + base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); @@ -155,7 +160,6 @@ static int mxs_phy_probe(struct platform_device *pdev) mxs_phy->phy.set_suspend = mxs_phy_suspend; mxs_phy->phy.notify_connect = mxs_phy_on_connect; mxs_phy->phy.notify_disconnect = mxs_phy_on_disconnect; - mxs_phy->phy.type = USB_PHY_TYPE_USB2; ATOMIC_INIT_NOTIFIER_HEAD(&mxs_phy->phy.notifier); @@ -176,6 +180,8 @@ static int mxs_phy_remove(struct platform_device *pdev) usb_remove_phy(&mxs_phy->phy); + platform_set_drvdata(pdev, NULL); + return 0; } diff --git a/trunk/drivers/usb/phy/phy-nop.c b/trunk/drivers/usb/phy/phy-nop.c index 638cc5dade35..2b10cc969bbb 100644 --- a/trunk/drivers/usb/phy/phy-nop.c +++ b/trunk/drivers/usb/phy/phy-nop.c @@ -254,6 +254,8 @@ static int nop_usb_xceiv_remove(struct platform_device *pdev) usb_remove_phy(&nop->phy); + platform_set_drvdata(pdev, NULL); + return 0; } diff --git a/trunk/drivers/usb/phy/phy-samsung-usb2.c b/trunk/drivers/usb/phy/phy-samsung-usb2.c index 9d5e273abcc7..45ffe036dacc 100644 --- a/trunk/drivers/usb/phy/phy-samsung-usb2.c +++ b/trunk/drivers/usb/phy/phy-samsung-usb2.c @@ -363,6 +363,11 @@ static int samsung_usb2phy_probe(struct platform_device *pdev) int ret; phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!phy_mem) { + dev_err(dev, "%s: missing mem resource\n", __func__); + return -ENODEV; + } + phy_base = devm_ioremap_resource(dev, phy_mem); if (IS_ERR(phy_base)) return PTR_ERR(phy_base); diff --git a/trunk/drivers/usb/phy/phy-samsung-usb3.c b/trunk/drivers/usb/phy/phy-samsung-usb3.c index 5a9efcbcb532..133f3d0c554f 100644 --- a/trunk/drivers/usb/phy/phy-samsung-usb3.c +++ b/trunk/drivers/usb/phy/phy-samsung-usb3.c @@ -239,6 +239,11 @@ static int samsung_usb3phy_probe(struct platform_device *pdev) int ret; phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!phy_mem) { + dev_err(dev, "%s: missing mem resource\n", __func__); + return -ENODEV; + } + phy_base = devm_ioremap_resource(dev, phy_mem); if (IS_ERR(phy_base)) return PTR_ERR(phy_base); diff --git a/trunk/drivers/usb/serial/ark3116.c b/trunk/drivers/usb/serial/ark3116.c index 40e7fd94646f..3b16118cbf62 100644 --- a/trunk/drivers/usb/serial/ark3116.c +++ b/trunk/drivers/usb/serial/ark3116.c @@ -43,7 +43,7 @@ #define DRIVER_NAME "ark3116" /* usb timeout of 1 second */ -#define ARK_TIMEOUT 1000 +#define ARK_TIMEOUT (1*HZ) static const struct usb_device_id id_table[] = { { USB_DEVICE(0x6547, 0x0232) }, diff --git a/trunk/drivers/usb/serial/cypress_m8.c b/trunk/drivers/usb/serial/cypress_m8.c index 082120198f87..d341555d37d8 100644 --- a/trunk/drivers/usb/serial/cypress_m8.c +++ b/trunk/drivers/usb/serial/cypress_m8.c @@ -65,7 +65,6 @@ static const struct usb_device_id 
id_table_earthmate[] = { static const struct usb_device_id id_table_cyphidcomrs232[] = { { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, - { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { } /* Terminating entry */ }; @@ -79,7 +78,6 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, - { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, { } /* Terminating entry */ }; @@ -231,12 +229,6 @@ static struct usb_serial_driver * const serial_drivers[] = { * Cypress serial helper functions *****************************************************************************/ -/* FRWD Dongle hidcom needs to skip reset and speed checks */ -static inline bool is_frwd(struct usb_device *dev) -{ - return ((le16_to_cpu(dev->descriptor.idVendor) == VENDOR_ID_FRWD) && - (le16_to_cpu(dev->descriptor.idProduct) == PRODUCT_ID_CYPHIDCOM_FRWD)); -} static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate) { @@ -246,10 +238,6 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate) if (unstable_bauds) return new_rate; - /* FRWD Dongle uses 115200 bps */ - if (is_frwd(port->serial->dev)) - return new_rate; - /* * The general purpose firmware for the Cypress M8 allows for * a maximum speed of 57600bps (I have no idea whether DeLorme @@ -460,11 +448,7 @@ static int cypress_generic_port_probe(struct usb_serial_port *port) return -ENOMEM; } - /* Skip reset for FRWD device. It is a workaound: - device hangs if it receives SET_CONFIGURE in Configured - state. */ - if (!is_frwd(serial->dev)) - usb_reset_configuration(serial->dev); + usb_reset_configuration(serial->dev); priv->cmd_ctrl = 0; priv->line_control = 0; diff --git a/trunk/drivers/usb/serial/cypress_m8.h b/trunk/drivers/usb/serial/cypress_m8.h index b461311a2ae7..67cf60826884 100644 --- a/trunk/drivers/usb/serial/cypress_m8.h +++ b/trunk/drivers/usb/serial/cypress_m8.h @@ -24,10 +24,6 @@ #define VENDOR_ID_CYPRESS 0x04b4 #define PRODUCT_ID_CYPHIDCOM 0x5500 -/* FRWD Dongle - a GPS sports watch */ -#define VENDOR_ID_FRWD 0x6737 -#define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001 - /* Powercom UPS, chip CY7C63723 */ #define VENDOR_ID_POWERCOM 0x0d9f #define PRODUCT_ID_UPS 0x0002 diff --git a/trunk/drivers/usb/serial/f81232.c b/trunk/drivers/usb/serial/f81232.c index 7d8dd5aad236..090b411d893f 100644 --- a/trunk/drivers/usb/serial/f81232.c +++ b/trunk/drivers/usb/serial/f81232.c @@ -165,12 +165,11 @@ static void f81232_set_termios(struct tty_struct *tty, /* FIXME - Stubbed out for now */ /* Don't change anything if nothing has changed */ - if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios)) + if (!tty_termios_hw_change(&tty->termios, old_termios)) return; /* Do the real work here... 
*/ - if (old_termios) - tty_termios_copy_hw(&tty->termios, old_termios); + tty_termios_copy_hw(&tty->termios, old_termios); } static int f81232_tiocmget(struct tty_struct *tty) @@ -188,11 +187,12 @@ static int f81232_tiocmset(struct tty_struct *tty, static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port) { + struct ktermios tmp_termios; int result; /* Setup termios */ if (tty) - f81232_set_termios(tty, port, NULL); + f81232_set_termios(tty, port, &tmp_termios); result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result) { diff --git a/trunk/drivers/usb/serial/ftdi_sio.c b/trunk/drivers/usb/serial/ftdi_sio.c index 7260ec660347..242b5776648a 100644 --- a/trunk/drivers/usb/serial/ftdi_sio.c +++ b/trunk/drivers/usb/serial/ftdi_sio.c @@ -189,8 +189,6 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) }, { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) }, - { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) }, - { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, @@ -926,8 +924,8 @@ static int ftdi_tiocmset(struct tty_struct *tty, static int ftdi_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg); static void ftdi_break_ctl(struct tty_struct *tty, int break_state); -static bool ftdi_tx_empty(struct usb_serial_port *port); -static int ftdi_get_modem_status(struct usb_serial_port *port, +static int ftdi_chars_in_buffer(struct tty_struct *tty); +static int ftdi_get_modem_status(struct tty_struct *tty, unsigned char status[2]); static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base); @@ -963,7 +961,7 @@ static struct usb_serial_driver ftdi_sio_device = { .ioctl = ftdi_ioctl, .set_termios = ftdi_set_termios, .break_ctl = ftdi_break_ctl, - .tx_empty = ftdi_tx_empty, + .chars_in_buffer = ftdi_chars_in_buffer, }; static struct usb_serial_driver * const serial_drivers[] = { @@ -2058,18 +2056,27 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state) } -static bool ftdi_tx_empty(struct usb_serial_port *port) +static int ftdi_chars_in_buffer(struct tty_struct *tty) { + struct usb_serial_port *port = tty->driver_data; + int chars; unsigned char buf[2]; int ret; - ret = ftdi_get_modem_status(port, buf); + chars = usb_serial_generic_chars_in_buffer(tty); + if (chars) + goto out; + + /* Check if hardware buffer is empty. */ + ret = ftdi_get_modem_status(tty, buf); if (ret == 2) { if (!(buf[1] & FTDI_RS_TEMT)) - return false; + chars = 1; } +out: + dev_dbg(&port->dev, "%s - %d\n", __func__, chars); - return true; + return chars; } /* old_termios contains the original termios settings and tty->termios contains @@ -2261,9 +2268,10 @@ static void ftdi_set_termios(struct tty_struct *tty, * Returns the number of status bytes retrieved (device dependant), or * negative error code. 
*/ -static int ftdi_get_modem_status(struct usb_serial_port *port, +static int ftdi_get_modem_status(struct tty_struct *tty, unsigned char status[2]) { + struct usb_serial_port *port = tty->driver_data; struct ftdi_private *priv = usb_get_serial_port_data(port); unsigned char *buf; int len; @@ -2328,7 +2336,7 @@ static int ftdi_tiocmget(struct tty_struct *tty) unsigned char buf[2]; int ret; - ret = ftdi_get_modem_status(port, buf); + ret = ftdi_get_modem_status(tty, buf); if (ret < 0) return ret; diff --git a/trunk/drivers/usb/serial/ftdi_sio_ids.h b/trunk/drivers/usb/serial/ftdi_sio_ids.h index 6dd79253205d..98528270c43c 100644 --- a/trunk/drivers/usb/serial/ftdi_sio_ids.h +++ b/trunk/drivers/usb/serial/ftdi_sio_ids.h @@ -772,8 +772,6 @@ */ #define NEWPORT_VID 0x104D #define NEWPORT_AGILIS_PID 0x3000 -#define NEWPORT_CONEX_CC_PID 0x3002 -#define NEWPORT_CONEX_AGP_PID 0x3006 /* Interbiometrics USB I/O Board */ /* Developed for Interbiometrics by Rudolf Gugler */ diff --git a/trunk/drivers/usb/serial/generic.c b/trunk/drivers/usb/serial/generic.c index ba45170c78e5..297665fdd16d 100644 --- a/trunk/drivers/usb/serial/generic.c +++ b/trunk/drivers/usb/serial/generic.c @@ -253,37 +253,6 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty) } EXPORT_SYMBOL_GPL(usb_serial_generic_chars_in_buffer); -void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout) -{ - struct usb_serial_port *port = tty->driver_data; - unsigned int bps; - unsigned long period; - unsigned long expire; - - bps = tty_get_baud_rate(tty); - if (!bps) - bps = 9600; /* B0 */ - /* - * Use a poll-period of roughly the time it takes to send one - * character or at least one jiffy. - */ - period = max_t(unsigned long, (10 * HZ / bps), 1); - period = min_t(unsigned long, period, timeout); - - dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n", - __func__, jiffies_to_msecs(timeout), - jiffies_to_msecs(period)); - expire = jiffies + timeout; - while (!port->serial->type->tx_empty(port)) { - schedule_timeout_interruptible(period); - if (signal_pending(current)) - break; - if (time_after(jiffies, expire)) - break; - } -} -EXPORT_SYMBOL_GPL(usb_serial_generic_wait_until_sent); - static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port, int index, gfp_t mem_flags) { diff --git a/trunk/drivers/usb/serial/io_ti.c b/trunk/drivers/usb/serial/io_ti.c index 1be6ba7bee27..158bf4bc29cc 100644 --- a/trunk/drivers/usb/serial/io_ti.c +++ b/trunk/drivers/usb/serial/io_ti.c @@ -2019,6 +2019,8 @@ static int edge_chars_in_buffer(struct tty_struct *tty) struct edgeport_port *edge_port = usb_get_serial_port_data(port); int chars = 0; unsigned long flags; + int ret; + if (edge_port == NULL) return 0; @@ -2026,22 +2028,16 @@ static int edge_chars_in_buffer(struct tty_struct *tty) chars = kfifo_len(&edge_port->write_fifo); spin_unlock_irqrestore(&edge_port->ep_lock, flags); + if (!chars) { + ret = tx_active(edge_port); + if (ret > 0) + chars = ret; + } + dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars); return chars; } -static bool edge_tx_empty(struct usb_serial_port *port) -{ - struct edgeport_port *edge_port = usb_get_serial_port_data(port); - int ret; - - ret = tx_active(edge_port); - if (ret > 0) - return false; - - return true; -} - static void edge_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; @@ -2561,7 +2557,6 @@ static struct usb_serial_driver edgeport_1port_device = { .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = 
edge_chars_in_buffer, - .tx_empty = edge_tx_empty, .break_ctl = edge_break, .read_int_callback = edge_interrupt_callback, .read_bulk_callback = edge_bulk_in_callback, @@ -2594,7 +2589,6 @@ static struct usb_serial_driver edgeport_2port_device = { .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, - .tx_empty = edge_tx_empty, .break_ctl = edge_break, .read_int_callback = edge_interrupt_callback, .read_bulk_callback = edge_bulk_in_callback, diff --git a/trunk/drivers/usb/serial/iuu_phoenix.c b/trunk/drivers/usb/serial/iuu_phoenix.c index 790673e5faa7..9d74c278b7b5 100644 --- a/trunk/drivers/usb/serial/iuu_phoenix.c +++ b/trunk/drivers/usb/serial/iuu_phoenix.c @@ -287,7 +287,7 @@ static int bulk_immediate(struct usb_serial_port *port, u8 *buf, u8 count) usb_bulk_msg(serial->dev, usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress), buf, - count, &actual, 1000); + count, &actual, HZ * 1); if (status != IUU_OPERATION_OK) dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status); @@ -307,7 +307,7 @@ static int read_immediate(struct usb_serial_port *port, u8 *buf, u8 count) usb_bulk_msg(serial->dev, usb_rcvbulkpipe(serial->dev, port->bulk_in_endpointAddress), buf, - count, &actual, 1000); + count, &actual, HZ * 1); if (status != IUU_OPERATION_OK) dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status); diff --git a/trunk/drivers/usb/serial/keyspan.c b/trunk/drivers/usb/serial/keyspan.c index 3549d073df22..eb30d7b01f36 100644 --- a/trunk/drivers/usb/serial/keyspan.c +++ b/trunk/drivers/usb/serial/keyspan.c @@ -1548,6 +1548,7 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial, struct keyspan_serial_private *s_priv; struct keyspan_port_private *p_priv; const struct keyspan_device_details *d_details; + int outcont_urb; struct urb *this_urb; int device_port, err; @@ -1558,6 +1559,7 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial, d_details = s_priv->device_details; device_port = port->number - port->serial->minor; + outcont_urb = d_details->outcont_endpoints[port->number]; this_urb = p_priv->outcont_urb; dev_dbg(&port->dev, "%s - endpoint %d\n", __func__, usb_pipeendpoint(this_urb->pipe)); @@ -1683,6 +1685,14 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial, err = usb_submit_urb(this_urb, GFP_ATOMIC); if (err != 0) dev_dbg(&port->dev, "%s - usb_submit_urb(setup) failed (%d)\n", __func__, err); +#if 0 + else { + dev_dbg(&port->dev, "%s - usb_submit_urb(%d) OK %d bytes (end %d)\n", __func__ + outcont_urb, this_urb->transfer_buffer_length, + usb_pipeendpoint(this_urb->pipe)); + } +#endif + return 0; } diff --git a/trunk/drivers/usb/serial/mos7720.c b/trunk/drivers/usb/serial/mos7720.c index f27c621a9297..cc0e54345df9 100644 --- a/trunk/drivers/usb/serial/mos7720.c +++ b/trunk/drivers/usb/serial/mos7720.c @@ -40,7 +40,7 @@ #define DRIVER_DESC "Moschip USB Serial Driver" /* default urb timeout */ -#define MOS_WDR_TIMEOUT 5000 +#define MOS_WDR_TIMEOUT (HZ * 5) #define MOS_MAX_PORT 0x02 #define MOS_WRITE 0x0E @@ -227,22 +227,11 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum, __u8 requesttype = (__u8)0xc0; __u16 index = get_reg_index(reg); __u16 value = get_reg_value(reg, serial_portnum); - u8 *buf; - int status; - - buf = kmalloc(1, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - status = usb_control_msg(usbdev, pipe, request, requesttype, value, - index, buf, 1, MOS_WDR_TIMEOUT); - if (status == 1) - *data = *buf; - else if (status < 0) + int status = 
usb_control_msg(usbdev, pipe, request, requesttype, value, + index, data, 1, MOS_WDR_TIMEOUT); + if (status < 0) dev_err(&usbdev->dev, "mos7720: usb_control_msg() failed: %d", status); - kfree(buf); - return status; } @@ -1629,7 +1618,7 @@ static void change_port_settings(struct tty_struct *tty, mos7720_port->shadowMCR |= (UART_MCR_XONANY); /* To set hardware flow control to the specified * * serial port, in SP1/2_CONTROL_REG */ - if (port_number) + if (port->number) write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01); else write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02); @@ -1938,7 +1927,7 @@ static int mos7720_startup(struct usb_serial *serial) /* setting configuration feature to one */ usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), - (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000); + (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5*HZ); /* start the interrupt urb */ ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL); @@ -1981,7 +1970,7 @@ static void mos7720_release(struct usb_serial *serial) /* wait for synchronous usb calls to return */ if (mos_parport->msg_pending) wait_for_completion_timeout(&mos_parport->syncmsg_compl, - msecs_to_jiffies(MOS_WDR_TIMEOUT)); + MOS_WDR_TIMEOUT); parport_remove_port(mos_parport->pp); usb_set_serial_data(serial, NULL); diff --git a/trunk/drivers/usb/serial/mos7840.c b/trunk/drivers/usb/serial/mos7840.c index 7e998081e1cd..a0d5ea545982 100644 --- a/trunk/drivers/usb/serial/mos7840.c +++ b/trunk/drivers/usb/serial/mos7840.c @@ -2142,21 +2142,13 @@ static int mos7840_ioctl(struct tty_struct *tty, static int mos7810_check(struct usb_serial *serial) { int i, pass_count = 0; - u8 *buf; __u16 data = 0, mcr_data = 0; __u16 test_pattern = 0x55AA; - int res; - - buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL); - if (!buf) - return 0; /* failed to identify 7810 */ /* Store MCR setting */ - res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), + usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ, MCS_RD_RTYPE, 0x0300, MODEM_CONTROL_REGISTER, - buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); - if (res == VENDOR_READ_LENGTH) - mcr_data = *buf; + &mcr_data, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); for (i = 0; i < 16; i++) { /* Send the 1-bit test pattern out to MCS7810 test pin */ @@ -2166,12 +2158,9 @@ static int mos7810_check(struct usb_serial *serial) MODEM_CONTROL_REGISTER, NULL, 0, MOS_WDR_TIMEOUT); /* Read the test pattern back */ - res = usb_control_msg(serial->dev, - usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ, - MCS_RD_RTYPE, 0, GPIO_REGISTER, buf, - VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); - if (res == VENDOR_READ_LENGTH) - data = *buf; + usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), + MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &data, + VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); /* If this is a MCS7810 device, both test patterns must match */ if (((test_pattern >> i) ^ (~data >> 1)) & 0x0001) @@ -2185,8 +2174,6 @@ static int mos7810_check(struct usb_serial *serial) MCS_WR_RTYPE, 0x0300 | mcr_data, MODEM_CONTROL_REGISTER, NULL, 0, MOS_WDR_TIMEOUT); - kfree(buf); - if (pass_count == 16) return 1; @@ -2196,17 +2183,11 @@ static int mos7810_check(struct usb_serial *serial) static int mos7840_calc_num_ports(struct usb_serial *serial) { __u16 data = 0x00; - u8 *buf; int mos7840_num_ports; - buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL); - if (buf) { - usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), - MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf, - VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); 
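/*
 * Illustrative sketch, not part of the patch above: the mos7720/mos7840 hunks
 * in this series differ mainly in whether register reads bounce through a
 * kmalloc()'d buffer or hand a caller-supplied variable straight to
 * usb_control_msg().  The bounce buffer matters because the USB core may
 * DMA-map the transfer buffer, so it must not live on the stack.  The helper
 * name, request codes and register values below are invented for illustration.
 */
#include <linux/slab.h>
#include <linux/usb.h>

static int example_read_byte_reg(struct usb_device *udev, u16 value, u16 index,
				 u8 *data)
{
	u8 *buf;
	int status;

	/* Heap allocation so the buffer is DMA-safe for the host controller. */
	buf = kmalloc(1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 0x0d /* hypothetical vendor read request */,
				 0xc0 /* USB_DIR_IN | USB_TYPE_VENDOR */,
				 value, index, buf, 1,
				 5000 /* timeout in milliseconds */);
	if (status == 1)
		*data = *buf;

	kfree(buf);
	return status < 0 ? status : 0;
}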
- data = *buf; - kfree(buf); - } + usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), + MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &data, + VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); if (serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7810 || serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7820) { diff --git a/trunk/drivers/usb/serial/option.c b/trunk/drivers/usb/serial/option.c index bd4323ddae1a..734372846abb 100644 --- a/trunk/drivers/usb/serial/option.c +++ b/trunk/drivers/usb/serial/option.c @@ -196,7 +196,6 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */ #define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */ -#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */ #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da @@ -250,7 +249,13 @@ static void option_instat_callback(struct urb *urb); #define ZTE_PRODUCT_MF622 0x0001 #define ZTE_PRODUCT_MF628 0x0015 #define ZTE_PRODUCT_MF626 0x0031 +#define ZTE_PRODUCT_CDMA_TECH 0xfffe +#define ZTE_PRODUCT_AC8710 0xfff1 +#define ZTE_PRODUCT_AC2726 0xfff5 +#define ZTE_PRODUCT_AC8710T 0xffff #define ZTE_PRODUCT_MC2718 0xffe8 +#define ZTE_PRODUCT_AD3812 0xffeb +#define ZTE_PRODUCT_MC2716 0xffed #define BENQ_VENDOR_ID 0x04a5 #define BENQ_PRODUCT_H10 0x4068 @@ -336,8 +341,8 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_EU3_E 0x0051 #define CINTERION_PRODUCT_EU3_P 0x0052 #define CINTERION_PRODUCT_PH8 0x0053 -#define CINTERION_PRODUCT_AHXX 0x0055 -#define CINTERION_PRODUCT_PLXX 0x0060 +#define CINTERION_PRODUCT_AH6 0x0055 +#define CINTERION_PRODUCT_PLS8 0x0060 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c @@ -489,10 +494,18 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = { .reserved = BIT(4), }; +static const struct option_blacklist_info zte_ad3812_z_blacklist = { + .sendsetup = BIT(0) | BIT(1) | BIT(2), +}; + static const struct option_blacklist_info zte_mc2718_z_blacklist = { .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4), }; +static const struct option_blacklist_info zte_mc2716_z_blacklist = { + .sendsetup = BIT(1) | BIT(2) | BIT(3), +}; + static const struct option_blacklist_info huawei_cdc12_blacklist = { .reserved = BIT(1) | BIT(2), }; @@ -579,8 +592,6 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff), /* Huawei E1820 */ - .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) }, @@ -760,7 +771,6 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, 
ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -785,6 +795,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ @@ -955,8 +966,6 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */ - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), @@ -1186,9 +1195,16 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, - /* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, @@ -1248,9 +1264,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) }, - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, diff --git a/trunk/drivers/usb/serial/pl2303.c b/trunk/drivers/usb/serial/pl2303.c index 048cd44d51b1..7151659367a0 100644 --- 
a/trunk/drivers/usb/serial/pl2303.c +++ b/trunk/drivers/usb/serial/pl2303.c @@ -284,7 +284,7 @@ static void pl2303_set_termios(struct tty_struct *tty, serial settings even to the same values as before. Thus we actually need to filter in this specific case */ - if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios)) + if (!tty_termios_hw_change(&tty->termios, old_termios)) return; cflag = tty->termios.c_cflag; @@ -293,8 +293,7 @@ static void pl2303_set_termios(struct tty_struct *tty, if (!buf) { dev_err(&port->dev, "%s - out of memory.\n", __func__); /* Report back no change occurred */ - if (old_termios) - tty->termios = *old_termios; + tty->termios = *old_termios; return; } @@ -434,7 +433,7 @@ static void pl2303_set_termios(struct tty_struct *tty, control = priv->line_control; if ((cflag & CBAUD) == B0) priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS); - else if (old_termios && (old_termios->c_cflag & CBAUD) == B0) + else if ((old_termios->c_cflag & CBAUD) == B0) priv->line_control |= (CONTROL_DTR | CONTROL_RTS); if (control != priv->line_control) { control = priv->line_control; @@ -493,6 +492,7 @@ static void pl2303_close(struct usb_serial_port *port) static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port) { + struct ktermios tmp_termios; struct usb_serial *serial = port->serial; struct pl2303_serial_private *spriv = usb_get_serial_data(serial); int result; @@ -508,7 +508,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port) /* Setup termios */ if (tty) - pl2303_set_termios(tty, port, NULL); + pl2303_set_termios(tty, port, &tmp_termios); result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result) { diff --git a/trunk/drivers/usb/serial/qcserial.c b/trunk/drivers/usb/serial/qcserial.c index bd794b43898c..59b32b782126 100644 --- a/trunk/drivers/usb/serial/qcserial.c +++ b/trunk/drivers/usb/serial/qcserial.c @@ -118,7 +118,6 @@ static const struct usb_device_id id_table[] = { {USB_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */ {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */ {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */ - {USB_DEVICE(0x0AF0, 0x8120)}, /* Option GTM681W */ /* non Gobi Qualcomm serial devices */ {USB_DEVICE_INTERFACE_NUMBER(0x0f3d, 0x68a2, 0)}, /* Sierra Wireless MC7700 Device Management */ diff --git a/trunk/drivers/usb/serial/spcp8x5.c b/trunk/drivers/usb/serial/spcp8x5.c index ddf6c47137dc..cf3df793c2b7 100644 --- a/trunk/drivers/usb/serial/spcp8x5.c +++ b/trunk/drivers/usb/serial/spcp8x5.c @@ -291,6 +291,7 @@ static void spcp8x5_set_termios(struct tty_struct *tty, struct spcp8x5_private *priv = usb_get_serial_port_data(port); unsigned long flags; unsigned int cflag = tty->termios.c_cflag; + unsigned int old_cflag = old_termios->c_cflag; unsigned short uartdata; unsigned char buf[2] = {0, 0}; int baud; @@ -298,15 +299,15 @@ static void spcp8x5_set_termios(struct tty_struct *tty, u8 control; /* check that they really want us to change something */ - if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios)) + if (!tty_termios_hw_change(&tty->termios, old_termios)) return; /* set DTR/RTS active */ spin_lock_irqsave(&priv->lock, flags); control = priv->line_control; - if (old_termios && (old_termios->c_cflag & CBAUD) == B0) { + if ((old_cflag & CBAUD) == B0) { priv->line_control |= MCR_DTR; - if (!(old_termios->c_cflag & CRTSCTS)) + if (!(old_cflag & CRTSCTS)) priv->line_control |= MCR_RTS; } if (control != priv->line_control) { @@ -393,6 +394,7 @@ static 
void spcp8x5_set_termios(struct tty_struct *tty, static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port) { + struct ktermios tmp_termios; struct usb_serial *serial = port->serial; struct spcp8x5_private *priv = usb_get_serial_port_data(port); int ret; @@ -409,7 +411,7 @@ static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port) spcp8x5_set_ctrl_line(port, priv->line_control); if (tty) - spcp8x5_set_termios(tty, port, NULL); + spcp8x5_set_termios(tty, port, &tmp_termios); port->port.drain_delay = 256; diff --git a/trunk/drivers/usb/serial/ti_usb_3410_5052.c b/trunk/drivers/usb/serial/ti_usb_3410_5052.c index e581c2549a57..cac47aef2918 100644 --- a/trunk/drivers/usb/serial/ti_usb_3410_5052.c +++ b/trunk/drivers/usb/serial/ti_usb_3410_5052.c @@ -101,7 +101,6 @@ static int ti_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *data, int count); static int ti_write_room(struct tty_struct *tty); static int ti_chars_in_buffer(struct tty_struct *tty); -static bool ti_tx_empty(struct usb_serial_port *port); static void ti_throttle(struct tty_struct *tty); static void ti_unthrottle(struct tty_struct *tty); static int ti_ioctl(struct tty_struct *tty, @@ -172,8 +171,7 @@ static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = { { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, - { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, - { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, }; @@ -224,7 +222,6 @@ static struct usb_serial_driver ti_1port_device = { .write = ti_write, .write_room = ti_write_room, .chars_in_buffer = ti_chars_in_buffer, - .tx_empty = ti_tx_empty, .throttle = ti_throttle, .unthrottle = ti_unthrottle, .ioctl = ti_ioctl, @@ -256,7 +253,6 @@ static struct usb_serial_driver ti_2port_device = { .write = ti_write, .write_room = ti_write_room, .chars_in_buffer = ti_chars_in_buffer, - .tx_empty = ti_tx_empty, .throttle = ti_throttle, .unthrottle = ti_unthrottle, .ioctl = ti_ioctl, @@ -688,6 +684,8 @@ static int ti_chars_in_buffer(struct tty_struct *tty) struct ti_port *tport = usb_get_serial_port_data(port); int chars = 0; unsigned long flags; + int ret; + u8 lsr; if (tport == NULL) return 0; @@ -696,22 +694,16 @@ static int ti_chars_in_buffer(struct tty_struct *tty) chars = kfifo_len(&tport->write_fifo); spin_unlock_irqrestore(&tport->tp_lock, flags); + if (!chars) { + ret = ti_get_lsr(tport, &lsr); + if (!ret && !(lsr & TI_LSR_TX_EMPTY)) + chars = 1; + } + dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars); return chars; } -static bool ti_tx_empty(struct usb_serial_port *port) -{ - struct ti_port *tport = usb_get_serial_port_data(port); - int ret; - u8 lsr; - - ret = ti_get_lsr(tport, &lsr); - if (!ret && !(lsr & TI_LSR_TX_EMPTY)) - return false; - - return true; -} static void ti_throttle(struct tty_struct *tty) { diff --git a/trunk/drivers/usb/serial/ti_usb_3410_5052.h b/trunk/drivers/usb/serial/ti_usb_3410_5052.h index 4a2423e84d55..b353e7e3d480 100644 --- a/trunk/drivers/usb/serial/ti_usb_3410_5052.h +++ b/trunk/drivers/usb/serial/ti_usb_3410_5052.h @@ -52,9 +52,7 @@ /* Abbott Diabetics vendor and product ids */ #define ABBOTT_VENDOR_ID 0x1a61 -#define ABBOTT_STEREO_PLUG_ID 0x3410 -#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID -#define ABBOTT_STRIP_PORT_ID 0x3420 +#define 
ABBOTT_PRODUCT_ID 0x3410 /* Commands */ #define TI_GET_VERSION 0x01 diff --git a/trunk/drivers/usb/serial/usb-serial.c b/trunk/drivers/usb/serial/usb-serial.c index 5f6b1ff9d29e..cf75beb1251b 100644 --- a/trunk/drivers/usb/serial/usb-serial.c +++ b/trunk/drivers/usb/serial/usb-serial.c @@ -359,29 +359,20 @@ static int serial_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_serial *serial = port->serial; + int count = 0; dev_dbg(tty->dev, "%s\n", __func__); - if (serial->disconnected) - return 0; - - return serial->type->chars_in_buffer(tty); -} - -static void serial_wait_until_sent(struct tty_struct *tty, int timeout) -{ - struct usb_serial_port *port = tty->driver_data; - struct usb_serial *serial = port->serial; - - dev_dbg(tty->dev, "%s\n", __func__); - - if (!port->serial->type->wait_until_sent) - return; - mutex_lock(&serial->disc_mutex); - if (!serial->disconnected) - port->serial->type->wait_until_sent(tty, timeout); + /* if the device was unplugged then any remaining characters + fell out of the connector ;) */ + if (serial->disconnected) + count = 0; + else + count = serial->type->chars_in_buffer(tty); mutex_unlock(&serial->disc_mutex); + + return count; } static void serial_throttle(struct tty_struct *tty) @@ -408,7 +399,7 @@ static int serial_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; - int retval = -ENOIOCTLCMD; + int retval = -ENODEV; dev_dbg(tty->dev, "%s - cmd 0x%.4x\n", __func__, cmd); @@ -420,6 +411,8 @@ static int serial_ioctl(struct tty_struct *tty, default: if (port->serial->type->ioctl) retval = port->serial->type->ioctl(tty, cmd, arg); + else + retval = -ENOIOCTLCMD; } return retval; @@ -1198,7 +1191,6 @@ static const struct tty_operations serial_ops = { .unthrottle = serial_unthrottle, .break_ctl = serial_break, .chars_in_buffer = serial_chars_in_buffer, - .wait_until_sent = serial_wait_until_sent, .tiocmget = serial_tiocmget, .tiocmset = serial_tiocmset, .get_icount = serial_get_icount, @@ -1324,8 +1316,6 @@ static void usb_serial_operations_init(struct usb_serial_driver *device) set_to_generic_if_null(device, close); set_to_generic_if_null(device, write_room); set_to_generic_if_null(device, chars_in_buffer); - if (device->tx_empty) - set_to_generic_if_null(device, wait_until_sent); set_to_generic_if_null(device, read_bulk_callback); set_to_generic_if_null(device, write_bulk_callback); set_to_generic_if_null(device, process_read_urb); diff --git a/trunk/drivers/usb/serial/visor.c b/trunk/drivers/usb/serial/visor.c index 9910aa2edf4b..7573ec8a084f 100644 --- a/trunk/drivers/usb/serial/visor.c +++ b/trunk/drivers/usb/serial/visor.c @@ -560,19 +560,10 @@ static int treo_attach(struct usb_serial *serial) */ #define COPY_PORT(dest, src) \ do { \ - int i; \ - \ - for (i = 0; i < ARRAY_SIZE(src->read_urbs); ++i) { \ - dest->read_urbs[i] = src->read_urbs[i]; \ - dest->read_urbs[i]->context = dest; \ - dest->bulk_in_buffers[i] = src->bulk_in_buffers[i]; \ - } \ dest->read_urb = src->read_urb; \ dest->bulk_in_endpointAddress = src->bulk_in_endpointAddress;\ dest->bulk_in_buffer = src->bulk_in_buffer; \ - dest->bulk_in_size = src->bulk_in_size; \ dest->interrupt_in_urb = src->interrupt_in_urb; \ - dest->interrupt_in_urb->context = dest; \ dest->interrupt_in_endpointAddress = \ src->interrupt_in_endpointAddress;\ dest->interrupt_in_buffer = src->interrupt_in_buffer; \ diff --git a/trunk/drivers/usb/serial/whiteheat.c 
b/trunk/drivers/usb/serial/whiteheat.c index 347caad47a12..b9fca3586d74 100644 --- a/trunk/drivers/usb/serial/whiteheat.c +++ b/trunk/drivers/usb/serial/whiteheat.c @@ -649,7 +649,7 @@ static void firm_setup_port(struct tty_struct *tty) struct whiteheat_port_settings port_settings; unsigned int cflag = tty->termios.c_cflag; - port_settings.port = port->number - port->serial->minor + 1; + port_settings.port = port->number + 1; /* get the byte size */ switch (cflag & CSIZE) { diff --git a/trunk/drivers/usb/serial/zte_ev.c b/trunk/drivers/usb/serial/zte_ev.c index fca4c752a4ed..39ee7373b4ee 100644 --- a/trunk/drivers/usb/serial/zte_ev.c +++ b/trunk/drivers/usb/serial/zte_ev.c @@ -41,6 +41,9 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty, int len; unsigned char *buf; + if (port->number != 0) + return -ENODEV; + buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -50,7 +53,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty, result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0001, 0x0000, NULL, len, - USB_CTRL_GET_TIMEOUT); + HZ * USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); /* send 2st cmd and recieve data */ @@ -62,7 +65,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty, result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x21, 0xa1, 0x0000, 0x0000, buf, len, - USB_CTRL_GET_TIMEOUT); + HZ * USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 3 cmd */ @@ -81,7 +84,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty, result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x20, 0x21, 0x0000, 0x0000, buf, len, - USB_CTRL_GET_TIMEOUT); + HZ * USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 4 cmd */ @@ -92,7 +95,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty, result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0003, 0x0000, NULL, len, - USB_CTRL_GET_TIMEOUT); + HZ * USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); /* send 5 cmd */ @@ -104,7 +107,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty, result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x21, 0xa1, 0x0000, 0x0000, buf, len, - USB_CTRL_GET_TIMEOUT); + HZ * USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 6 cmd */ @@ -123,7 +126,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty, result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x20, 0x21, 0x0000, 0x0000, buf, len, - USB_CTRL_GET_TIMEOUT); + HZ * USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); kfree(buf); @@ -163,6 +166,9 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) int len; unsigned char *buf; + if (port->number != 0) + return; + buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL); if (!buf) return; @@ -172,7 +178,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0002, 0x0000, NULL, len, - USB_CTRL_GET_TIMEOUT); + HZ * USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); /* send 2st ctl cmd(CTL 21 22 03 00 00 00 00 00 ) */ @@ -180,7 +186,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0003, 0x0000, NULL, len, - USB_CTRL_GET_TIMEOUT); + HZ * USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); /* send 3st cmd and recieve data */ @@ -192,7 +198,7 @@ static void 
zte_ev_usb_serial_close(struct usb_serial_port *port) result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x21, 0xa1, 0x0000, 0x0000, buf, len, - USB_CTRL_GET_TIMEOUT); + HZ * USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 4 cmd */ @@ -211,7 +217,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x20, 0x21, 0x0000, 0x0000, buf, len, - USB_CTRL_GET_TIMEOUT); + HZ * USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 5 cmd */ @@ -222,7 +228,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0003, 0x0000, NULL, len, - USB_CTRL_GET_TIMEOUT); + HZ * USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); /* send 6 cmd */ @@ -234,7 +240,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x21, 0xa1, 0x0000, 0x0000, buf, len, - USB_CTRL_GET_TIMEOUT); + HZ * USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 7 cmd */ @@ -253,7 +259,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x20, 0x21, 0x0000, 0x0000, buf, len, - USB_CTRL_GET_TIMEOUT); + HZ * USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 8 cmd */ @@ -264,7 +270,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0003, 0x0000, NULL, len, - USB_CTRL_GET_TIMEOUT); + HZ * USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); kfree(buf); @@ -273,29 +279,11 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) } static const struct usb_device_id id_table[] = { - /* AC8710, AC8710T */ - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) }, - /* AC8700 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) }, - /* MG880 */ - { USB_DEVICE(0x19d2, 0xfffd) }, - { USB_DEVICE(0x19d2, 0xfffc) }, - { USB_DEVICE(0x19d2, 0xfffb) }, - /* AC2726, AC8710_V3 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) }, - { USB_DEVICE(0x19d2, 0xfff6) }, - { USB_DEVICE(0x19d2, 0xfff7) }, - { USB_DEVICE(0x19d2, 0xfff8) }, - { USB_DEVICE(0x19d2, 0xfff9) }, - { USB_DEVICE(0x19d2, 0xffee) }, - /* AC2716, MC2716 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) }, - /* AD3812 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) }, - { USB_DEVICE(0x19d2, 0xffec) }, + { USB_DEVICE(0x19d2, 0xffff) }, /* AC8700 */ + { USB_DEVICE(0x19d2, 0xfffe) }, + { USB_DEVICE(0x19d2, 0xfffd) }, /* MG880 */ { USB_DEVICE(0x05C6, 0x3197) }, { USB_DEVICE(0x05C6, 0x6000) }, - { USB_DEVICE(0x05C6, 0x9008) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/trunk/drivers/usb/storage/realtek_cr.c b/trunk/drivers/usb/storage/realtek_cr.c index 281be56d5648..8623577bbbe7 100644 --- a/trunk/drivers/usb/storage/realtek_cr.c +++ b/trunk/drivers/usb/storage/realtek_cr.c @@ -105,9 +105,8 @@ struct rts51x_chip { int status_len; u32 flag; - struct us_data *us; - #ifdef CONFIG_REALTEK_AUTOPM + struct us_data *us; struct timer_list rts51x_suspend_timer; unsigned long timer_expires; int pwr_state; @@ -989,7 +988,6 @@ static int init_realtek_cr(struct us_data *us) us->extra = chip; us->extra_destructor = realtek_cr_destructor; us->max_lun = chip->max_lun = 
rts51x_get_max_lun(us); - chip->us = us; usb_stor_dbg(us, "chip->max_lun = %d\n", chip->max_lun); @@ -1012,8 +1010,10 @@ static int init_realtek_cr(struct us_data *us) SET_AUTO_DELINK(chip); } #ifdef CONFIG_REALTEK_AUTOPM - if (ss_en) + if (ss_en) { + chip->us = us; realtek_cr_autosuspend_setup(us); + } #endif usb_stor_dbg(us, "chip->flag = 0x%x\n", chip->flag); diff --git a/trunk/drivers/vfio/pci/vfio_pci.c b/trunk/drivers/vfio/pci/vfio_pci.c index c5179e269df6..ac3725440d64 100644 --- a/trunk/drivers/vfio/pci/vfio_pci.c +++ b/trunk/drivers/vfio/pci/vfio_pci.c @@ -499,6 +499,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) } vma->vm_private_data = vdev; + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff; diff --git a/trunk/drivers/vfio/vfio.c b/trunk/drivers/vfio/vfio.c index 6d78736563de..acb7121a9316 100644 --- a/trunk/drivers/vfio/vfio.c +++ b/trunk/drivers/vfio/vfio.c @@ -1360,7 +1360,7 @@ static const struct file_operations vfio_device_fops = { */ static char *vfio_devnode(struct device *dev, umode_t *mode) { - if (mode && (MINOR(dev->devt) == 0)) + if (MINOR(dev->devt) == 0) *mode = S_IRUGO | S_IWUGO; return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev)); diff --git a/trunk/drivers/vhost/net.c b/trunk/drivers/vhost/net.c index f80d3dd41d8c..2b51e2336aa2 100644 --- a/trunk/drivers/vhost/net.c +++ b/trunk/drivers/vhost/net.c @@ -155,11 +155,14 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs) static void vhost_net_clear_ubuf_info(struct vhost_net *n) { + + bool zcopy; int i; - for (i = 0; i < VHOST_NET_VQ_MAX; ++i) { - kfree(n->vqs[i].ubuf_info); - n->vqs[i].ubuf_info = NULL; + for (i = 0; i < n->dev.nvqs; ++i) { + zcopy = vhost_net_zcopy_mask & (0x1 << i); + if (zcopy) + kfree(n->vqs[i].ubuf_info); } } @@ -168,7 +171,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n) bool zcopy; int i; - for (i = 0; i < VHOST_NET_VQ_MAX; ++i) { + for (i = 0; i < n->dev.nvqs; ++i) { zcopy = vhost_net_zcopy_mask & (0x1 << i); if (!zcopy) continue; @@ -180,7 +183,12 @@ int vhost_net_set_ubuf_info(struct vhost_net *n) return 0; err: - vhost_net_clear_ubuf_info(n); + while (i--) { + zcopy = vhost_net_zcopy_mask & (0x1 << i); + if (!zcopy) + continue; + kfree(n->vqs[i].ubuf_info); + } return -ENOMEM; } @@ -188,12 +196,12 @@ void vhost_net_vq_reset(struct vhost_net *n) { int i; - vhost_net_clear_ubuf_info(n); - for (i = 0; i < VHOST_NET_VQ_MAX; i++) { n->vqs[i].done_idx = 0; n->vqs[i].upend_idx = 0; n->vqs[i].ubufs = NULL; + kfree(n->vqs[i].ubuf_info); + n->vqs[i].ubuf_info = NULL; n->vqs[i].vhost_hlen = 0; n->vqs[i].sock_hlen = 0; } @@ -428,8 +436,7 @@ static void handle_tx(struct vhost_net *net) kref_get(&ubufs->kref); } nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; - } else - msg.msg_control = NULL; + } /* TODO: Check specific error and bomb out unless ENOBUFS? 
*/ err = sock->ops->sendmsg(NULL, sock, &msg, len); if (unlikely(err < 0)) { @@ -1046,10 +1053,6 @@ static long vhost_net_set_owner(struct vhost_net *n) int r; mutex_lock(&n->dev.mutex); - if (vhost_dev_has_owner(&n->dev)) { - r = -EBUSY; - goto out; - } r = vhost_net_set_ubuf_info(n); if (r) goto out; diff --git a/trunk/drivers/vhost/vhost.c b/trunk/drivers/vhost/vhost.c index 60aa5ad09a2f..beee7f5787e6 100644 --- a/trunk/drivers/vhost/vhost.c +++ b/trunk/drivers/vhost/vhost.c @@ -343,12 +343,6 @@ static int vhost_attach_cgroups(struct vhost_dev *dev) return attach.ret; } -/* Caller should have device mutex */ -bool vhost_dev_has_owner(struct vhost_dev *dev) -{ - return dev->mm; -} - /* Caller should have device mutex */ long vhost_dev_set_owner(struct vhost_dev *dev) { @@ -356,7 +350,7 @@ long vhost_dev_set_owner(struct vhost_dev *dev) int err; /* Is there an owner already? */ - if (vhost_dev_has_owner(dev)) { + if (dev->mm) { err = -EBUSY; goto err_mm; } diff --git a/trunk/drivers/vhost/vhost.h b/trunk/drivers/vhost/vhost.h index 64adcf99ff33..a7ad63592987 100644 --- a/trunk/drivers/vhost/vhost.h +++ b/trunk/drivers/vhost/vhost.h @@ -133,7 +133,6 @@ struct vhost_dev { long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs); long vhost_dev_set_owner(struct vhost_dev *dev); -bool vhost_dev_has_owner(struct vhost_dev *dev); long vhost_dev_check_owner(struct vhost_dev *); struct vhost_memory *vhost_dev_reset_owner_prepare(void); void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *); diff --git a/trunk/drivers/vhost/vringh.c b/trunk/drivers/vhost/vringh.c index 5174ebac288d..bff0775e258c 100644 --- a/trunk/drivers/vhost/vringh.c +++ b/trunk/drivers/vhost/vringh.c @@ -3,7 +3,6 @@ * * Since these may be in userspace, we use (inline) accessors. */ -#include #include #include #include @@ -1006,5 +1005,3 @@ int vringh_need_notify_kern(struct vringh *vrh) return __vringh_need_notify(vrh, getu16_kern); } EXPORT_SYMBOL(vringh_need_notify_kern); - -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/video/Kconfig b/trunk/drivers/video/Kconfig index 2e937bdace6f..d71d60f94fc1 100644 --- a/trunk/drivers/video/Kconfig +++ b/trunk/drivers/video/Kconfig @@ -2199,7 +2199,7 @@ config FB_XILINX config FB_GOLDFISH tristate "Goldfish Framebuffer" - depends on FB && HAS_DMA + depends on FB select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT @@ -2453,23 +2453,6 @@ config FB_HYPERV help This framebuffer driver supports Microsoft Hyper-V Synthetic Video. -config FB_SIMPLE - bool "Simple framebuffer support" - depends on (FB = y) && OF - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT - help - Say Y if you want support for a simple frame-buffer. - - This driver assumes that the display hardware has been initialized - before the kernel boots, and the kernel will simply render to the - pre-allocated frame buffer surface. - - Configuration re: surface address, size, and format must be provided - through device tree, or potentially plain old platform data in the - future. 
- source "drivers/video/omap/Kconfig" source "drivers/video/omap2/Kconfig" source "drivers/video/exynos/Kconfig" diff --git a/trunk/drivers/video/Makefile b/trunk/drivers/video/Makefile index e8bae8dd4804..7234e4a959e8 100644 --- a/trunk/drivers/video/Makefile +++ b/trunk/drivers/video/Makefile @@ -166,7 +166,6 @@ obj-$(CONFIG_FB_MX3) += mx3fb.o obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o obj-$(CONFIG_FB_MXS) += mxsfb.o obj-$(CONFIG_FB_SSD1307) += ssd1307fb.o -obj-$(CONFIG_FB_SIMPLE) += simplefb.o # the test framebuffer is last obj-$(CONFIG_FB_VIRTUAL) += vfb.o diff --git a/trunk/drivers/video/atmel_lcdfb.c b/trunk/drivers/video/atmel_lcdfb.c index effdb373b8db..540909de6247 100644 --- a/trunk/drivers/video/atmel_lcdfb.c +++ b/trunk/drivers/video/atmel_lcdfb.c @@ -223,14 +223,8 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo) static void exit_backlight(struct atmel_lcdfb_info *sinfo) { - if (!sinfo->backlight) - return; - - if (sinfo->backlight->ops) { - sinfo->backlight->props.power = FB_BLANK_POWERDOWN; - sinfo->backlight->ops->update_status(sinfo->backlight); - } - backlight_device_unregister(sinfo->backlight); + if (sinfo->backlight) + backlight_device_unregister(sinfo->backlight); } #else @@ -467,11 +461,8 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var, if (info->fix.smem_len) { unsigned int smem_len = (var->xres_virtual * var->yres_virtual * ((var->bits_per_pixel + 7) / 8)); - if (smem_len > info->fix.smem_len) { - dev_err(dev, "Frame buffer is too small (%u) for screen size (need at least %u)\n", - info->fix.smem_len, smem_len); + if (smem_len > info->fix.smem_len) return -EINVAL; - } } /* Saturate vertical and horizontal timings at maximum values */ diff --git a/trunk/drivers/video/au1100fb.c b/trunk/drivers/video/au1100fb.c index ebeb9715f061..700cac067b46 100644 --- a/trunk/drivers/video/au1100fb.c +++ b/trunk/drivers/video/au1100fb.c @@ -385,6 +385,8 @@ int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6 + vma->vm_flags |= VM_IO; + if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) { diff --git a/trunk/drivers/video/au1200fb.c b/trunk/drivers/video/au1200fb.c index 301224ecc950..1b59054fc6a4 100644 --- a/trunk/drivers/video/au1200fb.c +++ b/trunk/drivers/video/au1200fb.c @@ -1258,9 +1258,13 @@ static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */ + vma->vm_flags |= VM_IO; + return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot); + + return 0; } static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata) diff --git a/trunk/drivers/video/console/Makefile b/trunk/drivers/video/console/Makefile index 48da25c96cd3..a862e9173ebe 100644 --- a/trunk/drivers/video/console/Makefile +++ b/trunk/drivers/video/console/Makefile @@ -18,8 +18,6 @@ font-objs-$(CONFIG_FONT_MINI_4x6) += font_mini_4x6.o font-objs += $(font-objs-y) -obj-$(CONFIG_FONTS) += font.o - # Each configuration option enables a list of files. 
obj-$(CONFIG_DUMMY_CONSOLE) += dummycon.o diff --git a/trunk/drivers/video/omap2/dss/core.c b/trunk/drivers/video/omap2/dss/core.c index c9c2252e3719..60cc6fee6548 100644 --- a/trunk/drivers/video/omap2/dss/core.c +++ b/trunk/drivers/video/omap2/dss/core.c @@ -53,8 +53,6 @@ static char *def_disp_name; module_param_named(def_disp, def_disp_name, charp, 0); MODULE_PARM_DESC(def_disp, "default display name"); -static bool dss_initialized; - const char *omapdss_get_default_display_name(void) { return core.default_display_name; @@ -68,12 +66,6 @@ enum omapdss_version omapdss_get_version(void) } EXPORT_SYMBOL(omapdss_get_version); -bool omapdss_is_initialized(void) -{ - return dss_initialized; -} -EXPORT_SYMBOL(omapdss_is_initialized); - struct platform_device *dss_get_core_pdev(void) { return core.pdev; @@ -611,8 +603,6 @@ static int __init omap_dss_init(void) return r; } - dss_initialized = true; - return 0; } @@ -643,15 +633,7 @@ static int __init omap_dss_init(void) static int __init omap_dss_init2(void) { - int r; - - r = omap_dss_register_drivers(); - if (r) - return r; - - dss_initialized = true; - - return 0; + return omap_dss_register_drivers(); } core_initcall(omap_dss_init); diff --git a/trunk/drivers/video/omap2/dss/hdmi.c b/trunk/drivers/video/omap2/dss/hdmi.c index a109934c0478..17f4d55c621c 100644 --- a/trunk/drivers/video/omap2/dss/hdmi.c +++ b/trunk/drivers/video/omap2/dss/hdmi.c @@ -1065,6 +1065,10 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev) mutex_init(&hdmi.ip_data.lock); res = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0); + if (!res) { + DSSERR("can't get IORESOURCE_MEM HDMI\n"); + return -EINVAL; + } /* Base address taken from platform */ hdmi.ip_data.base_wp = devm_ioremap_resource(&pdev->dev, res); diff --git a/trunk/drivers/video/omap2/omapfb/omapfb-main.c b/trunk/drivers/video/omap2/omapfb/omapfb-main.c index 856917b33616..c84bb8a4d0c4 100644 --- a/trunk/drivers/video/omap2/omapfb/omapfb-main.c +++ b/trunk/drivers/video/omap2/omapfb/omapfb-main.c @@ -2416,9 +2416,6 @@ static int omapfb_probe(struct platform_device *pdev) DBG("omapfb_probe\n"); - if (omapdss_is_initialized() == false) - return -EPROBE_DEFER; - if (pdev->num_resources != 0) { dev_err(&pdev->dev, "probed for an unknown device\n"); r = -ENODEV; diff --git a/trunk/drivers/video/omap2/vrfb.c b/trunk/drivers/video/omap2/vrfb.c index f346b02eee1d..5261229c79af 100644 --- a/trunk/drivers/video/omap2/vrfb.c +++ b/trunk/drivers/video/omap2/vrfb.c @@ -353,6 +353,11 @@ static int __init vrfb_probe(struct platform_device *pdev) /* first resource is the register res, the rest are vrfb contexts */ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "can't get vrfb base address\n"); + return -EINVAL; + } + vrfb_base = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(vrfb_base)) return PTR_ERR(vrfb_base); diff --git a/trunk/drivers/video/ps3fb.c b/trunk/drivers/video/ps3fb.c index dbfe2c18a434..d9f08c653d62 100644 --- a/trunk/drivers/video/ps3fb.c +++ b/trunk/drivers/video/ps3fb.c @@ -710,7 +710,7 @@ static int ps3fb_mmap(struct fb_info *info, struct vm_area_struct *vma) r = vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len); dev_dbg(info->device, "ps3fb: mmap framebuffer P(%lx)->V(%lx)\n", - info->fix.smem_start + (vma->vm_pgoff << PAGE_SHIFT), + info->fix.smem_start + vma->vm_pgoff << PAGE_SHIFT, vma->vm_start); return r; diff --git a/trunk/drivers/video/pxa3xx-gcu.c b/trunk/drivers/video/pxa3xx-gcu.c index 7cf0b13d061b..97563c55af63 100644 
--- a/trunk/drivers/video/pxa3xx-gcu.c +++ b/trunk/drivers/video/pxa3xx-gcu.c @@ -494,6 +494,7 @@ pxa3xx_gcu_misc_mmap(struct file *file, struct vm_area_struct *vma) if (size != resource_size(priv->resource_mem)) return -EINVAL; + vma->vm_flags |= VM_IO; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); return io_remap_pfn_range(vma, vma->vm_start, diff --git a/trunk/drivers/video/simplefb.c b/trunk/drivers/video/simplefb.c deleted file mode 100644 index e2e9e3e61b72..000000000000 --- a/trunk/drivers/video/simplefb.c +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Simplest possible simple frame-buffer driver, as a platform device - * - * Copyright (c) 2013, Stephen Warren - * - * Based on q40fb.c, which was: - * Copyright (C) 2001 Richard Zidlicky - * - * Also based on offb.c, which was: - * Copyright (C) 1997 Geert Uytterhoeven - * Copyright (C) 1996 Paul Mackerras - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include -#include -#include -#include -#include - -static struct fb_fix_screeninfo simplefb_fix = { - .id = "simple", - .type = FB_TYPE_PACKED_PIXELS, - .visual = FB_VISUAL_TRUECOLOR, - .accel = FB_ACCEL_NONE, -}; - -static struct fb_var_screeninfo simplefb_var = { - .height = -1, - .width = -1, - .activate = FB_ACTIVATE_NOW, - .vmode = FB_VMODE_NONINTERLACED, -}; - -static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, - u_int transp, struct fb_info *info) -{ - u32 *pal = info->pseudo_palette; - u32 cr = red >> (16 - info->var.red.length); - u32 cg = green >> (16 - info->var.green.length); - u32 cb = blue >> (16 - info->var.blue.length); - u32 value; - - if (regno >= 16) - return -EINVAL; - - value = (cr << info->var.red.offset) | - (cg << info->var.green.offset) | - (cb << info->var.blue.offset); - if (info->var.transp.length > 0) { - u32 mask = (1 << info->var.transp.length) - 1; - mask <<= info->var.transp.offset; - value |= mask; - } - pal[regno] = value; - - return 0; -} - -static struct fb_ops simplefb_ops = { - .owner = THIS_MODULE, - .fb_setcolreg = simplefb_setcolreg, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, -}; - -struct simplefb_format { - const char *name; - u32 bits_per_pixel; - struct fb_bitfield red; - struct fb_bitfield green; - struct fb_bitfield blue; - struct fb_bitfield transp; -}; - -static struct simplefb_format simplefb_formats[] = { - { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0} }, -}; - -struct simplefb_params { - u32 width; - u32 height; - u32 stride; - struct simplefb_format *format; -}; - -static int simplefb_parse_dt(struct platform_device *pdev, - struct simplefb_params *params) -{ - struct device_node *np = pdev->dev.of_node; - int ret; - const char *format; - int i; - - ret = of_property_read_u32(np, "width", &params->width); - if (ret) { - dev_err(&pdev->dev, "Can't parse width property\n"); - return ret; - } - - ret = of_property_read_u32(np, "height", &params->height); - if (ret) { - dev_err(&pdev->dev, "Can't parse height property\n"); - return ret; - } - - ret = of_property_read_u32(np, "stride", &params->stride); - if (ret) { - dev_err(&pdev->dev, "Can't parse
stride property\n"); - return ret; - } - - ret = of_property_read_string(np, "format", &format); - if (ret) { - dev_err(&pdev->dev, "Can't parse format property\n"); - return ret; - } - params->format = NULL; - for (i = 0; i < ARRAY_SIZE(simplefb_formats); i++) { - if (strcmp(format, simplefb_formats[i].name)) - continue; - params->format = &simplefb_formats[i]; - break; - } - if (!params->format) { - dev_err(&pdev->dev, "Invalid format value\n"); - return -EINVAL; - } - - return 0; -} - -static int simplefb_probe(struct platform_device *pdev) -{ - int ret; - struct simplefb_params params; - struct fb_info *info; - struct resource *mem; - - if (fb_get_options("simplefb", NULL)) - return -ENODEV; - - ret = simplefb_parse_dt(pdev, &params); - if (ret) - return ret; - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) { - dev_err(&pdev->dev, "No memory resource\n"); - return -EINVAL; - } - - info = framebuffer_alloc(sizeof(u32) * 16, &pdev->dev); - if (!info) - return -ENOMEM; - platform_set_drvdata(pdev, info); - - info->fix = simplefb_fix; - info->fix.smem_start = mem->start; - info->fix.smem_len = resource_size(mem); - info->fix.line_length = params.stride; - - info->var = simplefb_var; - info->var.xres = params.width; - info->var.yres = params.height; - info->var.xres_virtual = params.width; - info->var.yres_virtual = params.height; - info->var.bits_per_pixel = params.format->bits_per_pixel; - info->var.red = params.format->red; - info->var.green = params.format->green; - info->var.blue = params.format->blue; - info->var.transp = params.format->transp; - - info->fbops = &simplefb_ops; - info->flags = FBINFO_DEFAULT; - info->screen_base = devm_ioremap(&pdev->dev, info->fix.smem_start, - info->fix.smem_len); - if (!info->screen_base) { - framebuffer_release(info); - return -ENODEV; - } - info->pseudo_palette = (void *)(info + 1); - - ret = register_framebuffer(info); - if (ret < 0) { - dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret); - framebuffer_release(info); - return ret; - } - - dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node); - - return 0; -} - -static int simplefb_remove(struct platform_device *pdev) -{ - struct fb_info *info = platform_get_drvdata(pdev); - - unregister_framebuffer(info); - framebuffer_release(info); - - return 0; -} - -static const struct of_device_id simplefb_of_match[] = { - { .compatible = "simple-framebuffer", }, - { }, -}; -MODULE_DEVICE_TABLE(of, simplefb_of_match); - -static struct platform_driver simplefb_driver = { - .driver = { - .name = "simple-framebuffer", - .owner = THIS_MODULE, - .of_match_table = simplefb_of_match, - }, - .probe = simplefb_probe, - .remove = simplefb_remove, -}; -module_platform_driver(simplefb_driver); - -MODULE_AUTHOR("Stephen Warren "); -MODULE_DESCRIPTION("Simple framebuffer driver"); -MODULE_LICENSE("GPL v2"); diff --git a/trunk/drivers/w1/masters/omap_hdq.c b/trunk/drivers/w1/masters/omap_hdq.c index 6e94d8dd3d00..db2390aed387 100644 --- a/trunk/drivers/w1/masters/omap_hdq.c +++ b/trunk/drivers/w1/masters/omap_hdq.c @@ -555,6 +555,11 @@ static int omap_hdq_probe(struct platform_device *pdev) platform_set_drvdata(pdev, hdq_data); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_dbg(&pdev->dev, "unable to get resource\n"); + return -ENXIO; + } + hdq_data->hdq_base = devm_ioremap_resource(dev, res); if (IS_ERR(hdq_data->hdq_base)) return PTR_ERR(hdq_data->hdq_base); diff --git a/trunk/drivers/watchdog/ath79_wdt.c b/trunk/drivers/watchdog/ath79_wdt.c index
37cb09b27b63..d184c48a0482 100644 --- a/trunk/drivers/watchdog/ath79_wdt.c +++ b/trunk/drivers/watchdog/ath79_wdt.c @@ -248,6 +248,11 @@ static int ath79_wdt_probe(struct platform_device *pdev) return -EBUSY; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "no memory resource found\n"); + return -EINVAL; + } + wdt_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(wdt_base)) return PTR_ERR(wdt_base); diff --git a/trunk/drivers/watchdog/davinci_wdt.c b/trunk/drivers/watchdog/davinci_wdt.c index bead7740c86a..100d4fbfde2a 100644 --- a/trunk/drivers/watchdog/davinci_wdt.c +++ b/trunk/drivers/watchdog/davinci_wdt.c @@ -217,6 +217,11 @@ static int davinci_wdt_probe(struct platform_device *pdev) dev_info(dev, "heartbeat %d sec\n", heartbeat); wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (wdt_mem == NULL) { + dev_err(dev, "failed to get memory region resource\n"); + return -ENOENT; + } + wdt_base = devm_ioremap_resource(dev, wdt_mem); if (IS_ERR(wdt_base)) return PTR_ERR(wdt_base); diff --git a/trunk/drivers/watchdog/imx2_wdt.c b/trunk/drivers/watchdog/imx2_wdt.c index 62946c2cb4f8..ff908823688c 100644 --- a/trunk/drivers/watchdog/imx2_wdt.c +++ b/trunk/drivers/watchdog/imx2_wdt.c @@ -257,6 +257,11 @@ static int __init imx2_wdt_probe(struct platform_device *pdev) struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "can't get device resources\n"); + return -ENODEV; + } + imx2_wdt.base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(imx2_wdt.base)) return PTR_ERR(imx2_wdt.base); diff --git a/trunk/drivers/xen/Kconfig b/trunk/drivers/xen/Kconfig index 9e02d60a364b..f03bf501527f 100644 --- a/trunk/drivers/xen/Kconfig +++ b/trunk/drivers/xen/Kconfig @@ -19,10 +19,11 @@ config XEN_SELFBALLOONING by the current usage of anonymous memory ("committed AS") and controlled by various sysfs-settable parameters. Configuring FRONTSWAP is highly recommended; if it is not configured, self- - ballooning is disabled by default. If FRONTSWAP is configured, + ballooning is disabled by default but can be enabled with the + 'selfballooning' kernel boot parameter. If FRONTSWAP is configured, frontswap-selfshrinking is enabled by default but can be disabled - with the 'tmem.selfshrink=0' kernel boot parameter; and self-ballooning - is enabled by default but can be disabled with the 'tmem.selfballooning=0' + with the 'noselfshrink' kernel boot parameter; and self-ballooning + is enabled by default but can be disabled with the 'noselfballooning' kernel boot parameter. Note that systems without a sufficiently large swap device should not enable self-ballooning. 
diff --git a/trunk/drivers/xen/balloon.c b/trunk/drivers/xen/balloon.c index 930fb6817901..a56776dbe095 100644 --- a/trunk/drivers/xen/balloon.c +++ b/trunk/drivers/xen/balloon.c @@ -407,8 +407,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { - page = alloc_page(gfp); - if (page == NULL) { + if ((page = alloc_page(gfp)) == NULL) { nr_pages = i; state = BP_EAGAIN; break; diff --git a/trunk/drivers/xen/privcmd.c b/trunk/drivers/xen/privcmd.c index 2cfc24d76fc5..ca2b00e9d558 100644 --- a/trunk/drivers/xen/privcmd.c +++ b/trunk/drivers/xen/privcmd.c @@ -504,7 +504,7 @@ static void privcmd_close(struct vm_area_struct *vma) struct page **pages = vma->vm_private_data; int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; - if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) + if (!xen_feature(XENFEAT_auto_translated_physmap || !numpgs || !pages)) return; xen_unmap_domain_mfn_range(vma, numpgs, pages); diff --git a/trunk/drivers/xen/tmem.c b/trunk/drivers/xen/tmem.c index 0f0493c63371..e3600be4e7fa 100644 --- a/trunk/drivers/xen/tmem.c +++ b/trunk/drivers/xen/tmem.c @@ -11,7 +11,11 @@ #include #include #include + +/* temporary ifdef until include/linux/frontswap.h is upstream */ +#ifdef CONFIG_FRONTSWAP #include +#endif #include #include @@ -20,36 +24,6 @@ #include #include -#ifndef CONFIG_XEN_TMEM_MODULE -bool __read_mostly tmem_enabled = false; - -static int __init enable_tmem(char *s) -{ - tmem_enabled = true; - return 1; -} -__setup("tmem", enable_tmem); -#endif - -#ifdef CONFIG_CLEANCACHE -static bool cleancache __read_mostly = true; -module_param(cleancache, bool, S_IRUGO); -static bool selfballooning __read_mostly = true; -module_param(selfballooning, bool, S_IRUGO); -#endif /* CONFIG_CLEANCACHE */ - -#ifdef CONFIG_FRONTSWAP -static bool frontswap __read_mostly = true; -module_param(frontswap, bool, S_IRUGO); -#else /* CONFIG_FRONTSWAP */ -#define frontswap (0) -#endif /* CONFIG_FRONTSWAP */ - -#ifdef CONFIG_XEN_SELFBALLOONING -static bool selfshrinking __read_mostly = true; -module_param(selfshrinking, bool, S_IRUGO); -#endif /* CONFIG_XEN_SELFBALLOONING */ - #define TMEM_CONTROL 0 #define TMEM_NEW_POOL 1 #define TMEM_DESTROY_POOL 2 @@ -155,6 +129,16 @@ static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid) return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0); } +#ifndef CONFIG_XEN_TMEM_MODULE +bool __read_mostly tmem_enabled = false; + +static int __init enable_tmem(char *s) +{ + tmem_enabled = true; + return 1; +} +__setup("tmem", enable_tmem); +#endif #ifdef CONFIG_CLEANCACHE static int xen_tmem_destroy_pool(u32 pool_id) @@ -246,6 +230,20 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize) return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize); } +static bool disable_cleancache __read_mostly; +static bool disable_selfballooning __read_mostly; +#ifdef CONFIG_XEN_TMEM_MODULE +module_param(disable_cleancache, bool, S_IRUGO); +module_param(disable_selfballooning, bool, S_IRUGO); +#else +static int __init no_cleancache(char *s) +{ + disable_cleancache = true; + return 1; +} +__setup("nocleancache", no_cleancache); +#endif + static struct cleancache_ops tmem_cleancache_ops = { .put_page = tmem_cleancache_put_page, .get_page = tmem_cleancache_get_page, @@ -363,6 +361,20 @@ static void tmem_frontswap_init(unsigned ignored) xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE); } +static bool disable_frontswap 
__read_mostly; +static bool disable_frontswap_selfshrinking __read_mostly; +#ifdef CONFIG_XEN_TMEM_MODULE +module_param(disable_frontswap, bool, S_IRUGO); +module_param(disable_frontswap_selfshrinking, bool, S_IRUGO); +#else +static int __init no_frontswap(char *s) +{ + disable_frontswap = true; + return 1; +} +__setup("nofrontswap", no_frontswap); +#endif + static struct frontswap_ops tmem_frontswap_ops = { .store = tmem_frontswap_store, .load = tmem_frontswap_load, @@ -370,6 +382,8 @@ static struct frontswap_ops tmem_frontswap_ops = { .invalidate_area = tmem_frontswap_flush_area, .init = tmem_frontswap_init }; +#else /* CONFIG_FRONTSWAP */ +#define disable_frontswap_selfshrinking 1 #endif static int xen_tmem_init(void) @@ -377,12 +391,12 @@ static int xen_tmem_init(void) if (!xen_domain()) return 0; #ifdef CONFIG_FRONTSWAP - if (tmem_enabled && frontswap) { + if (tmem_enabled && !disable_frontswap) { char *s = ""; - struct frontswap_ops *old_ops; + struct frontswap_ops *old_ops = + frontswap_register_ops(&tmem_frontswap_ops); tmem_frontswap_poolid = -1; - old_ops = frontswap_register_ops(&tmem_frontswap_ops); if (IS_ERR(old_ops) || old_ops) { if (IS_ERR(old_ops)) return PTR_ERR(old_ops); @@ -394,7 +408,7 @@ static int xen_tmem_init(void) #endif #ifdef CONFIG_CLEANCACHE BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid)); - if (tmem_enabled && cleancache) { + if (tmem_enabled && !disable_cleancache) { char *s = ""; struct cleancache_ops *old_ops = cleancache_register_ops(&tmem_cleancache_ops); @@ -405,15 +419,8 @@ static int xen_tmem_init(void) } #endif #ifdef CONFIG_XEN_SELFBALLOONING - /* - * There is no point of driving pages to the swap system if they - * aren't going anywhere in tmem universe. - */ - if (!frontswap) { - selfshrinking = false; - selfballooning = false; - } - xen_selfballoon_init(selfballooning, selfshrinking); + xen_selfballoon_init(!disable_selfballooning, + !disable_frontswap_selfshrinking); #endif return 0; } diff --git a/trunk/drivers/xen/xen-pciback/pci_stub.c b/trunk/drivers/xen/xen-pciback/pci_stub.c index 4e8ba38aa0c9..a2278ba7fb27 100644 --- a/trunk/drivers/xen/xen-pciback/pci_stub.c +++ b/trunk/drivers/xen/xen-pciback/pci_stub.c @@ -106,7 +106,7 @@ static void pcistub_device_release(struct kref *kref) else pci_restore_state(dev); - if (dev->msix_cap) { + if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) { struct physdev_pci_device ppdev = { .seg = pci_domain_nr(dev->bus), .bus = dev->bus->number, @@ -371,7 +371,7 @@ static int pcistub_init_device(struct pci_dev *dev) if (err) goto config_release; - if (dev->msix_cap) { + if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) { struct physdev_pci_device ppdev = { .seg = pci_domain_nr(dev->bus), .bus = dev->bus->number, diff --git a/trunk/drivers/xen/xen-selfballoon.c b/trunk/drivers/xen/xen-selfballoon.c index f70984a892aa..f2ef569c7cc1 100644 --- a/trunk/drivers/xen/xen-selfballoon.c +++ b/trunk/drivers/xen/xen-selfballoon.c @@ -53,12 +53,15 @@ * System configuration note: Selfballooning should not be enabled on * systems without a sufficiently large swap device configured; for best * results, it is recommended that total swap be increased by the size - * of the guest memory. Note, that selfballooning should be disabled by default - * if frontswap is not configured. Similarly selfballooning should be enabled - * by default if frontswap is configured and can be disabled with the - * "tmem.selfballooning=0" kernel boot option. 
Finally, when frontswap is - * configured, frontswap-selfshrinking can be disabled with the - * "tmem.selfshrink=0" kernel boot option. + * of the guest memory. Also, while technically not required to be + * configured, it is highly recommended that frontswap also be configured + * and enabled when selfballooning is running. So, selfballooning + * is disabled by default if frontswap is not configured and can only + * be enabled with the "selfballooning" kernel boot option; similarly + * selfballooning is enabled by default if frontswap is configured and + * can be disabled with the "noselfballooning" kernel boot option. Finally, + * when frontswap is configured, frontswap-selfshrinking can be disabled + * with the "noselfshrink" kernel boot option. * * Selfballooning is disallowed in domain0 and force-disabled. * @@ -117,6 +120,9 @@ static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process); /* Enable/disable with sysfs. */ static bool frontswap_selfshrinking __read_mostly; +/* Enable/disable with kernel boot option. */ +static bool use_frontswap_selfshrink = true; + /* * The default values for the following parameters were deemed reasonable * by experimentation, may be workload-dependent, and can all be @@ -170,6 +176,35 @@ static void frontswap_selfshrink(void) frontswap_shrink(tgt_frontswap_pages); } +static int __init xen_nofrontswap_selfshrink_setup(char *s) +{ + use_frontswap_selfshrink = false; + return 1; +} + +__setup("noselfshrink", xen_nofrontswap_selfshrink_setup); + +/* Disable with kernel boot option. */ +static bool use_selfballooning = true; + +static int __init xen_noselfballooning_setup(char *s) +{ + use_selfballooning = false; + return 1; +} + +__setup("noselfballooning", xen_noselfballooning_setup); +#else /* !CONFIG_FRONTSWAP */ +/* Enable with kernel boot option. 
*/ +static bool use_selfballooning; + +static int __init xen_selfballooning_setup(char *s) +{ + use_selfballooning = true; + return 1; +} + +__setup("selfballooning", xen_selfballooning_setup); #endif /* CONFIG_FRONTSWAP */ #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) diff --git a/trunk/drivers/xen/xenbus/xenbus_client.c b/trunk/drivers/xen/xenbus/xenbus_client.c index ec097d6f964d..61786be9138b 100644 --- a/trunk/drivers/xen/xenbus/xenbus_client.c +++ b/trunk/drivers/xen/xenbus/xenbus_client.c @@ -534,7 +534,7 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr); if (err) - goto out_err_free_ballooned_pages; + goto out_err; spin_lock(&xenbus_valloc_lock); list_add(&node->next, &xenbus_valloc_pages); @@ -543,9 +543,8 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, *vaddr = addr; return 0; - out_err_free_ballooned_pages: - free_xenballooned_pages(1, &node->page); out_err: + free_xenballooned_pages(1, &node->page); kfree(node); return err; } diff --git a/trunk/drivers/xen/xenbus/xenbus_comms.h b/trunk/drivers/xen/xenbus/xenbus_comms.h index e74f9c1fbd80..c8abd3b8a6c4 100644 --- a/trunk/drivers/xen/xenbus/xenbus_comms.h +++ b/trunk/drivers/xen/xenbus/xenbus_comms.h @@ -45,7 +45,6 @@ int xb_wait_for_data_to_read(void); int xs_input_avail(void); extern struct xenstore_domain_interface *xen_store_interface; extern int xen_store_evtchn; -extern enum xenstore_init xen_store_domain_type; extern const struct file_operations xen_xenbus_fops; diff --git a/trunk/drivers/xen/xenbus/xenbus_dev_backend.c b/trunk/drivers/xen/xenbus/xenbus_dev_backend.c index a6f42fc01407..d73000800762 100644 --- a/trunk/drivers/xen/xenbus/xenbus_dev_backend.c +++ b/trunk/drivers/xen/xenbus/xenbus_dev_backend.c @@ -70,21 +70,22 @@ static long xenbus_alloc(domid_t domid) return err; } -static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, - unsigned long data) +static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; switch (cmd) { - case IOCTL_XENBUS_BACKEND_EVTCHN: - if (xen_store_evtchn > 0) - return xen_store_evtchn; - return -ENODEV; - case IOCTL_XENBUS_BACKEND_SETUP: - return xenbus_alloc(data); - default: - return -ENOTTY; + case IOCTL_XENBUS_BACKEND_EVTCHN: + if (xen_store_evtchn > 0) + return xen_store_evtchn; + return -ENODEV; + + case IOCTL_XENBUS_BACKEND_SETUP: + return xenbus_alloc(data); + + default: + return -ENOTTY; } } diff --git a/trunk/drivers/xen/xenbus/xenbus_probe.c b/trunk/drivers/xen/xenbus/xenbus_probe.c index 56cfaaa9d006..3325884c693f 100644 --- a/trunk/drivers/xen/xenbus/xenbus_probe.c +++ b/trunk/drivers/xen/xenbus/xenbus_probe.c @@ -69,9 +69,6 @@ EXPORT_SYMBOL_GPL(xen_store_evtchn); struct xenstore_domain_interface *xen_store_interface; EXPORT_SYMBOL_GPL(xen_store_interface); -enum xenstore_init xen_store_domain_type; -EXPORT_SYMBOL_GPL(xen_store_domain_type); - static unsigned long xen_store_mfn; static BLOCKING_NOTIFIER_HEAD(xenstore_chain); @@ -722,11 +719,17 @@ static int __init xenstored_local_init(void) return err; } +enum xenstore_init { + UNKNOWN, + PV, + HVM, + LOCAL, +}; static int __init xenbus_init(void) { int err = 0; + enum xenstore_init usage = UNKNOWN; uint64_t v = 0; - xen_store_domain_type = XS_UNKNOWN; if (!xen_domain()) return -ENODEV; @@ -734,29 +737,29 @@ static int __init xenbus_init(void) xenbus_ring_ops_init(); if (xen_pv_domain()) - xen_store_domain_type = XS_PV; + usage = PV; if 
(xen_hvm_domain()) - xen_store_domain_type = XS_HVM; + usage = HVM; if (xen_hvm_domain() && xen_initial_domain()) - xen_store_domain_type = XS_LOCAL; + usage = LOCAL; if (xen_pv_domain() && !xen_start_info->store_evtchn) - xen_store_domain_type = XS_LOCAL; + usage = LOCAL; if (xen_pv_domain() && xen_start_info->store_evtchn) xenstored_ready = 1; - switch (xen_store_domain_type) { - case XS_LOCAL: + switch (usage) { + case LOCAL: err = xenstored_local_init(); if (err) goto out_error; xen_store_interface = mfn_to_virt(xen_store_mfn); break; - case XS_PV: + case PV: xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); break; - case XS_HVM: + case HVM: err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); if (err) goto out_error; diff --git a/trunk/drivers/xen/xenbus/xenbus_probe.h b/trunk/drivers/xen/xenbus/xenbus_probe.h index 146f857a36f8..bb4f92ed8730 100644 --- a/trunk/drivers/xen/xenbus/xenbus_probe.h +++ b/trunk/drivers/xen/xenbus/xenbus_probe.h @@ -47,13 +47,6 @@ struct xen_bus_type { struct bus_type bus; }; -enum xenstore_init { - XS_UNKNOWN, - XS_PV, - XS_HVM, - XS_LOCAL, -}; - extern struct device_attribute xenbus_dev_attrs[]; extern int xenbus_match(struct device *_dev, struct device_driver *_drv); diff --git a/trunk/drivers/xen/xenbus/xenbus_probe_frontend.c b/trunk/drivers/xen/xenbus/xenbus_probe_frontend.c index a7e25073de19..3159a37d966d 100644 --- a/trunk/drivers/xen/xenbus/xenbus_probe_frontend.c +++ b/trunk/drivers/xen/xenbus/xenbus_probe_frontend.c @@ -29,8 +29,6 @@ #include "xenbus_probe.h" -static struct workqueue_struct *xenbus_frontend_wq; - /* device// => - */ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { @@ -91,40 +89,9 @@ static void backend_changed(struct xenbus_watch *watch, xenbus_otherend_changed(watch, vec, len, 1); } -static void xenbus_frontend_delayed_resume(struct work_struct *w) -{ - struct xenbus_device *xdev = container_of(w, struct xenbus_device, work); - - xenbus_dev_resume(&xdev->dev); -} - -static int xenbus_frontend_dev_resume(struct device *dev) -{ - /* - * If xenstored is running in this domain, we cannot access the backend - * state at the moment, so we need to defer xenbus_dev_resume - */ - if (xen_store_domain_type == XS_LOCAL) { - struct xenbus_device *xdev = to_xenbus_device(dev); - - if (!xenbus_frontend_wq) { - pr_err("%s: no workqueue to process delayed resume\n", - xdev->nodename); - return -EFAULT; - } - - INIT_WORK(&xdev->work, xenbus_frontend_delayed_resume); - queue_work(xenbus_frontend_wq, &xdev->work); - - return 0; - } - - return xenbus_dev_resume(dev); -} - static const struct dev_pm_ops xenbus_pm_ops = { .suspend = xenbus_dev_suspend, - .resume = xenbus_frontend_dev_resume, + .resume = xenbus_dev_resume, .freeze = xenbus_dev_suspend, .thaw = xenbus_dev_cancel, .restore = xenbus_dev_resume, @@ -473,8 +440,6 @@ static int __init xenbus_probe_frontend_init(void) register_xenstore_notifier(&xenstore_notifier); - xenbus_frontend_wq = create_workqueue("xenbus_frontend"); - return 0; } subsys_initcall(xenbus_probe_frontend_init); diff --git a/trunk/fs/9p/vfs_addr.c b/trunk/fs/9p/vfs_addr.c index 9ff073f4090a..055562c580b4 100644 --- a/trunk/fs/9p/vfs_addr.c +++ b/trunk/fs/9p/vfs_addr.c @@ -148,14 +148,13 @@ static int v9fs_release_page(struct page *page, gfp_t gfp) * @offset: offset in the page */ -static void v9fs_invalidate_page(struct page *page, unsigned int offset, - unsigned int length) +static void 
v9fs_invalidate_page(struct page *page, unsigned long offset) { /* * If called with zero offset, we should release * the private state assocated with the page */ - if (offset == 0 && length == PAGE_CACHE_SIZE) + if (offset == 0) v9fs_fscache_invalidate_page(page); } diff --git a/trunk/fs/9p/vfs_dir.c b/trunk/fs/9p/vfs_dir.c index 4d0c2e0be7e5..be1e34adc3c6 100644 --- a/trunk/fs/9p/vfs_dir.c +++ b/trunk/fs/9p/vfs_dir.c @@ -101,15 +101,16 @@ static struct p9_rdir *v9fs_alloc_rdir_buf(struct file *filp, int buflen) } /** - * v9fs_dir_readdir - iterate through a directory - * @file: opened file structure - * @ctx: actor we feed the entries to + * v9fs_dir_readdir - read a directory + * @filp: opened file structure + * @dirent: directory structure ??? + * @filldir: function to populate directory structure ??? * */ -static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx) +static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir) { - bool over; + int over; struct p9_wstat st; int err = 0; struct p9_fid *fid; @@ -117,19 +118,19 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx) int reclen = 0; struct p9_rdir *rdir; - p9_debug(P9_DEBUG_VFS, "name %s\n", file->f_path.dentry->d_name.name); - fid = file->private_data; + p9_debug(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name); + fid = filp->private_data; buflen = fid->clnt->msize - P9_IOHDRSZ; - rdir = v9fs_alloc_rdir_buf(file, buflen); + rdir = v9fs_alloc_rdir_buf(filp, buflen); if (!rdir) return -ENOMEM; while (1) { if (rdir->tail == rdir->head) { - err = v9fs_file_readn(file, rdir->buf, NULL, - buflen, ctx->pos); + err = v9fs_file_readn(filp, rdir->buf, NULL, + buflen, filp->f_pos); if (err <= 0) return err; @@ -147,45 +148,51 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx) } reclen = st.size+2; - over = !dir_emit(ctx, st.name, strlen(st.name), - v9fs_qid2ino(&st.qid), dt_type(&st)); + over = filldir(dirent, st.name, strlen(st.name), + filp->f_pos, v9fs_qid2ino(&st.qid), dt_type(&st)); + p9stat_free(&st); + if (over) return 0; rdir->head += reclen; - ctx->pos += reclen; + filp->f_pos += reclen; } } } /** - * v9fs_dir_readdir_dotl - iterate through a directory - * @file: opened file structure - * @ctx: actor we feed the entries to + * v9fs_dir_readdir_dotl - read a directory + * @filp: opened file structure + * @dirent: buffer to fill dirent structures + * @filldir: function to populate dirent structures * */ -static int v9fs_dir_readdir_dotl(struct file *file, struct dir_context *ctx) +static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent, + filldir_t filldir) { + int over; int err = 0; struct p9_fid *fid; int buflen; struct p9_rdir *rdir; struct p9_dirent curdirent; + u64 oldoffset = 0; - p9_debug(P9_DEBUG_VFS, "name %s\n", file->f_path.dentry->d_name.name); - fid = file->private_data; + p9_debug(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name); + fid = filp->private_data; buflen = fid->clnt->msize - P9_READDIRHDRSZ; - rdir = v9fs_alloc_rdir_buf(file, buflen); + rdir = v9fs_alloc_rdir_buf(filp, buflen); if (!rdir) return -ENOMEM; while (1) { if (rdir->tail == rdir->head) { err = p9_client_readdir(fid, rdir->buf, buflen, - ctx->pos); + filp->f_pos); if (err <= 0) return err; @@ -203,13 +210,22 @@ static int v9fs_dir_readdir_dotl(struct file *file, struct dir_context *ctx) return -EIO; } - if (!dir_emit(ctx, curdirent.d_name, - strlen(curdirent.d_name), - v9fs_qid2ino(&curdirent.qid), - curdirent.d_type)) + /* d_off in dirent 
structure tracks the offset into + * the next dirent in the dir. However, filldir() + * expects offset into the current dirent. Hence + * while calling filldir send the offset from the + * previous dirent structure. + */ + over = filldir(dirent, curdirent.d_name, + strlen(curdirent.d_name), + oldoffset, v9fs_qid2ino(&curdirent.qid), + curdirent.d_type); + oldoffset = curdirent.d_off; + + if (over) return 0; - ctx->pos = curdirent.d_off; + filp->f_pos = curdirent.d_off; rdir->head += err; } } @@ -238,7 +254,7 @@ int v9fs_dir_release(struct inode *inode, struct file *filp) const struct file_operations v9fs_dir_operations = { .read = generic_read_dir, .llseek = generic_file_llseek, - .iterate = v9fs_dir_readdir, + .readdir = v9fs_dir_readdir, .open = v9fs_file_open, .release = v9fs_dir_release, }; @@ -246,7 +262,7 @@ const struct file_operations v9fs_dir_operations = { const struct file_operations v9fs_dir_operations_dotl = { .read = generic_read_dir, .llseek = generic_file_llseek, - .iterate = v9fs_dir_readdir_dotl, + .readdir = v9fs_dir_readdir_dotl, .open = v9fs_file_open, .release = v9fs_dir_release, .fsync = v9fs_file_fsync_dotl, diff --git a/trunk/fs/adfs/dir.c b/trunk/fs/adfs/dir.c index ade28bb058e3..9cf874ce8336 100644 --- a/trunk/fs/adfs/dir.c +++ b/trunk/fs/adfs/dir.c @@ -17,43 +17,47 @@ static DEFINE_RWLOCK(adfs_dir_lock); static int -adfs_readdir(struct file *file, struct dir_context *ctx) +adfs_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir; struct object_info obj; struct adfs_dir dir; int ret = 0; - if (ctx->pos >> 32) - return 0; + if (filp->f_pos >> 32) + goto out; ret = ops->read(sb, inode->i_ino, inode->i_size, &dir); if (ret) - return ret; + goto out; - if (ctx->pos == 0) { - if (!dir_emit_dot(file, ctx)) + switch ((unsigned long)filp->f_pos) { + case 0: + if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) goto free_out; - ctx->pos = 1; - } - if (ctx->pos == 1) { - if (!dir_emit(ctx, "..", 2, dir.parent_id, DT_DIR)) + filp->f_pos += 1; + + case 1: + if (filldir(dirent, "..", 2, 1, dir.parent_id, DT_DIR) < 0) goto free_out; - ctx->pos = 2; + filp->f_pos += 1; + + default: + break; } read_lock(&adfs_dir_lock); - ret = ops->setpos(&dir, ctx->pos - 2); + ret = ops->setpos(&dir, filp->f_pos - 2); if (ret) goto unlock_out; while (ops->getnext(&dir, &obj) == 0) { - if (!dir_emit(ctx, obj.name, obj.name_len, - obj.file_id, DT_UNKNOWN)) - break; - ctx->pos++; + if (filldir(dirent, obj.name, obj.name_len, + filp->f_pos, obj.file_id, DT_UNKNOWN) < 0) + goto unlock_out; + filp->f_pos += 1; } unlock_out: @@ -61,6 +65,8 @@ adfs_readdir(struct file *file, struct dir_context *ctx) free_out: ops->free(&dir); + +out: return ret; } @@ -186,7 +192,7 @@ adfs_dir_lookup_byname(struct inode *inode, struct qstr *name, struct object_inf const struct file_operations adfs_dir_operations = { .read = generic_read_dir, .llseek = generic_file_llseek, - .iterate = adfs_readdir, + .readdir = adfs_readdir, .fsync = generic_file_fsync, }; diff --git a/trunk/fs/affs/dir.c b/trunk/fs/affs/dir.c index f1eba8c3644e..fd11a6d608ee 100644 --- a/trunk/fs/affs/dir.c +++ b/trunk/fs/affs/dir.c @@ -15,12 +15,12 @@ #include "affs.h" -static int affs_readdir(struct file *, struct dir_context *); +static int affs_readdir(struct file *, void *, filldir_t); const struct file_operations affs_dir_operations = { .read = generic_read_dir, 
.llseek = generic_file_llseek, - .iterate = affs_readdir, + .readdir = affs_readdir, .fsync = affs_file_fsync, }; @@ -40,35 +40,52 @@ const struct inode_operations affs_dir_inode_operations = { }; static int -affs_readdir(struct file *file, struct dir_context *ctx) +affs_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; - struct buffer_head *dir_bh = NULL; - struct buffer_head *fh_bh = NULL; + struct buffer_head *dir_bh; + struct buffer_head *fh_bh; unsigned char *name; int namelen; u32 i; int hash_pos; int chain_pos; + u32 f_pos; u32 ino; + int stored; + int res; - pr_debug("AFFS: readdir(ino=%lu,f_pos=%lx)\n",inode->i_ino,(unsigned long)ctx->pos); + pr_debug("AFFS: readdir(ino=%lu,f_pos=%lx)\n",inode->i_ino,(unsigned long)filp->f_pos); - if (ctx->pos < 2) { - file->private_data = (void *)0; - if (!dir_emit_dots(file, ctx)) + stored = 0; + res = -EIO; + dir_bh = NULL; + fh_bh = NULL; + f_pos = filp->f_pos; + + if (f_pos == 0) { + filp->private_data = (void *)0; + if (filldir(dirent, ".", 1, f_pos, inode->i_ino, DT_DIR) < 0) return 0; + filp->f_pos = f_pos = 1; + stored++; + } + if (f_pos == 1) { + if (filldir(dirent, "..", 2, f_pos, parent_ino(filp->f_path.dentry), DT_DIR) < 0) + return stored; + filp->f_pos = f_pos = 2; + stored++; } affs_lock_dir(inode); - chain_pos = (ctx->pos - 2) & 0xffff; - hash_pos = (ctx->pos - 2) >> 16; + chain_pos = (f_pos - 2) & 0xffff; + hash_pos = (f_pos - 2) >> 16; if (chain_pos == 0xffff) { affs_warning(sb, "readdir", "More than 65535 entries in chain"); chain_pos = 0; hash_pos++; - ctx->pos = ((hash_pos << 16) | chain_pos) + 2; + filp->f_pos = ((hash_pos << 16) | chain_pos) + 2; } dir_bh = affs_bread(sb, inode->i_ino); if (!dir_bh) @@ -77,8 +94,8 @@ affs_readdir(struct file *file, struct dir_context *ctx) /* If the directory hasn't changed since the last call to readdir(), * we can jump directly to where we left off. 
*/ - ino = (u32)(long)file->private_data; - if (ino && file->f_version == inode->i_version) { + ino = (u32)(long)filp->private_data; + if (ino && filp->f_version == inode->i_version) { pr_debug("AFFS: readdir() left off=%d\n", ino); goto inside; } @@ -88,7 +105,7 @@ affs_readdir(struct file *file, struct dir_context *ctx) fh_bh = affs_bread(sb, ino); if (!fh_bh) { affs_error(sb, "readdir","Cannot read block %d", i); - return -EIO; + goto readdir_out; } ino = be32_to_cpu(AFFS_TAIL(sb, fh_bh)->hash_chain); affs_brelse(fh_bh); @@ -102,34 +119,38 @@ affs_readdir(struct file *file, struct dir_context *ctx) ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[hash_pos]); if (!ino) continue; - ctx->pos = (hash_pos << 16) + 2; + f_pos = (hash_pos << 16) + 2; inside: do { fh_bh = affs_bread(sb, ino); if (!fh_bh) { affs_error(sb, "readdir","Cannot read block %d", ino); - break; + goto readdir_done; } namelen = min(AFFS_TAIL(sb, fh_bh)->name[0], (u8)30); name = AFFS_TAIL(sb, fh_bh)->name + 1; pr_debug("AFFS: readdir(): filldir(\"%.*s\", ino=%u), hash=%d, f_pos=%x\n", - namelen, name, ino, hash_pos, (u32)ctx->pos); - if (!dir_emit(ctx, name, namelen, ino, DT_UNKNOWN)) + namelen, name, ino, hash_pos, f_pos); + if (filldir(dirent, name, namelen, f_pos, ino, DT_UNKNOWN) < 0) goto readdir_done; - ctx->pos++; + stored++; + f_pos++; ino = be32_to_cpu(AFFS_TAIL(sb, fh_bh)->hash_chain); affs_brelse(fh_bh); fh_bh = NULL; } while (ino); } readdir_done: - file->f_version = inode->i_version; - file->private_data = (void *)(long)ino; + filp->f_pos = f_pos; + filp->f_version = inode->i_version; + filp->private_data = (void *)(long)ino; + res = stored; readdir_out: affs_brelse(dir_bh); affs_brelse(fh_bh); affs_unlock_dir(inode); - return 0; + pr_debug("AFFS: readdir()=%d\n", stored); + return res; } diff --git a/trunk/fs/afs/dir.c b/trunk/fs/afs/dir.c index 34494fbead0a..7a465ed04444 100644 --- a/trunk/fs/afs/dir.c +++ b/trunk/fs/afs/dir.c @@ -22,7 +22,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags); static int afs_dir_open(struct inode *inode, struct file *file); -static int afs_readdir(struct file *file, struct dir_context *ctx); +static int afs_readdir(struct file *file, void *dirent, filldir_t filldir); static int afs_d_revalidate(struct dentry *dentry, unsigned int flags); static int afs_d_delete(const struct dentry *dentry); static void afs_d_release(struct dentry *dentry); @@ -43,7 +43,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, const struct file_operations afs_dir_file_operations = { .open = afs_dir_open, .release = afs_release, - .iterate = afs_readdir, + .readdir = afs_readdir, .lock = afs_lock, .llseek = generic_file_llseek, }; @@ -119,9 +119,9 @@ struct afs_dir_page { }; struct afs_lookup_cookie { - struct dir_context ctx; struct afs_fid fid; - struct qstr name; + const char *name; + size_t nlen; int found; }; @@ -228,18 +228,20 @@ static int afs_dir_open(struct inode *inode, struct file *file) /* * deal with one block in an AFS directory */ -static int afs_dir_iterate_block(struct dir_context *ctx, +static int afs_dir_iterate_block(unsigned *fpos, union afs_dir_block *block, - unsigned blkoff) + unsigned blkoff, + void *cookie, + filldir_t filldir) { union afs_dirent *dire; unsigned offset, next, curr; size_t nlen; - int tmp; + int tmp, ret; - _enter("%u,%x,%p,,",(unsigned)ctx->pos,blkoff,block); + _enter("%u,%x,%p,,",*fpos,blkoff,block); - curr = (ctx->pos - blkoff) / sizeof(union afs_dirent); + curr = (*fpos - blkoff) / 
sizeof(union afs_dirent); /* walk through the block, an entry at a time */ for (offset = AFS_DIRENT_PER_BLOCK - block->pagehdr.nentries; @@ -254,7 +256,7 @@ static int afs_dir_iterate_block(struct dir_context *ctx, _debug("ENT[%Zu.%u]: unused", blkoff / sizeof(union afs_dir_block), offset); if (offset >= curr) - ctx->pos = blkoff + + *fpos = blkoff + next * sizeof(union afs_dirent); continue; } @@ -300,15 +302,19 @@ static int afs_dir_iterate_block(struct dir_context *ctx, continue; /* found the next entry */ - if (!dir_emit(ctx, dire->u.name, nlen, + ret = filldir(cookie, + dire->u.name, + nlen, + blkoff + offset * sizeof(union afs_dirent), ntohl(dire->u.vnode), - ctx->actor == afs_lookup_filldir ? - ntohl(dire->u.unique) : DT_UNKNOWN)) { + filldir == afs_lookup_filldir ? + ntohl(dire->u.unique) : DT_UNKNOWN); + if (ret < 0) { _leave(" = 0 [full]"); return 0; } - ctx->pos = blkoff + next * sizeof(union afs_dirent); + *fpos = blkoff + next * sizeof(union afs_dirent); } _leave(" = 1 [more]"); @@ -318,8 +324,8 @@ static int afs_dir_iterate_block(struct dir_context *ctx, /* * iterate through the data blob that lists the contents of an AFS directory */ -static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, - struct key *key) +static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie, + filldir_t filldir, struct key *key) { union afs_dir_block *dblock; struct afs_dir_page *dbuf; @@ -327,7 +333,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, unsigned blkoff, limit; int ret; - _enter("{%lu},%u,,", dir->i_ino, (unsigned)ctx->pos); + _enter("{%lu},%u,,", dir->i_ino, *fpos); if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) { _leave(" = -ESTALE"); @@ -335,13 +341,13 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, } /* round the file position up to the next entry boundary */ - ctx->pos += sizeof(union afs_dirent) - 1; - ctx->pos &= ~(sizeof(union afs_dirent) - 1); + *fpos += sizeof(union afs_dirent) - 1; + *fpos &= ~(sizeof(union afs_dirent) - 1); /* walk through the blocks in sequence */ ret = 0; - while (ctx->pos < dir->i_size) { - blkoff = ctx->pos & ~(sizeof(union afs_dir_block) - 1); + while (*fpos < dir->i_size) { + blkoff = *fpos & ~(sizeof(union afs_dir_block) - 1); /* fetch the appropriate page from the directory */ page = afs_dir_get_page(dir, blkoff / PAGE_SIZE, key); @@ -358,7 +364,8 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, do { dblock = &dbuf->blocks[(blkoff % PAGE_SIZE) / sizeof(union afs_dir_block)]; - ret = afs_dir_iterate_block(ctx, dblock, blkoff); + ret = afs_dir_iterate_block(fpos, dblock, blkoff, + cookie, filldir); if (ret != 1) { afs_dir_put_page(page); goto out; @@ -366,7 +373,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, blkoff += sizeof(union afs_dir_block); - } while (ctx->pos < dir->i_size && blkoff < limit); + } while (*fpos < dir->i_size && blkoff < limit); afs_dir_put_page(page); ret = 0; @@ -380,10 +387,23 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, /* * read an AFS directory */ -static int afs_readdir(struct file *file, struct dir_context *ctx) +static int afs_readdir(struct file *file, void *cookie, filldir_t filldir) { - return afs_dir_iterate(file_inode(file), - ctx, file->private_data); + unsigned fpos; + int ret; + + _enter("{%Ld,{%lu}}", + file->f_pos, file_inode(file)->i_ino); + + ASSERT(file->private_data != NULL); + + fpos = file->f_pos; + ret = 
afs_dir_iterate(file_inode(file), &fpos, + cookie, filldir, file->private_data); + file->f_pos = fpos; + + _leave(" = %d", ret); + return ret; } /* @@ -396,16 +416,15 @@ static int afs_lookup_filldir(void *_cookie, const char *name, int nlen, { struct afs_lookup_cookie *cookie = _cookie; - _enter("{%s,%u},%s,%u,,%llu,%u", - cookie->name.name, cookie->name.len, name, nlen, + _enter("{%s,%Zu},%s,%u,,%llu,%u", + cookie->name, cookie->nlen, name, nlen, (unsigned long long) ino, dtype); /* insanity checks first */ BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048); BUILD_BUG_ON(sizeof(union afs_dirent) != 32); - if (cookie->name.len != nlen || - memcmp(cookie->name.name, name, nlen) != 0) { + if (cookie->nlen != nlen || memcmp(cookie->name, name, nlen) != 0) { _leave(" = 0 [no]"); return 0; } @@ -425,18 +444,24 @@ static int afs_lookup_filldir(void *_cookie, const char *name, int nlen, static int afs_do_lookup(struct inode *dir, struct dentry *dentry, struct afs_fid *fid, struct key *key) { - struct afs_super_info *as = dir->i_sb->s_fs_info; - struct afs_lookup_cookie cookie = { - .ctx.actor = afs_lookup_filldir, - .name = dentry->d_name, - .fid.vid = as->volume->vid - }; + struct afs_lookup_cookie cookie; + struct afs_super_info *as; + unsigned fpos; int ret; _enter("{%lu},%p{%s},", dir->i_ino, dentry, dentry->d_name.name); + as = dir->i_sb->s_fs_info; + /* search the directory */ - ret = afs_dir_iterate(dir, &cookie.ctx, key); + cookie.name = dentry->d_name.name; + cookie.nlen = dentry->d_name.len; + cookie.fid.vid = as->volume->vid; + cookie.found = 0; + + fpos = 0; + ret = afs_dir_iterate(dir, &fpos, &cookie, afs_lookup_filldir, + key); if (ret < 0) { _leave(" = %d [iter]", ret); return ret; diff --git a/trunk/fs/afs/file.c b/trunk/fs/afs/file.c index 66d50fe2ee45..8f6e9234d565 100644 --- a/trunk/fs/afs/file.c +++ b/trunk/fs/afs/file.c @@ -19,8 +19,7 @@ #include "internal.h" static int afs_readpage(struct file *file, struct page *page); -static void afs_invalidatepage(struct page *page, unsigned int offset, - unsigned int length); +static void afs_invalidatepage(struct page *page, unsigned long offset); static int afs_releasepage(struct page *page, gfp_t gfp_flags); static int afs_launder_page(struct page *page); @@ -311,17 +310,16 @@ static int afs_launder_page(struct page *page) * - release a page and clean up its private data if offset is 0 (indicating * the entire page) */ -static void afs_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +static void afs_invalidatepage(struct page *page, unsigned long offset) { struct afs_writeback *wb = (struct afs_writeback *) page_private(page); - _enter("{%lu},%u,%u", page->index, offset, length); + _enter("{%lu},%lu", page->index, offset); BUG_ON(!PageLocked(page)); /* we clean up only if the entire page is being invalidated */ - if (offset == 0 && length == PAGE_CACHE_SIZE) { + if (offset == 0) { #ifdef CONFIG_AFS_FSCACHE if (PageFsCache(page)) { struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); diff --git a/trunk/fs/aio.c b/trunk/fs/aio.c index 2bbcacf74d0c..c5b1a8c10411 100644 --- a/trunk/fs/aio.c +++ b/trunk/fs/aio.c @@ -141,6 +141,9 @@ static void aio_free_ring(struct kioctx *ctx) for (i = 0; i < ctx->nr_pages; i++) put_page(ctx->ring_pages[i]); + if (ctx->mmap_size) + vm_munmap(ctx->mmap_base, ctx->mmap_size); + if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) kfree(ctx->ring_pages); } @@ -304,9 +307,7 @@ static void free_ioctx(struct kioctx *ctx) kunmap_atomic(ring); while 
(atomic_read(&ctx->reqs_active) > 0) { - wait_event(ctx->wait, - head != ctx->tail || - atomic_read(&ctx->reqs_active) <= 0); + wait_event(ctx->wait, head != ctx->tail); avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head; @@ -319,6 +320,11 @@ static void free_ioctx(struct kioctx *ctx) aio_free_ring(ctx); + spin_lock(&aio_nr_lock); + BUG_ON(aio_nr - ctx->max_reqs > aio_nr); + aio_nr -= ctx->max_reqs; + spin_unlock(&aio_nr_lock); + pr_debug("freeing %p\n", ctx); /* @@ -427,24 +433,17 @@ static void kill_ioctx(struct kioctx *ctx) { if (!atomic_xchg(&ctx->dead, 1)) { hlist_del_rcu(&ctx->list); + /* Between hlist_del_rcu() and dropping the initial ref */ + synchronize_rcu(); /* - * It'd be more correct to do this in free_ioctx(), after all - * the outstanding kiocbs have finished - but by then io_destroy - * has already returned, so io_setup() could potentially return - * -EAGAIN with no ioctxs actually in use (as far as userspace - * could tell). + * We can't punt to workqueue here because put_ioctx() -> + * free_ioctx() will unmap the ringbuffer, and that has to be + * done in the original process's context. kill_ioctx_rcu/work() + * exist for exit_aio(), as in that path free_ioctx() won't do + * the unmap. */ - spin_lock(&aio_nr_lock); - BUG_ON(aio_nr - ctx->max_reqs > aio_nr); - aio_nr -= ctx->max_reqs; - spin_unlock(&aio_nr_lock); - - if (ctx->mmap_size) - vm_munmap(ctx->mmap_base, ctx->mmap_size); - - /* Between hlist_del_rcu() and dropping the initial ref */ - call_rcu(&ctx->rcu_head, kill_ioctx_rcu); + kill_ioctx_work(&ctx->rcu_work); } } @@ -494,7 +493,10 @@ void exit_aio(struct mm_struct *mm) */ ctx->mmap_size = 0; - kill_ioctx(ctx); + if (!atomic_xchg(&ctx->dead, 1)) { + hlist_del_rcu(&ctx->list); + call_rcu(&ctx->rcu_head, kill_ioctx_rcu); + } } } @@ -1297,7 +1299,8 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, * < min_nr if the timeout specified by timeout has elapsed * before sufficient events are available, where timeout == NULL * specifies an infinite timeout. Note that the timeout pointed to by - * timeout is relative. Will fail with -ENOSYS if not implemented. + * timeout is relative and will be updated if not NULL and the + * operation blocks. Will fail with -ENOSYS if not implemented. 
*/ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, long, min_nr, diff --git a/trunk/fs/autofs4/root.c b/trunk/fs/autofs4/root.c index ca8e55548d98..085da86e07c2 100644 --- a/trunk/fs/autofs4/root.c +++ b/trunk/fs/autofs4/root.c @@ -41,7 +41,7 @@ const struct file_operations autofs4_root_operations = { .open = dcache_dir_open, .release = dcache_dir_close, .read = generic_read_dir, - .iterate = dcache_readdir, + .readdir = dcache_readdir, .llseek = dcache_dir_lseek, .unlocked_ioctl = autofs4_root_ioctl, #ifdef CONFIG_COMPAT @@ -53,7 +53,7 @@ const struct file_operations autofs4_dir_operations = { .open = autofs4_dir_open, .release = dcache_dir_close, .read = generic_read_dir, - .iterate = dcache_readdir, + .readdir = dcache_readdir, .llseek = dcache_dir_lseek, }; diff --git a/trunk/fs/bad_inode.c b/trunk/fs/bad_inode.c index 7c93953030fb..922ad460bff9 100644 --- a/trunk/fs/bad_inode.c +++ b/trunk/fs/bad_inode.c @@ -45,7 +45,7 @@ static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov, return -EIO; } -static int bad_file_readdir(struct file *file, struct dir_context *ctx) +static int bad_file_readdir(struct file *filp, void *dirent, filldir_t filldir) { return -EIO; } @@ -152,7 +152,7 @@ static const struct file_operations bad_file_ops = .write = bad_file_write, .aio_read = bad_file_aio_read, .aio_write = bad_file_aio_write, - .iterate = bad_file_readdir, + .readdir = bad_file_readdir, .poll = bad_file_poll, .unlocked_ioctl = bad_file_unlocked_ioctl, .compat_ioctl = bad_file_compat_ioctl, diff --git a/trunk/fs/befs/linuxvfs.c b/trunk/fs/befs/linuxvfs.c index e9c75e20db32..8615ee89ab55 100644 --- a/trunk/fs/befs/linuxvfs.c +++ b/trunk/fs/befs/linuxvfs.c @@ -31,7 +31,7 @@ MODULE_LICENSE("GPL"); /* The units the vfs expects inode->i_blocks to be in */ #define VFS_BLOCK_SIZE 512 -static int befs_readdir(struct file *, struct dir_context *); +static int befs_readdir(struct file *, void *, filldir_t); static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int); static int befs_readpage(struct file *file, struct page *page); static sector_t befs_bmap(struct address_space *mapping, sector_t block); @@ -66,7 +66,7 @@ static struct kmem_cache *befs_inode_cachep; static const struct file_operations befs_dir_operations = { .read = generic_read_dir, - .iterate = befs_readdir, + .readdir = befs_readdir, .llseek = generic_file_llseek, }; @@ -211,9 +211,9 @@ befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) } static int -befs_readdir(struct file *file, struct dir_context *ctx) +befs_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; befs_data_stream *ds = &BEFS_I(inode)->i_data.ds; befs_off_t value; @@ -221,14 +221,15 @@ befs_readdir(struct file *file, struct dir_context *ctx) size_t keysize; unsigned char d_type; char keybuf[BEFS_NAME_LEN + 1]; - const char *dirname = file->f_path.dentry->d_name.name; + char *nlsname; + int nlsnamelen; + const char *dirname = filp->f_path.dentry->d_name.name; befs_debug(sb, "---> befs_readdir() " - "name %s, inode %ld, ctx->pos %Ld", - dirname, inode->i_ino, ctx->pos); + "name %s, inode %ld, filp->f_pos %Ld", + dirname, inode->i_ino, filp->f_pos); -more: - result = befs_btree_read(sb, ds, ctx->pos, BEFS_NAME_LEN + 1, + result = befs_btree_read(sb, ds, filp->f_pos, BEFS_NAME_LEN + 1, keybuf, &keysize, &value); if (result == BEFS_ERR) { @@ -250,29 +251,24 @@ 
befs_readdir(struct file *file, struct dir_context *ctx) /* Convert to NLS */ if (BEFS_SB(sb)->nls) { - char *nlsname; - int nlsnamelen; result = befs_utf2nls(sb, keybuf, keysize, &nlsname, &nlsnamelen); if (result < 0) { befs_debug(sb, "<--- befs_readdir() ERROR"); return result; } - if (!dir_emit(ctx, nlsname, nlsnamelen, - (ino_t) value, d_type)) { - kfree(nlsname); - return 0; - } + result = filldir(dirent, nlsname, nlsnamelen, filp->f_pos, + (ino_t) value, d_type); kfree(nlsname); + } else { - if (!dir_emit(ctx, keybuf, keysize, - (ino_t) value, d_type)) - return 0; + result = filldir(dirent, keybuf, keysize, filp->f_pos, + (ino_t) value, d_type); } - ctx->pos++; - goto more; - befs_debug(sb, "<--- befs_readdir() pos %Ld", ctx->pos); + filp->f_pos++; + + befs_debug(sb, "<--- befs_readdir() filp->f_pos %Ld", filp->f_pos); return 0; } diff --git a/trunk/fs/bfs/dir.c b/trunk/fs/bfs/dir.c index a399e6d9dc74..3f422f6bb5ca 100644 --- a/trunk/fs/bfs/dir.c +++ b/trunk/fs/bfs/dir.c @@ -26,51 +26,58 @@ static struct buffer_head *bfs_find_entry(struct inode *dir, const unsigned char *name, int namelen, struct bfs_dirent **res_dir); -static int bfs_readdir(struct file *f, struct dir_context *ctx) +static int bfs_readdir(struct file *f, void *dirent, filldir_t filldir) { struct inode *dir = file_inode(f); struct buffer_head *bh; struct bfs_dirent *de; + struct bfs_sb_info *info = BFS_SB(dir->i_sb); unsigned int offset; int block; - if (ctx->pos & (BFS_DIRENT_SIZE - 1)) { + mutex_lock(&info->bfs_lock); + + if (f->f_pos & (BFS_DIRENT_SIZE - 1)) { printf("Bad f_pos=%08lx for %s:%08lx\n", - (unsigned long)ctx->pos, + (unsigned long)f->f_pos, dir->i_sb->s_id, dir->i_ino); - return -EINVAL; + mutex_unlock(&info->bfs_lock); + return -EBADF; } - while (ctx->pos < dir->i_size) { - offset = ctx->pos & (BFS_BSIZE - 1); - block = BFS_I(dir)->i_sblock + (ctx->pos >> BFS_BSIZE_BITS); + while (f->f_pos < dir->i_size) { + offset = f->f_pos & (BFS_BSIZE - 1); + block = BFS_I(dir)->i_sblock + (f->f_pos >> BFS_BSIZE_BITS); bh = sb_bread(dir->i_sb, block); if (!bh) { - ctx->pos += BFS_BSIZE - offset; + f->f_pos += BFS_BSIZE - offset; continue; } do { de = (struct bfs_dirent *)(bh->b_data + offset); if (de->ino) { int size = strnlen(de->name, BFS_NAMELEN); - if (!dir_emit(ctx, de->name, size, + if (filldir(dirent, de->name, size, f->f_pos, le16_to_cpu(de->ino), - DT_UNKNOWN)) { + DT_UNKNOWN) < 0) { brelse(bh); + mutex_unlock(&info->bfs_lock); return 0; } } offset += BFS_DIRENT_SIZE; - ctx->pos += BFS_DIRENT_SIZE; - } while ((offset < BFS_BSIZE) && (ctx->pos < dir->i_size)); + f->f_pos += BFS_DIRENT_SIZE; + } while ((offset < BFS_BSIZE) && (f->f_pos < dir->i_size)); brelse(bh); } - return 0; + + mutex_unlock(&info->bfs_lock); + return 0; } const struct file_operations bfs_dir_operations = { .read = generic_read_dir, - .iterate = bfs_readdir, + .readdir = bfs_readdir, .fsync = generic_file_fsync, .llseek = generic_file_llseek, }; diff --git a/trunk/fs/btrfs/backref.c b/trunk/fs/btrfs/backref.c index 290e347b6db3..b4fb41558111 100644 --- a/trunk/fs/btrfs/backref.c +++ b/trunk/fs/btrfs/backref.c @@ -918,8 +918,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans, ref->parent, bsz, 0); if (!eb || !extent_buffer_uptodate(eb)) { free_extent_buffer(eb); - ret = -EIO; - goto out; + return -EIO; } ret = find_extent_in_eb(eb, bytenr, *extent_item_pos, &eie); diff --git a/trunk/fs/btrfs/check-integrity.c b/trunk/fs/btrfs/check-integrity.c index 1431a6965017..18af6f48781a 100644 --- a/trunk/fs/btrfs/check-integrity.c 
+++ b/trunk/fs/btrfs/check-integrity.c @@ -1700,7 +1700,7 @@ static int btrfsic_read_block(struct btrfsic_state *state, unsigned int j; DECLARE_COMPLETION_ONSTACK(complete); - bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i); + bio = bio_alloc(GFP_NOFS, num_pages - i); if (!bio) { printk(KERN_INFO "btrfsic: bio_alloc() for %u pages failed!\n", diff --git a/trunk/fs/btrfs/ctree.c b/trunk/fs/btrfs/ctree.c index 02fae7f7e42c..de6de8e60b46 100644 --- a/trunk/fs/btrfs/ctree.c +++ b/trunk/fs/btrfs/ctree.c @@ -951,12 +951,10 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, BUG_ON(ret); /* -ENOMEM */ } if (new_flags != 0) { - int level = btrfs_header_level(buf); - ret = btrfs_set_disk_extent_flags(trans, root, buf->start, buf->len, - new_flags, level, 0); + new_flags, 0); if (ret) return ret; } diff --git a/trunk/fs/btrfs/ctree.h b/trunk/fs/btrfs/ctree.h index d6dd49b51ba8..63c328a9ce95 100644 --- a/trunk/fs/btrfs/ctree.h +++ b/trunk/fs/btrfs/ctree.h @@ -88,12 +88,12 @@ struct btrfs_ordered_sum; /* holds checksums of all the data extents */ #define BTRFS_CSUM_TREE_OBJECTID 7ULL -/* holds quota configuration and tracking */ -#define BTRFS_QUOTA_TREE_OBJECTID 8ULL - /* for storing balance parameters in the root tree */ #define BTRFS_BALANCE_OBJECTID -4ULL +/* holds quota configuration and tracking */ +#define BTRFS_QUOTA_TREE_OBJECTID 8ULL + /* orhpan objectid for tracking unlinked/truncated files */ #define BTRFS_ORPHAN_OBJECTID -5ULL @@ -3075,7 +3075,7 @@ int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, u64 flags, - int level, int is_data); + int is_data); int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, diff --git a/trunk/fs/btrfs/delayed-inode.c b/trunk/fs/btrfs/delayed-inode.c index eb34438ddedb..f26f38ccd194 100644 --- a/trunk/fs/btrfs/delayed-inode.c +++ b/trunk/fs/btrfs/delayed-inode.c @@ -1681,7 +1681,8 @@ int btrfs_should_delete_dir_index(struct list_head *del_list, * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree * */ -int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, +int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, + filldir_t filldir, struct list_head *ins_list) { struct btrfs_dir_item *di; @@ -1703,13 +1704,13 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, list_for_each_entry_safe(curr, next, ins_list, readdir_list) { list_del(&curr->readdir_list); - if (curr->key.offset < ctx->pos) { + if (curr->key.offset < filp->f_pos) { if (atomic_dec_and_test(&curr->refs)) kfree(curr); continue; } - ctx->pos = curr->key.offset; + filp->f_pos = curr->key.offset; di = (struct btrfs_dir_item *)curr->data; name = (char *)(di + 1); @@ -1718,7 +1719,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, d_type = btrfs_filetype_table[di->type]; btrfs_disk_key_to_cpu(&location, &di->location); - over = !dir_emit(ctx, name, name_len, + over = filldir(dirent, name, name_len, curr->key.offset, location.objectid, d_type); if (atomic_dec_and_test(&curr->refs)) diff --git a/trunk/fs/btrfs/delayed-inode.h b/trunk/fs/btrfs/delayed-inode.h index a4b38f934d14..1d5c5f7abe3e 100644 --- a/trunk/fs/btrfs/delayed-inode.h +++ b/trunk/fs/btrfs/delayed-inode.h @@ -139,7 +139,8 @@ void btrfs_put_delayed_items(struct list_head *ins_list, struct list_head *del_list); int 
btrfs_should_delete_dir_index(struct list_head *del_list, u64 index); -int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, +int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, + filldir_t filldir, struct list_head *ins_list); /* for init */ diff --git a/trunk/fs/btrfs/delayed-ref.h b/trunk/fs/btrfs/delayed-ref.h index 70b962cc177d..f75fcaf79aeb 100644 --- a/trunk/fs/btrfs/delayed-ref.h +++ b/trunk/fs/btrfs/delayed-ref.h @@ -60,7 +60,6 @@ struct btrfs_delayed_ref_node { struct btrfs_delayed_extent_op { struct btrfs_disk_key key; u64 flags_to_set; - int level; unsigned int update_key:1; unsigned int update_flags:1; unsigned int is_data:1; diff --git a/trunk/fs/btrfs/dev-replace.c b/trunk/fs/btrfs/dev-replace.c index 65241f32d3f8..7ba7b3900cb8 100644 --- a/trunk/fs/btrfs/dev-replace.c +++ b/trunk/fs/btrfs/dev-replace.c @@ -313,11 +313,6 @@ int btrfs_dev_replace_start(struct btrfs_root *root, struct btrfs_device *tgt_device = NULL; struct btrfs_device *src_device = NULL; - if (btrfs_fs_incompat(fs_info, RAID56)) { - pr_warn("btrfs: dev_replace cannot yet handle RAID5/RAID6\n"); - return -EINVAL; - } - switch (args->start.cont_reading_from_srcdev_mode) { case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS: case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID: diff --git a/trunk/fs/btrfs/disk-io.c b/trunk/fs/btrfs/disk-io.c index b0292b3ead54..4e9ebe1f1827 100644 --- a/trunk/fs/btrfs/disk-io.c +++ b/trunk/fs/btrfs/disk-io.c @@ -152,7 +152,7 @@ static struct btrfs_lockdep_keyset { { .id = BTRFS_DEV_TREE_OBJECTID, .name_stem = "dev" }, { .id = BTRFS_FS_TREE_OBJECTID, .name_stem = "fs" }, { .id = BTRFS_CSUM_TREE_OBJECTID, .name_stem = "csum" }, - { .id = BTRFS_QUOTA_TREE_OBJECTID, .name_stem = "quota" }, + { .id = BTRFS_ORPHAN_OBJECTID, .name_stem = "orphan" }, { .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" }, { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" }, { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" }, @@ -1013,8 +1013,7 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags) return try_release_extent_buffer(page); } -static void btree_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +static void btree_invalidatepage(struct page *page, unsigned long offset) { struct extent_io_tree *tree; tree = &BTRFS_I(page->mapping->host)->io_tree; @@ -1514,6 +1513,7 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, } root->commit_root = btrfs_root_node(root); + BUG_ON(!root->node); /* -ENOMEM */ out: if (location->objectid != BTRFS_TREE_LOG_OBJECTID) { root->ref_cows = 1; @@ -1988,33 +1988,30 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) { free_extent_buffer(info->tree_root->node); free_extent_buffer(info->tree_root->commit_root); - info->tree_root->node = NULL; - info->tree_root->commit_root = NULL; - - if (info->dev_root) { - free_extent_buffer(info->dev_root->node); - free_extent_buffer(info->dev_root->commit_root); - info->dev_root->node = NULL; - info->dev_root->commit_root = NULL; - } - if (info->extent_root) { - free_extent_buffer(info->extent_root->node); - free_extent_buffer(info->extent_root->commit_root); - info->extent_root->node = NULL; - info->extent_root->commit_root = NULL; - } - if (info->csum_root) { - free_extent_buffer(info->csum_root->node); - free_extent_buffer(info->csum_root->commit_root); - info->csum_root->node = NULL; - info->csum_root->commit_root = NULL; - } + 
free_extent_buffer(info->dev_root->node); + free_extent_buffer(info->dev_root->commit_root); + free_extent_buffer(info->extent_root->node); + free_extent_buffer(info->extent_root->commit_root); + free_extent_buffer(info->csum_root->node); + free_extent_buffer(info->csum_root->commit_root); if (info->quota_root) { free_extent_buffer(info->quota_root->node); free_extent_buffer(info->quota_root->commit_root); + } + + info->tree_root->node = NULL; + info->tree_root->commit_root = NULL; + info->dev_root->node = NULL; + info->dev_root->commit_root = NULL; + info->extent_root->node = NULL; + info->extent_root->commit_root = NULL; + info->csum_root->node = NULL; + info->csum_root->commit_root = NULL; + if (info->quota_root) { info->quota_root->node = NULL; info->quota_root->commit_root = NULL; } + if (chunk_root) { free_extent_buffer(info->chunk_root->node); free_extent_buffer(info->chunk_root->commit_root); @@ -2860,8 +2857,8 @@ int open_ctree(struct super_block *sb, btrfs_free_qgroup_config(fs_info); fail_trans_kthread: kthread_stop(fs_info->transaction_kthread); - btrfs_cleanup_transaction(fs_info->tree_root); del_fs_roots(fs_info); + btrfs_cleanup_transaction(fs_info->tree_root); fail_cleaner: kthread_stop(fs_info->cleaner_kthread); @@ -3131,7 +3128,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait) * caller */ device->flush_bio = NULL; - bio = btrfs_io_bio_alloc(GFP_NOFS, 0); + bio = bio_alloc(GFP_NOFS, 0); if (!bio) return -ENOMEM; @@ -3513,16 +3510,16 @@ int close_ctree(struct btrfs_root *root) percpu_counter_sum(&fs_info->delalloc_bytes)); } - btrfs_free_block_groups(fs_info); + free_root_pointers(fs_info, 1); - btrfs_stop_all_workers(fs_info); + btrfs_free_block_groups(fs_info); del_fs_roots(fs_info); - free_root_pointers(fs_info, 1); - iput(fs_info->btree_inode); + btrfs_stop_all_workers(fs_info); + #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY if (btrfs_test_opt(root, CHECK_INTEGRITY)) btrfsic_unmount(root, fs_info->fs_devices); @@ -3662,11 +3659,8 @@ static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t, ordered_operations); list_del_init(&btrfs_inode->ordered_operations); - spin_unlock(&root->fs_info->ordered_extent_lock); btrfs_invalidate_inodes(btrfs_inode->root); - - spin_lock(&root->fs_info->ordered_extent_lock); } spin_unlock(&root->fs_info->ordered_extent_lock); @@ -3788,11 +3782,8 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root) list_del_init(&btrfs_inode->delalloc_inodes); clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, &btrfs_inode->runtime_flags); - spin_unlock(&root->fs_info->delalloc_lock); btrfs_invalidate_inodes(btrfs_inode->root); - - spin_lock(&root->fs_info->delalloc_lock); } spin_unlock(&root->fs_info->delalloc_lock); @@ -3817,7 +3808,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root, while (start <= end) { eb = btrfs_find_tree_block(root, start, root->leafsize); - start += root->leafsize; + start += eb->len; if (!eb) continue; wait_on_extent_buffer_writeback(eb); diff --git a/trunk/fs/btrfs/extent-tree.c b/trunk/fs/btrfs/extent-tree.c index df472ab1b5ac..2305b5c5cf00 100644 --- a/trunk/fs/btrfs/extent-tree.c +++ b/trunk/fs/btrfs/extent-tree.c @@ -2070,7 +2070,8 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans, u32 item_size; int ret; int err = 0; - int metadata = !extent_op->is_data; + int metadata = (node->type == BTRFS_TREE_BLOCK_REF_KEY || + node->type == BTRFS_SHARED_BLOCK_REF_KEY); if (trans->aborted) return 0; @@ -2085,8 +2086,11 @@ static int run_delayed_extent_op(struct 
btrfs_trans_handle *trans, key.objectid = node->bytenr; if (metadata) { + struct btrfs_delayed_tree_ref *tree_ref; + + tree_ref = btrfs_delayed_node_to_tree_ref(node); key.type = BTRFS_METADATA_ITEM_KEY; - key.offset = extent_op->level; + key.offset = tree_ref->level; } else { key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = node->num_bytes; @@ -2715,7 +2719,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, u64 flags, - int level, int is_data) + int is_data) { struct btrfs_delayed_extent_op *extent_op; int ret; @@ -2728,7 +2732,6 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, extent_op->update_flags = 1; extent_op->update_key = 0; extent_op->is_data = is_data ? 1 : 0; - extent_op->level = level; ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr, num_bytes, extent_op); @@ -3106,11 +3109,6 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group, WARN_ON(ret); if (i_size_read(inode) > 0) { - ret = btrfs_check_trunc_cache_free_space(root, - &root->fs_info->global_block_rsv); - if (ret) - goto out_put; - ret = btrfs_truncate_free_space_cache(root, trans, path, inode); if (ret) @@ -4564,8 +4562,6 @@ static void init_global_block_rsv(struct btrfs_fs_info *fs_info) fs_info->csum_root->block_rsv = &fs_info->global_block_rsv; fs_info->dev_root->block_rsv = &fs_info->global_block_rsv; fs_info->tree_root->block_rsv = &fs_info->global_block_rsv; - if (fs_info->quota_root) - fs_info->quota_root->block_rsv = &fs_info->global_block_rsv; fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv; update_global_block_rsv(fs_info); @@ -6655,51 +6651,51 @@ use_block_rsv(struct btrfs_trans_handle *trans, struct btrfs_block_rsv *block_rsv; struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; int ret; - bool global_updated = false; block_rsv = get_block_rsv(trans, root); - if (unlikely(block_rsv->size == 0)) - goto try_reserve; -again: - ret = block_rsv_use_bytes(block_rsv, blocksize); - if (!ret) + if (block_rsv->size == 0) { + ret = reserve_metadata_bytes(root, block_rsv, blocksize, + BTRFS_RESERVE_NO_FLUSH); + /* + * If we couldn't reserve metadata bytes try and use some from + * the global reserve. + */ + if (ret && block_rsv != global_rsv) { + ret = block_rsv_use_bytes(global_rsv, blocksize); + if (!ret) + return global_rsv; + return ERR_PTR(ret); + } else if (ret) { + return ERR_PTR(ret); + } return block_rsv; - - if (block_rsv->failfast) - return ERR_PTR(ret); - - if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) { - global_updated = true; - update_global_block_rsv(root->fs_info); - goto again; } - if (btrfs_test_opt(root, ENOSPC_DEBUG)) { - static DEFINE_RATELIMIT_STATE(_rs, - DEFAULT_RATELIMIT_INTERVAL * 10, - /*DEFAULT_RATELIMIT_BURST*/ 1); - if (__ratelimit(&_rs)) - WARN(1, KERN_DEBUG - "btrfs: block rsv returned %d\n", ret); - } -try_reserve: - ret = reserve_metadata_bytes(root, block_rsv, blocksize, - BTRFS_RESERVE_NO_FLUSH); + ret = block_rsv_use_bytes(block_rsv, blocksize); if (!ret) return block_rsv; - /* - * If we couldn't reserve metadata bytes try and use some from - * the global reserve if its space type is the same as the global - * reservation. 
- */ - if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL && - block_rsv->space_info == global_rsv->space_info) { - ret = block_rsv_use_bytes(global_rsv, blocksize); - if (!ret) - return global_rsv; + if (ret && !block_rsv->failfast) { + if (btrfs_test_opt(root, ENOSPC_DEBUG)) { + static DEFINE_RATELIMIT_STATE(_rs, + DEFAULT_RATELIMIT_INTERVAL * 10, + /*DEFAULT_RATELIMIT_BURST*/ 1); + if (__ratelimit(&_rs)) + WARN(1, KERN_DEBUG + "btrfs: block rsv returned %d\n", ret); + } + ret = reserve_metadata_bytes(root, block_rsv, blocksize, + BTRFS_RESERVE_NO_FLUSH); + if (!ret) { + return block_rsv; + } else if (ret && block_rsv != global_rsv) { + ret = block_rsv_use_bytes(global_rsv, blocksize); + if (!ret) + return global_rsv; + } } - return ERR_PTR(ret); + + return ERR_PTR(-ENOSPC); } static void unuse_block_rsv(struct btrfs_fs_info *fs_info, @@ -6767,7 +6763,6 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, extent_op->update_key = 1; extent_op->update_flags = 1; extent_op->is_data = 0; - extent_op->level = level; ret = btrfs_add_delayed_tree_ref(root->fs_info, trans, ins.objectid, @@ -6939,8 +6934,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans, ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc); BUG_ON(ret); /* -ENOMEM */ ret = btrfs_set_disk_extent_flags(trans, root, eb->start, - eb->len, flag, - btrfs_header_level(eb), 0); + eb->len, flag, 0); BUG_ON(ret); /* -ENOMEM */ wc->flags[level] |= flag; } diff --git a/trunk/fs/btrfs/extent_io.c b/trunk/fs/btrfs/extent_io.c index 6bca9472f313..32d67a822e93 100644 --- a/trunk/fs/btrfs/extent_io.c +++ b/trunk/fs/btrfs/extent_io.c @@ -23,7 +23,6 @@ static struct kmem_cache *extent_state_cache; static struct kmem_cache *extent_buffer_cache; -static struct bio_set *btrfs_bioset; #ifdef CONFIG_BTRFS_DEBUG static LIST_HEAD(buffers); @@ -126,20 +125,10 @@ int __init extent_io_init(void) SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); if (!extent_buffer_cache) goto free_state_cache; - - btrfs_bioset = bioset_create(BIO_POOL_SIZE, - offsetof(struct btrfs_io_bio, bio)); - if (!btrfs_bioset) - goto free_buffer_cache; return 0; -free_buffer_cache: - kmem_cache_destroy(extent_buffer_cache); - extent_buffer_cache = NULL; - free_state_cache: kmem_cache_destroy(extent_state_cache); - extent_state_cache = NULL; return -ENOMEM; } @@ -156,8 +145,6 @@ void extent_io_exit(void) kmem_cache_destroy(extent_state_cache); if (extent_buffer_cache) kmem_cache_destroy(extent_buffer_cache); - if (btrfs_bioset) - bioset_free(btrfs_bioset); } void extent_io_tree_init(struct extent_io_tree *tree, @@ -1960,6 +1947,28 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page) SetPageUptodate(page); } +/* + * helper function to unlock a page if all the extents in the tree + * for that page are unlocked + */ +static void check_page_locked(struct extent_io_tree *tree, struct page *page) +{ + u64 start = page_offset(page); + u64 end = start + PAGE_CACHE_SIZE - 1; + if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) + unlock_page(page); +} + +/* + * helper function to end page writeback if all the extents + * in the tree for that page are done with writeback + */ +static void check_page_writeback(struct extent_io_tree *tree, + struct page *page) +{ + end_page_writeback(page); +} + /* * When IO fails, either with EIO or csum verification fails, we * try other mirrors that might have a good copy of the data. 
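[Editorial note, not part of the patch: the use_block_rsv() hunk above in extent-tree.c reverts the allocator to its older reservation order — consume from the reserve handed to the transaction first, then try a flush-free reservation, and only then borrow from the global reserve. A condensed sketch of the control flow being restored; the helper names and their argument lists are taken from the hunk itself, the wrapper's own name and signature are illustrative, and the ratelimited ENOSPC_DEBUG warning is trimmed.]

    /* Condensed sketch of the restored use_block_rsv() flow (illustrative). */
    static struct btrfs_block_rsv *use_block_rsv_sketch(struct btrfs_trans_handle *trans,
                                                        struct btrfs_root *root, u32 blocksize)
    {
            struct btrfs_block_rsv *rsv = get_block_rsv(trans, root);
            struct btrfs_block_rsv *global = &root->fs_info->global_block_rsv;
            int ret;

            if (rsv->size == 0) {
                    /* nothing delegated to this reserve yet: reserve fresh bytes */
                    ret = reserve_metadata_bytes(root, rsv, blocksize,
                                                 BTRFS_RESERVE_NO_FLUSH);
                    if (!ret)
                            return rsv;
                    /* last resort: borrow the bytes from the global reserve */
                    if (rsv != global && !block_rsv_use_bytes(global, blocksize))
                            return global;
                    return ERR_PTR(ret);
            }

            /* normal case: consume from the reserve attached to this transaction */
            if (!block_rsv_use_bytes(rsv, blocksize))
                    return rsv;

            /* reserve ran dry: flush-free reservation, then the global reserve */
            if (!rsv->failfast) {
                    if (!reserve_metadata_bytes(root, rsv, blocksize,
                                                BTRFS_RESERVE_NO_FLUSH))
                            return rsv;
                    if (rsv != global && !block_rsv_use_bytes(global, blocksize))
                            return global;
            }
            return ERR_PTR(-ENOSPC);
    }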
This @@ -2037,7 +2046,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start, if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num)) return 0; - bio = btrfs_io_bio_alloc(GFP_NOFS, 1); + bio = bio_alloc(GFP_NOFS, 1); if (!bio) return -EIO; bio->bi_private = &compl; @@ -2327,7 +2336,7 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page, return -EIO; } - bio = btrfs_io_bio_alloc(GFP_NOFS, 1); + bio = bio_alloc(GFP_NOFS, 1); if (!bio) { free_io_failure(inode, failrec, 0); return -EIO; @@ -2389,24 +2398,19 @@ static void end_bio_extent_writepage(struct bio *bio, int err) struct extent_io_tree *tree; u64 start; u64 end; + int whole_page; do { struct page *page = bvec->bv_page; tree = &BTRFS_I(page->mapping->host)->io_tree; - /* We always issue full-page reads, but if some block - * in a page fails to read, blk_update_request() will - * advance bv_offset and adjust bv_len to compensate. - * Print a warning for nonzero offsets, and an error - * if they don't add up to a full page. */ - if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) - printk("%s page write in btrfs with offset %u and length %u\n", - bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE - ? KERN_ERR "partial" : KERN_INFO "incomplete", - bvec->bv_offset, bvec->bv_len); + start = page_offset(page) + bvec->bv_offset; + end = start + bvec->bv_len - 1; - start = page_offset(page); - end = start + bvec->bv_offset + bvec->bv_len - 1; + if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE) + whole_page = 1; + else + whole_page = 0; if (--bvec >= bio->bi_io_vec) prefetchw(&bvec->bv_page->flags); @@ -2414,7 +2418,10 @@ static void end_bio_extent_writepage(struct bio *bio, int err) if (end_extent_writepage(page, err, start, end)) continue; - end_page_writeback(page); + if (whole_page) + end_page_writeback(page); + else + check_page_writeback(tree, page); } while (bvec >= bio->bi_io_vec); bio_put(bio); @@ -2439,6 +2446,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) struct extent_io_tree *tree; u64 start; u64 end; + int whole_page; int mirror; int ret; @@ -2449,26 +2457,19 @@ static void end_bio_extent_readpage(struct bio *bio, int err) struct page *page = bvec->bv_page; struct extent_state *cached = NULL; struct extent_state *state; - struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, " - "mirror=%lu\n", (u64)bio->bi_sector, err, - io_bio->mirror_num); + "mirror=%ld\n", (u64)bio->bi_sector, err, + (long int)bio->bi_bdev); tree = &BTRFS_I(page->mapping->host)->io_tree; - /* We always issue full-page reads, but if some block - * in a page fails to read, blk_update_request() will - * advance bv_offset and adjust bv_len to compensate. - * Print a warning for nonzero offsets, and an error - * if they don't add up to a full page. */ - if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) - printk("%s page read in btrfs with offset %u and length %u\n", - bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE - ? 
KERN_ERR "partial" : KERN_INFO "incomplete", - bvec->bv_offset, bvec->bv_len); + start = page_offset(page) + bvec->bv_offset; + end = start + bvec->bv_len - 1; - start = page_offset(page); - end = start + bvec->bv_offset + bvec->bv_len - 1; + if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE) + whole_page = 1; + else + whole_page = 0; if (++bvec <= bvec_end) prefetchw(&bvec->bv_page->flags); @@ -2484,7 +2485,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) } spin_unlock(&tree->lock); - mirror = io_bio->mirror_num; + mirror = (int)(unsigned long)bio->bi_bdev; if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { ret = tree->ops->readpage_end_io_hook(page, start, end, state, mirror); @@ -2527,35 +2528,39 @@ static void end_bio_extent_readpage(struct bio *bio, int err) } unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); - if (uptodate) { - SetPageUptodate(page); + if (whole_page) { + if (uptodate) { + SetPageUptodate(page); + } else { + ClearPageUptodate(page); + SetPageError(page); + } + unlock_page(page); } else { - ClearPageUptodate(page); - SetPageError(page); + if (uptodate) { + check_page_uptodate(tree, page); + } else { + ClearPageUptodate(page); + SetPageError(page); + } + check_page_locked(tree, page); } - unlock_page(page); } while (bvec <= bvec_end); bio_put(bio); } -/* - * this allocates from the btrfs_bioset. We're returning a bio right now - * but you can call btrfs_io_bio for the appropriate container_of magic - */ struct bio * btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, gfp_t gfp_flags) { struct bio *bio; - bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset); + bio = bio_alloc(gfp_flags, nr_vecs); if (bio == NULL && (current->flags & PF_MEMALLOC)) { - while (!bio && (nr_vecs /= 2)) { - bio = bio_alloc_bioset(gfp_flags, - nr_vecs, btrfs_bioset); - } + while (!bio && (nr_vecs /= 2)) + bio = bio_alloc(gfp_flags, nr_vecs); } if (bio) { @@ -2566,19 +2571,6 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, return bio; } -struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask) -{ - return bio_clone_bioset(bio, gfp_mask, btrfs_bioset); -} - - -/* this also allocates from the btrfs_bioset */ -struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) -{ - return bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset); -} - - static int __must_check submit_one_bio(int rw, struct bio *bio, int mirror_num, unsigned long bio_flags) { @@ -2957,7 +2949,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, pg_offset = i_size & (PAGE_CACHE_SIZE - 1); if (page->index > end_index || (page->index == end_index && !pg_offset)) { - page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); + page->mapping->a_ops->invalidatepage(page, 0); unlock_page(page); return 0; } @@ -3996,7 +3988,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, last_for_get_extent = isize; } - lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0, + lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, &cached_state); em = get_extent_skip_holes(inode, start, last_for_get_extent, @@ -4083,7 +4075,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, out_free: free_extent_map(em); out: - unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1, + unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len, &cached_state, GFP_NOFS); return ret; } diff --git 
a/trunk/fs/btrfs/extent_io.h b/trunk/fs/btrfs/extent_io.h index 41fb81e7ec53..a2c03a175009 100644 --- a/trunk/fs/btrfs/extent_io.h +++ b/trunk/fs/btrfs/extent_io.h @@ -336,8 +336,6 @@ int extent_clear_unlock_delalloc(struct inode *inode, struct bio * btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, gfp_t gfp_flags); -struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs); -struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask); struct btrfs_fs_info; diff --git a/trunk/fs/btrfs/free-space-cache.c b/trunk/fs/btrfs/free-space-cache.c index e53009657f0e..ecca6c7375a6 100644 --- a/trunk/fs/btrfs/free-space-cache.c +++ b/trunk/fs/btrfs/free-space-cache.c @@ -197,33 +197,31 @@ int create_free_space_inode(struct btrfs_root *root, block_group->key.objectid); } -int btrfs_check_trunc_cache_free_space(struct btrfs_root *root, - struct btrfs_block_rsv *rsv) -{ - u64 needed_bytes; - int ret; - - /* 1 for slack space, 1 for updating the inode */ - needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) + - btrfs_calc_trans_metadata_size(root, 1); - - spin_lock(&rsv->lock); - if (rsv->reserved < needed_bytes) - ret = -ENOSPC; - else - ret = 0; - spin_unlock(&rsv->lock); - return 0; -} - int btrfs_truncate_free_space_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_path *path, struct inode *inode) { + struct btrfs_block_rsv *rsv; + u64 needed_bytes; loff_t oldsize; int ret = 0; + rsv = trans->block_rsv; + trans->block_rsv = &root->fs_info->global_block_rsv; + + /* 1 for slack space, 1 for updating the inode */ + needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) + + btrfs_calc_trans_metadata_size(root, 1); + + spin_lock(&trans->block_rsv->lock); + if (trans->block_rsv->reserved < needed_bytes) { + spin_unlock(&trans->block_rsv->lock); + trans->block_rsv = rsv; + return -ENOSPC; + } + spin_unlock(&trans->block_rsv->lock); + oldsize = i_size_read(inode); btrfs_i_size_write(inode, 0); truncate_pagecache(inode, oldsize, 0); @@ -234,7 +232,9 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, */ ret = btrfs_truncate_inode_items(trans, root, inode, 0, BTRFS_EXTENT_DATA_KEY); + if (ret) { + trans->block_rsv = rsv; btrfs_abort_transaction(trans, root, ret); return ret; } @@ -242,6 +242,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, ret = btrfs_update_inode(trans, root, inode); if (ret) btrfs_abort_transaction(trans, root, ret); + trans->block_rsv = rsv; return ret; } @@ -919,8 +920,10 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, /* Make sure we can fit our crcs into the first page */ if (io_ctl.check_crcs && - (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) + (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) { + WARN_ON(1); goto out_nospc; + } io_ctl_set_generation(&io_ctl, trans->transid); diff --git a/trunk/fs/btrfs/free-space-cache.h b/trunk/fs/btrfs/free-space-cache.h index 8b7f19f44961..4dc17d8809c7 100644 --- a/trunk/fs/btrfs/free-space-cache.h +++ b/trunk/fs/btrfs/free-space-cache.h @@ -54,8 +54,6 @@ int create_free_space_inode(struct btrfs_root *root, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); -int btrfs_check_trunc_cache_free_space(struct btrfs_root *root, - struct btrfs_block_rsv *rsv); int btrfs_truncate_free_space_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_path *path, diff --git a/trunk/fs/btrfs/inode-map.c b/trunk/fs/btrfs/inode-map.c index 2c66ddbbe670..d26f67a59e36 100644 --- 
a/trunk/fs/btrfs/inode-map.c +++ b/trunk/fs/btrfs/inode-map.c @@ -429,12 +429,11 @@ int btrfs_save_ino_cache(struct btrfs_root *root, num_bytes = trans->bytes_reserved; /* * 1 item for inode item insertion if need - * 4 items for inode item update (in the worst case) - * 1 items for slack space if we need do truncation + * 3 items for inode item update (in the worst case) * 1 item for free space object * 3 items for pre-allocation */ - trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 10); + trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8); ret = btrfs_block_rsv_add(root, trans->block_rsv, trans->bytes_reserved, BTRFS_RESERVE_NO_FLUSH); @@ -469,8 +468,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root, if (i_size_read(inode) > 0) { ret = btrfs_truncate_free_space_cache(root, trans, path, inode); if (ret) { - if (ret != -ENOSPC) - btrfs_abort_transaction(trans, root, ret); + btrfs_abort_transaction(trans, root, ret); goto out_put; } } diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c index 4f9d16b70d3d..9b31b3b091fc 100644 --- a/trunk/fs/btrfs/inode.c +++ b/trunk/fs/btrfs/inode.c @@ -715,10 +715,8 @@ static noinline int submit_compressed_extents(struct inode *inode, async_extent->ram_size - 1, 0); em = alloc_extent_map(); - if (!em) { - ret = -ENOMEM; + if (!em) goto out_free_reserve; - } em->start = async_extent->start; em->len = async_extent->ram_size; em->orig_start = em->start; @@ -925,10 +923,8 @@ static noinline int __cow_file_range(struct btrfs_trans_handle *trans, } em = alloc_extent_map(); - if (!em) { - ret = -ENOMEM; + if (!em) goto out_reserve; - } em->start = start; em->orig_start = em->start; ram_size = ins.offset; @@ -4728,7 +4724,6 @@ void btrfs_evict_inode(struct inode *inode) btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(root); no_delete: - btrfs_remove_delayed_node(inode); clear_inode(inode); return; } @@ -4844,13 +4839,14 @@ static void inode_tree_add(struct inode *inode) struct rb_node **p; struct rb_node *parent; u64 ino = btrfs_ino(inode); +again: + p = &root->inode_tree.rb_node; + parent = NULL; if (inode_unhashed(inode)) return; -again: - parent = NULL; + spin_lock(&root->inode_lock); - p = &root->inode_tree.rb_node; while (*p) { parent = *p; entry = rb_entry(parent, struct btrfs_inode, rb_node); @@ -5137,9 +5133,10 @@ unsigned char btrfs_filetype_table[] = { DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK }; -static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) +static int btrfs_real_readdir(struct file *filp, void *dirent, + filldir_t filldir) { - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_item *item; struct btrfs_dir_item *di; @@ -5160,15 +5157,29 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) char tmp_name[32]; char *name_ptr; int name_len; - int is_curr = 0; /* ctx->pos points to the current index? */ + int is_curr = 0; /* filp->f_pos points to the current index? */ /* FIXME, use a real flag for deciding about the key type */ if (root->fs_info->tree_root == root) key_type = BTRFS_DIR_ITEM_KEY; - if (!dir_emit_dots(file, ctx)) - return 0; - + /* special case for "." 
*/ + if (filp->f_pos == 0) { + over = filldir(dirent, ".", 1, + filp->f_pos, btrfs_ino(inode), DT_DIR); + if (over) + return 0; + filp->f_pos = 1; + } + /* special case for .., just use the back ref */ + if (filp->f_pos == 1) { + u64 pino = parent_ino(filp->f_path.dentry); + over = filldir(dirent, "..", 2, + filp->f_pos, pino, DT_DIR); + if (over) + return 0; + filp->f_pos = 2; + } path = btrfs_alloc_path(); if (!path) return -ENOMEM; @@ -5182,7 +5193,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) } btrfs_set_key_type(&key, key_type); - key.offset = ctx->pos; + key.offset = filp->f_pos; key.objectid = btrfs_ino(inode); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); @@ -5208,14 +5219,14 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) break; if (btrfs_key_type(&found_key) != key_type) break; - if (found_key.offset < ctx->pos) + if (found_key.offset < filp->f_pos) goto next; if (key_type == BTRFS_DIR_INDEX_KEY && btrfs_should_delete_dir_index(&del_list, found_key.offset)) goto next; - ctx->pos = found_key.offset; + filp->f_pos = found_key.offset; is_curr = 1; di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); @@ -5259,8 +5270,9 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) over = 0; goto skip; } - over = !dir_emit(ctx, name_ptr, name_len, - location.objectid, d_type); + over = filldir(dirent, name_ptr, name_len, + found_key.offset, location.objectid, + d_type); skip: if (name_ptr != tmp_name) @@ -5279,8 +5291,9 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) if (key_type == BTRFS_DIR_INDEX_KEY) { if (is_curr) - ctx->pos++; - ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); + filp->f_pos++; + ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir, + &ins_list); if (ret) goto nopos; } @@ -5291,9 +5304,9 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) * 32-bit glibc will use getdents64, but then strtol - * so the last number we can serve is this. 
*/ - ctx->pos = 0x7fffffff; + filp->f_pos = 0x7fffffff; else - ctx->pos++; + filp->f_pos++; nopos: ret = 0; err: @@ -6915,11 +6928,7 @@ struct btrfs_dio_private { /* IO errors */ int errors; - /* orig_bio is our btrfs_io_bio */ struct bio *orig_bio; - - /* dio_bio came from fs/direct-io.c */ - struct bio *dio_bio; }; static void btrfs_endio_direct_read(struct bio *bio, int err) @@ -6929,7 +6938,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) struct bio_vec *bvec = bio->bi_io_vec; struct inode *inode = dip->inode; struct btrfs_root *root = BTRFS_I(inode)->root; - struct bio *dio_bio; u64 start; start = dip->logical_offset; @@ -6969,15 +6977,14 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, dip->logical_offset + dip->bytes - 1); - dio_bio = dip->dio_bio; + bio->bi_private = dip->private; kfree(dip); /* If we had a csum failure make sure to clear the uptodate flag */ if (err) - clear_bit(BIO_UPTODATE, &dio_bio->bi_flags); - dio_end_io(dio_bio, err); - bio_put(bio); + clear_bit(BIO_UPTODATE, &bio->bi_flags); + dio_end_io(bio, err); } static void btrfs_endio_direct_write(struct bio *bio, int err) @@ -6988,7 +6995,6 @@ static void btrfs_endio_direct_write(struct bio *bio, int err) struct btrfs_ordered_extent *ordered = NULL; u64 ordered_offset = dip->logical_offset; u64 ordered_bytes = dip->bytes; - struct bio *dio_bio; int ret; if (err) @@ -7016,15 +7022,14 @@ static void btrfs_endio_direct_write(struct bio *bio, int err) goto again; } out_done: - dio_bio = dip->dio_bio; + bio->bi_private = dip->private; kfree(dip); /* If we had an error make sure to clear the uptodate flag */ if (err) - clear_bit(BIO_UPTODATE, &dio_bio->bi_flags); - dio_end_io(dio_bio, err); - bio_put(bio); + clear_bit(BIO_UPTODATE, &bio->bi_flags); + dio_end_io(bio, err); } static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw, @@ -7060,10 +7065,10 @@ static void btrfs_end_dio_bio(struct bio *bio, int err) if (!atomic_dec_and_test(&dip->pending_bios)) goto out; - if (dip->errors) { + if (dip->errors) bio_io_error(dip->orig_bio); - } else { - set_bit(BIO_UPTODATE, &dip->dio_bio->bi_flags); + else { + set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags); bio_endio(dip->orig_bio, 0); } out: @@ -7238,34 +7243,25 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, return 0; } -static void btrfs_submit_direct(int rw, struct bio *dio_bio, - struct inode *inode, loff_t file_offset) +static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, + loff_t file_offset) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_dio_private *dip; - struct bio_vec *bvec = dio_bio->bi_io_vec; - struct bio *io_bio; + struct bio_vec *bvec = bio->bi_io_vec; int skip_sum; int write = rw & REQ_WRITE; int ret = 0; skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; - io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS); - - if (!io_bio) { - ret = -ENOMEM; - goto free_ordered; - } - dip = kmalloc(sizeof(*dip), GFP_NOFS); if (!dip) { ret = -ENOMEM; - goto free_io_bio; + goto free_ordered; } - dip->private = dio_bio->bi_private; - io_bio->bi_private = dio_bio->bi_private; + dip->private = bio->bi_private; dip->inode = inode; dip->logical_offset = file_offset; @@ -7273,27 +7269,22 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio, do { dip->bytes += bvec->bv_len; bvec++; - } while (bvec <= (dio_bio->bi_io_vec + dio_bio->bi_vcnt - 1)); + } while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 
1)); - dip->disk_bytenr = (u64)dio_bio->bi_sector << 9; - io_bio->bi_private = dip; + dip->disk_bytenr = (u64)bio->bi_sector << 9; + bio->bi_private = dip; dip->errors = 0; - dip->orig_bio = io_bio; - dip->dio_bio = dio_bio; + dip->orig_bio = bio; atomic_set(&dip->pending_bios, 0); if (write) - io_bio->bi_end_io = btrfs_endio_direct_write; + bio->bi_end_io = btrfs_endio_direct_write; else - io_bio->bi_end_io = btrfs_endio_direct_read; + bio->bi_end_io = btrfs_endio_direct_read; ret = btrfs_submit_direct_hook(rw, dip, skip_sum); if (!ret) return; - -free_io_bio: - bio_put(io_bio); - free_ordered: /* * If this is a write, we need to clean up the reserved space and kill @@ -7309,7 +7300,7 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio, btrfs_put_ordered_extent(ordered); btrfs_put_ordered_extent(ordered); } - bio_endio(dio_bio, ret); + bio_endio(bio, ret); } static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb, @@ -7493,8 +7484,7 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) return __btrfs_releasepage(page, gfp_flags & GFP_NOFS); } -static void btrfs_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +static void btrfs_invalidatepage(struct page *page, unsigned long offset) { struct inode *inode = page->mapping->host; struct extent_io_tree *tree; @@ -7989,6 +7979,7 @@ void btrfs_destroy_inode(struct inode *inode) inode_tree_del(inode); btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); free: + btrfs_remove_delayed_node(inode); call_rcu(&inode->i_rcu, btrfs_i_callback); } @@ -7996,9 +7987,6 @@ int btrfs_drop_inode(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; - if (root == NULL) - return 1; - /* the snap/subvol tree is on deleting */ if (btrfs_root_refs(&root->root_item) == 0 && root != root->fs_info->tree_root) @@ -8715,7 +8703,7 @@ static const struct inode_operations btrfs_dir_ro_inode_operations = { static const struct file_operations btrfs_dir_file_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = btrfs_real_readdir, + .readdir = btrfs_real_readdir, .unlocked_ioctl = btrfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = btrfs_ioctl, diff --git a/trunk/fs/btrfs/ioctl.c b/trunk/fs/btrfs/ioctl.c index 0f81d67cdc8d..0de4a2fcfb24 100644 --- a/trunk/fs/btrfs/ioctl.c +++ b/trunk/fs/btrfs/ioctl.c @@ -1801,11 +1801,7 @@ static noinline int copy_to_sk(struct btrfs_root *root, item_off = btrfs_item_ptr_offset(leaf, i); item_len = btrfs_item_size_nr(leaf, i); - btrfs_item_key_to_cpu(leaf, key, i); - if (!key_in_sk(key, sk)) - continue; - - if (sizeof(sh) + item_len > BTRFS_SEARCH_ARGS_BUFSIZE) + if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE) item_len = 0; if (sizeof(sh) + item_len + *sk_offset > @@ -1814,6 +1810,10 @@ static noinline int copy_to_sk(struct btrfs_root *root, goto overflow; } + btrfs_item_key_to_cpu(leaf, key, i); + if (!key_in_sk(key, sk)) + continue; + sh.objectid = key->objectid; sh.offset = key->offset; sh.type = key->type; diff --git a/trunk/fs/btrfs/raid56.c b/trunk/fs/btrfs/raid56.c index 0525e1389f5b..0740621daf6c 100644 --- a/trunk/fs/btrfs/raid56.c +++ b/trunk/fs/btrfs/raid56.c @@ -1050,7 +1050,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, } /* put a new bio on the list */ - bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1); + bio = bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1); if (!bio) return -ENOMEM; diff --git a/trunk/fs/btrfs/relocation.c b/trunk/fs/btrfs/relocation.c index 
4febca4fc2de..704a1b8d2a2b 100644 --- a/trunk/fs/btrfs/relocation.c +++ b/trunk/fs/btrfs/relocation.c @@ -1773,7 +1773,7 @@ int replace_path(struct btrfs_trans_handle *trans, if (!eb || !extent_buffer_uptodate(eb)) { ret = (!eb) ? -ENOMEM : -EIO; free_extent_buffer(eb); - break; + return ret; } btrfs_tree_lock(eb); if (cow) { @@ -3350,11 +3350,6 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info, } truncate: - ret = btrfs_check_trunc_cache_free_space(root, - &fs_info->global_block_rsv); - if (ret) - goto out; - path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; @@ -4082,7 +4077,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, return inode; } -static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) +static struct reloc_control *alloc_reloc_control(void) { struct reloc_control *rc; @@ -4093,8 +4088,7 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) INIT_LIST_HEAD(&rc->reloc_roots); backref_cache_init(&rc->backref_cache); mapping_tree_init(&rc->reloc_root_tree); - extent_io_tree_init(&rc->processed_blocks, - fs_info->btree_inode->i_mapping); + extent_io_tree_init(&rc->processed_blocks, NULL); return rc; } @@ -4111,7 +4105,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start) int rw = 0; int err = 0; - rc = alloc_reloc_control(fs_info); + rc = alloc_reloc_control(); if (!rc) return -ENOMEM; @@ -4312,7 +4306,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) if (list_empty(&reloc_roots)) goto out; - rc = alloc_reloc_control(root->fs_info); + rc = alloc_reloc_control(); if (!rc) { err = -ENOMEM; goto out; diff --git a/trunk/fs/btrfs/scrub.c b/trunk/fs/btrfs/scrub.c index 79bd479317cb..f489e24659a4 100644 --- a/trunk/fs/btrfs/scrub.c +++ b/trunk/fs/btrfs/scrub.c @@ -1296,7 +1296,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info, } WARN_ON(!page->page); - bio = btrfs_io_bio_alloc(GFP_NOFS, 1); + bio = bio_alloc(GFP_NOFS, 1); if (!bio) { page->io_error = 1; sblock->no_io_error_seen = 0; @@ -1431,7 +1431,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, return -EIO; } - bio = btrfs_io_bio_alloc(GFP_NOFS, 1); + bio = bio_alloc(GFP_NOFS, 1); if (!bio) return -EIO; bio->bi_bdev = page_bad->dev->bdev; @@ -1522,7 +1522,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, sbio->dev = wr_ctx->tgtdev; bio = sbio->bio; if (!bio) { - bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio); + bio = bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio); if (!bio) { mutex_unlock(&wr_ctx->wr_lock); return -ENOMEM; @@ -1930,7 +1930,7 @@ static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, sbio->dev = spage->dev; bio = sbio->bio; if (!bio) { - bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio); + bio = bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio); if (!bio) return -ENOMEM; sbio->bio = bio; @@ -3307,7 +3307,7 @@ static int write_page_nocow(struct scrub_ctx *sctx, "btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n"); return -EIO; } - bio = btrfs_io_bio_alloc(GFP_NOFS, 1); + bio = bio_alloc(GFP_NOFS, 1); if (!bio) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; diff --git a/trunk/fs/btrfs/super.c b/trunk/fs/btrfs/super.c index f0857e092a3c..a4807ced23cc 100644 --- a/trunk/fs/btrfs/super.c +++ b/trunk/fs/btrfs/super.c @@ -1263,7 +1263,6 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) btrfs_dev_replace_suspend_for_unmount(fs_info); btrfs_scrub_cancel(fs_info); - 
btrfs_pause_balance(fs_info); ret = btrfs_commit_super(root); if (ret) diff --git a/trunk/fs/btrfs/volumes.c b/trunk/fs/btrfs/volumes.c index 8bffb9174afb..0e925ced971b 100644 --- a/trunk/fs/btrfs/volumes.c +++ b/trunk/fs/btrfs/volumes.c @@ -3120,13 +3120,14 @@ int btrfs_balance(struct btrfs_balance_control *bctl, allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; if (num_devices == 1) allowed |= BTRFS_BLOCK_GROUP_DUP; - else if (num_devices > 1) + else if (num_devices < 4) allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1); - if (num_devices > 2) - allowed |= BTRFS_BLOCK_GROUP_RAID5; - if (num_devices > 3) - allowed |= (BTRFS_BLOCK_GROUP_RAID10 | - BTRFS_BLOCK_GROUP_RAID6); + else + allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 | + BTRFS_BLOCK_GROUP_RAID10 | + BTRFS_BLOCK_GROUP_RAID5 | + BTRFS_BLOCK_GROUP_RAID6); + if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) && (!alloc_profile_is_valid(bctl->data.target, 1) || (bctl->data.target & ~allowed))) { @@ -5018,16 +5019,42 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, return 0; } +static void *merge_stripe_index_into_bio_private(void *bi_private, + unsigned int stripe_index) +{ + /* + * with single, dup, RAID0, RAID1 and RAID10, stripe_index is + * at most 1. + * The alternative solution (instead of stealing bits from the + * pointer) would be to allocate an intermediate structure + * that contains the old private pointer plus the stripe_index. + */ + BUG_ON((((uintptr_t)bi_private) & 3) != 0); + BUG_ON(stripe_index > 3); + return (void *)(((uintptr_t)bi_private) | stripe_index); +} + +static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private) +{ + return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3)); +} + +static unsigned int extract_stripe_index_from_bio_private(void *bi_private) +{ + return (unsigned int)((uintptr_t)bi_private) & 3; +} + static void btrfs_end_bio(struct bio *bio, int err) { - struct btrfs_bio *bbio = bio->bi_private; + struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private); int is_orig_bio = 0; if (err) { atomic_inc(&bbio->error); if (err == -EIO || err == -EREMOTEIO) { unsigned int stripe_index = - btrfs_io_bio(bio)->stripe_index; + extract_stripe_index_from_bio_private( + bio->bi_private); struct btrfs_device *dev; BUG_ON(stripe_index >= bbio->num_stripes); @@ -5057,7 +5084,8 @@ static void btrfs_end_bio(struct bio *bio, int err) } bio->bi_private = bbio->private; bio->bi_end_io = bbio->end_io; - btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; + bio->bi_bdev = (struct block_device *) + (unsigned long)bbio->mirror_num; /* only send an error to the higher layers if it is * beyond the tolerance of the btrfs bio */ @@ -5183,7 +5211,8 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, struct btrfs_device *dev = bbio->stripes[dev_nr].dev; bio->bi_private = bbio; - btrfs_io_bio(bio)->stripe_index = dev_nr; + bio->bi_private = merge_stripe_index_into_bio_private( + bio->bi_private, (unsigned int)dev_nr); bio->bi_end_io = btrfs_end_bio; bio->bi_sector = physical >> 9; #ifdef DEBUG @@ -5244,7 +5273,8 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) if (atomic_dec_and_test(&bbio->stripes_pending)) { bio->bi_private = bbio->private; bio->bi_end_io = bbio->end_io; - btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; + bio->bi_bdev = (struct block_device *) + (unsigned long)bbio->mirror_num; bio->bi_sector = logical >> 9; kfree(bbio); bio_endio(bio, -EIO); @@ -5322,7 +5352,7 @@ int 
btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, } if (dev_nr < total_devs - 1) { - bio = btrfs_bio_clone(first_bio, GFP_NOFS); + bio = bio_clone(first_bio, GFP_NOFS); BUG_ON(!bio); /* -ENOMEM */ } else { bio = first_bio; diff --git a/trunk/fs/btrfs/volumes.h b/trunk/fs/btrfs/volumes.h index f6247e2a47f7..845ccbb0d2e3 100644 --- a/trunk/fs/btrfs/volumes.h +++ b/trunk/fs/btrfs/volumes.h @@ -152,26 +152,6 @@ struct btrfs_fs_devices { int rotating; }; -/* - * we need the mirror number and stripe index to be passed around - * the call chain while we are processing end_io (especially errors). - * Really, what we need is a btrfs_bio structure that has this info - * and is properly sized with its stripe array, but we're not there - * quite yet. We have our own btrfs bioset, and all of the bios - * we allocate are actually btrfs_io_bios. We'll cram as much of - * struct btrfs_bio as we can into this over time. - */ -struct btrfs_io_bio { - unsigned long mirror_num; - unsigned long stripe_index; - struct bio bio; -}; - -static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio) -{ - return container_of(bio, struct btrfs_io_bio, bio); -} - struct btrfs_bio_stripe { struct btrfs_device *dev; u64 physical; diff --git a/trunk/fs/buffer.c b/trunk/fs/buffer.c index f93392e2df12..d2a4d1bb2d57 100644 --- a/trunk/fs/buffer.c +++ b/trunk/fs/buffer.c @@ -1454,8 +1454,7 @@ static void discard_buffer(struct buffer_head * bh) * block_invalidatepage - invalidate part or all of a buffer-backed page * * @page: the page which is affected - * @offset: start of the range to invalidate - * @length: length of the range to invalidate + * @offset: the index of the truncation point * * block_invalidatepage() is called when all or part of the page has become * invalidated by a truncate operation. @@ -1466,34 +1465,21 @@ static void discard_buffer(struct buffer_head * bh) * point. Because the caller is about to free (and possibly reuse) those * blocks on-disk. */ -void block_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +void block_invalidatepage(struct page *page, unsigned long offset) { struct buffer_head *head, *bh, *next; unsigned int curr_off = 0; - unsigned int stop = length + offset; BUG_ON(!PageLocked(page)); if (!page_has_buffers(page)) goto out; - /* - * Check for overflow - */ - BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); - head = page_buffers(page); bh = head; do { unsigned int next_off = curr_off + bh->b_size; next = bh->b_this_page; - /* - * Are we still fully in range ? - */ - if (next_off > stop) - goto out; - /* * is this block fully invalidated? */ @@ -1515,7 +1501,6 @@ void block_invalidatepage(struct page *page, unsigned int offset, } EXPORT_SYMBOL(block_invalidatepage); - /* * We attach and possibly dirty the buffers atomically wrt * __set_page_dirty_buffers() via private_lock. try_to_free_buffers @@ -2856,7 +2841,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block, * they may have been added in ext3_writepage(). Make them * freeable here, so the page does not leak. 
*/ - do_invalidatepage(page, 0, PAGE_CACHE_SIZE); + do_invalidatepage(page, 0); unlock_page(page); return 0; /* don't care */ } diff --git a/trunk/fs/cachefiles/interface.c b/trunk/fs/cachefiles/interface.c index d4c1206af9fc..746ce532e130 100644 --- a/trunk/fs/cachefiles/interface.c +++ b/trunk/fs/cachefiles/interface.c @@ -13,6 +13,8 @@ #include #include "internal.h" +#define list_to_page(head) (list_entry((head)->prev, struct page, lru)) + struct cachefiles_lookup_data { struct cachefiles_xattr *auxdata; /* auxiliary data */ char *key; /* key path */ @@ -210,29 +212,20 @@ static void cachefiles_update_object(struct fscache_object *_object) object = container_of(_object, struct cachefiles_object, fscache); cache = container_of(object->fscache.cache, struct cachefiles_cache, cache); - - if (!fscache_use_cookie(_object)) { - _leave(" [relinq]"); - return; - } - cookie = object->fscache.cookie; if (!cookie->def->get_aux) { - fscache_unuse_cookie(_object); _leave(" [no aux]"); return; } auxdata = kmalloc(2 + 512 + 3, cachefiles_gfp); if (!auxdata) { - fscache_unuse_cookie(_object); _leave(" [nomem]"); return; } auxlen = cookie->def->get_aux(cookie->netfs_data, auxdata->data, 511); - fscache_unuse_cookie(_object); ASSERTCMP(auxlen, <, 511); auxdata->len = auxlen + 1; @@ -270,7 +263,7 @@ static void cachefiles_drop_object(struct fscache_object *_object) #endif /* delete retired objects */ - if (test_bit(FSCACHE_COOKIE_RETIRED, &object->fscache.cookie->flags) && + if (object->fscache.state == FSCACHE_OBJECT_RECYCLING && _object != cache->cache.fsdef ) { _debug("- retire object OBJ%x", object->fscache.debug_id); diff --git a/trunk/fs/cachefiles/namei.c b/trunk/fs/cachefiles/namei.c index 25badd1aec5c..8c01c5fcdf75 100644 --- a/trunk/fs/cachefiles/namei.c +++ b/trunk/fs/cachefiles/namei.c @@ -38,7 +38,7 @@ void __cachefiles_printk_object(struct cachefiles_object *object, printk(KERN_ERR "%sobject: OBJ%x\n", prefix, object->fscache.debug_id); printk(KERN_ERR "%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n", - prefix, object->fscache.state->name, + prefix, fscache_object_states[object->fscache.state], object->fscache.flags, work_busy(&object->fscache.work), object->fscache.events, object->fscache.event_mask); printk(KERN_ERR "%sops=%u inp=%u exc=%u\n", @@ -127,10 +127,10 @@ static void cachefiles_mark_object_buried(struct cachefiles_cache *cache, found_dentry: kdebug("preemptive burial: OBJ%x [%s] %p", object->fscache.debug_id, - object->fscache.state->name, + fscache_object_states[object->fscache.state], dentry); - if (fscache_object_is_live(&object->fscache)) { + if (object->fscache.state < FSCACHE_OBJECT_DYING) { printk(KERN_ERR "\n"); printk(KERN_ERR "CacheFiles: Error:" " Can't preemptively bury live object\n"); @@ -192,7 +192,7 @@ static int cachefiles_mark_object_active(struct cachefiles_cache *cache, /* an old object from a previous incarnation is hogging the slot - we * need to wait for it to be destroyed */ wait_for_old_object: - if (fscache_object_is_live(&object->fscache)) { + if (xobject->fscache.state < FSCACHE_OBJECT_DYING) { printk(KERN_ERR "\n"); printk(KERN_ERR "CacheFiles: Error:" " Unexpected object collision\n"); @@ -836,7 +836,7 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache, // dir->d_name.len, dir->d_name.len, dir->d_name.name, filename); /* look up the victim */ - mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); + mutex_lock_nested(&dir->d_inode->i_mutex, 1); start = jiffies; victim = lookup_one_len(filename, dir, strlen(filename)); 
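The btrfs_end_bio() hunks above revert to passing the stripe index in the low bits of bio->bi_private: merge_stripe_index_into_bio_private() relies on the pointer being at least 4-byte aligned, so the bottom two bits are free to hold a value from 0 to 3. A minimal userspace sketch of that pointer-tagging idiom, assuming only standard C (the names below are illustrative, not kernel API):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TAG_MASK ((uintptr_t)3)   /* two spare bits from 4-byte alignment */

static void *pack_tag(void *ptr, unsigned int tag)
{
    assert(((uintptr_t)ptr & TAG_MASK) == 0);  /* pointer must be aligned */
    assert(tag <= TAG_MASK);                   /* tag must fit in spare bits */
    return (void *)((uintptr_t)ptr | tag);
}

static void *unpack_ptr(void *tagged)
{
    return (void *)((uintptr_t)tagged & ~TAG_MASK);
}

static unsigned int unpack_tag(void *tagged)
{
    return (unsigned int)((uintptr_t)tagged & TAG_MASK);
}

int main(void)
{
    int *payload = malloc(sizeof(*payload));   /* stands in for the btrfs_bio */
    void *priv = pack_tag(payload, 2);         /* stash stripe index 2 */

    printf("ptr ok: %d, tag: %u\n",
           unpack_ptr(priv) == (void *)payload, unpack_tag(priv));
    free(payload);
    return 0;
}

The alternative the kernel comment mentions, an intermediate structure holding both the original private pointer and the index, is what the removed struct btrfs_io_bio provided.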
diff --git a/trunk/fs/cachefiles/xattr.c b/trunk/fs/cachefiles/xattr.c index 2476e5162609..73b46288b54b 100644 --- a/trunk/fs/cachefiles/xattr.c +++ b/trunk/fs/cachefiles/xattr.c @@ -109,12 +109,13 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object, struct dentry *dentry = object->dentry; int ret; + ASSERT(object->fscache.cookie); ASSERT(dentry); _enter("%p,#%d", object, auxdata->len); /* attempt to install the cache metadata directly */ - _debug("SET #%u", auxdata->len); + _debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len); ret = vfs_setxattr(dentry, cachefiles_xattr_cache, &auxdata->type, auxdata->len, @@ -137,12 +138,13 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object, struct dentry *dentry = object->dentry; int ret; + ASSERT(object->fscache.cookie); ASSERT(dentry); _enter("%p,#%d", object, auxdata->len); /* attempt to install the cache metadata directly */ - _debug("SET #%u", auxdata->len); + _debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len); ret = vfs_setxattr(dentry, cachefiles_xattr_cache, &auxdata->type, auxdata->len, diff --git a/trunk/fs/ceph/addr.c b/trunk/fs/ceph/addr.c index 38b5c1bc6776..3e68ac101040 100644 --- a/trunk/fs/ceph/addr.c +++ b/trunk/fs/ceph/addr.c @@ -143,8 +143,7 @@ static int ceph_set_page_dirty(struct page *page) * dirty page counters appropriately. Only called if there is private * data on the page. */ -static void ceph_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +static void ceph_invalidatepage(struct page *page, unsigned long offset) { struct inode *inode; struct ceph_inode_info *ci; @@ -164,20 +163,20 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset, if (!PageDirty(page)) pr_err("%p invalidatepage %p page not dirty\n", inode, page); - if (offset == 0 && length == PAGE_CACHE_SIZE) + if (offset == 0) ClearPageChecked(page); ci = ceph_inode(inode); - if (offset == 0 && length == PAGE_CACHE_SIZE) { - dout("%p invalidatepage %p idx %lu full dirty page\n", - inode, page, page->index); + if (offset == 0) { + dout("%p invalidatepage %p idx %lu full dirty page %lu\n", + inode, page, page->index, offset); ceph_put_wrbuffer_cap_refs(ci, 1, snapc); ceph_put_snap_context(snapc); page->private = 0; ClearPagePrivate(page); } else { - dout("%p invalidatepage %p idx %lu partial dirty page %u(%u)\n", - inode, page, page->index, offset, length); + dout("%p invalidatepage %p idx %lu partial dirty page\n", + inode, page, page->index); } } diff --git a/trunk/fs/ceph/dir.c b/trunk/fs/ceph/dir.c index a40ceda47a32..f02d82b7933e 100644 --- a/trunk/fs/ceph/dir.c +++ b/trunk/fs/ceph/dir.c @@ -111,10 +111,11 @@ static unsigned fpos_off(loff_t p) * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by * the MDS if/when the directory is modified). 
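The block_invalidatepage() and ceph_invalidatepage() changes above drop the (offset, length) pair in favour of a single offset meaning "invalidate from offset to the end of the page". A rough userspace sketch of the range-aware per-block walk that the removed code performs; the page and block sizes here are assumptions for illustration only:

#include <stdio.h>

#define PAGE_SZ  4096u
#define BLOCK_SZ 512u    /* assumed per-buffer size for this sketch */

/* Discard every block that lies entirely inside [offset, offset + length). */
static void invalidate_range(unsigned int offset, unsigned int length)
{
    unsigned int stop = offset + length;
    unsigned int curr;

    for (curr = 0; curr < PAGE_SZ; curr += BLOCK_SZ) {
        unsigned int next = curr + BLOCK_SZ;

        if (next > stop)        /* block extends past the range: stop */
            break;
        if (curr >= offset)     /* block starts inside the range: drop it */
            printf("discard block at %u\n", curr);
    }
}

int main(void)
{
    invalidate_range(1024, 1024);    /* partial invalidation */
    invalidate_range(0, PAGE_SZ);    /* whole page, the offset-only case */
    return 0;
}

With only an offset, the length is implicitly PAGE_CACHE_SIZE - offset, which is why the reverted callers test offset == 0 where the newer code tested offset == 0 && length == PAGE_CACHE_SIZE.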
*/ -static int __dcache_readdir(struct file *file, struct dir_context *ctx) +static int __dcache_readdir(struct file *filp, + void *dirent, filldir_t filldir) { - struct ceph_file_info *fi = file->private_data; - struct dentry *parent = file->f_dentry; + struct ceph_file_info *fi = filp->private_data; + struct dentry *parent = filp->f_dentry; struct inode *dir = parent->d_inode; struct list_head *p; struct dentry *dentry, *last; @@ -125,14 +126,14 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx) last = fi->dentry; fi->dentry = NULL; - dout("__dcache_readdir %p at %llu (last %p)\n", dir, ctx->pos, + dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos, last); spin_lock(&parent->d_lock); /* start at beginning? */ - if (ctx->pos == 2 || last == NULL || - ctx->pos < ceph_dentry(last)->offset) { + if (filp->f_pos == 2 || last == NULL || + filp->f_pos < ceph_dentry(last)->offset) { if (list_empty(&parent->d_subdirs)) goto out_unlock; p = parent->d_subdirs.prev; @@ -156,11 +157,11 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx) if (!d_unhashed(dentry) && dentry->d_inode && ceph_snap(dentry->d_inode) != CEPH_SNAPDIR && ceph_ino(dentry->d_inode) != CEPH_INO_CEPH && - ctx->pos <= di->offset) + filp->f_pos <= di->offset) break; dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry, dentry->d_name.len, dentry->d_name.name, di->offset, - ctx->pos, d_unhashed(dentry) ? " unhashed" : "", + filp->f_pos, d_unhashed(dentry) ? " unhashed" : "", !dentry->d_inode ? " null" : ""); spin_unlock(&dentry->d_lock); p = p->prev; @@ -172,27 +173,29 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx) spin_unlock(&dentry->d_lock); spin_unlock(&parent->d_lock); - dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos, + dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos, dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode); - ctx->pos = di->offset; - if (!dir_emit(ctx, dentry->d_name.name, - dentry->d_name.len, + filp->f_pos = di->offset; + err = filldir(dirent, dentry->d_name.name, + dentry->d_name.len, di->offset, ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino), - dentry->d_inode->i_mode >> 12)) { - if (last) { + dentry->d_inode->i_mode >> 12); + + if (last) { + if (err < 0) { /* remember our position */ fi->dentry = last; fi->next_offset = di->offset; + } else { + dput(last); } - dput(dentry); - return 0; } - - if (last) - dput(last); last = dentry; - ctx->pos++; + if (err < 0) + goto out; + + filp->f_pos++; /* make sure a dentry wasn't dropped while we didn't have parent lock */ if (!ceph_dir_is_complete(dir)) { @@ -232,59 +235,59 @@ static int note_last_dentry(struct ceph_file_info *fi, const char *name, return 0; } -static int ceph_readdir(struct file *file, struct dir_context *ctx) +static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct ceph_file_info *fi = file->private_data; - struct inode *inode = file_inode(file); + struct ceph_file_info *fi = filp->private_data; + struct inode *inode = file_inode(filp); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_fs_client *fsc = ceph_inode_to_client(inode); struct ceph_mds_client *mdsc = fsc->mdsc; - unsigned frag = fpos_frag(ctx->pos); - int off = fpos_off(ctx->pos); + unsigned frag = fpos_frag(filp->f_pos); + int off = fpos_off(filp->f_pos); int err; u32 ftype; struct ceph_mds_reply_info_parsed *rinfo; const int max_entries = fsc->mount_options->max_readdir; const int max_bytes = 
fsc->mount_options->max_readdir_bytes; - dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off); + dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off); if (fi->flags & CEPH_F_ATEND) return 0; /* always start with . and .. */ - if (ctx->pos == 0) { + if (filp->f_pos == 0) { /* note dir version at start of readdir so we can tell * if any dentries get dropped */ fi->dir_release_count = atomic_read(&ci->i_release_count); dout("readdir off 0 -> '.'\n"); - if (!dir_emit(ctx, ".", 1, + if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0), ceph_translate_ino(inode->i_sb, inode->i_ino), - inode->i_mode >> 12)) + inode->i_mode >> 12) < 0) return 0; - ctx->pos = 1; + filp->f_pos = 1; off = 1; } - if (ctx->pos == 1) { - ino_t ino = parent_ino(file->f_dentry); + if (filp->f_pos == 1) { + ino_t ino = parent_ino(filp->f_dentry); dout("readdir off 1 -> '..'\n"); - if (!dir_emit(ctx, "..", 2, + if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1), ceph_translate_ino(inode->i_sb, ino), - inode->i_mode >> 12)) + inode->i_mode >> 12) < 0) return 0; - ctx->pos = 2; + filp->f_pos = 2; off = 2; } /* can we use the dcache? */ spin_lock(&ci->i_ceph_lock); - if ((ctx->pos == 2 || fi->dentry) && + if ((filp->f_pos == 2 || fi->dentry) && !ceph_test_mount_opt(fsc, NOASYNCREADDIR) && ceph_snap(inode) != CEPH_SNAPDIR && __ceph_dir_is_complete(ci) && __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) { spin_unlock(&ci->i_ceph_lock); - err = __dcache_readdir(file, ctx); + err = __dcache_readdir(filp, dirent, filldir); if (err != -EAGAIN) return err; } else { @@ -324,7 +327,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx) return PTR_ERR(req); req->r_inode = inode; ihold(inode); - req->r_dentry = dget(file->f_dentry); + req->r_dentry = dget(filp->f_dentry); /* hints to request -> mds selection code */ req->r_direct_mode = USE_AUTH_MDS; req->r_direct_hash = ceph_frag_value(frag); @@ -376,16 +379,15 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx) rinfo = &fi->last_readdir->r_reply_info; dout("readdir frag %x num %d off %d chunkoff %d\n", frag, rinfo->dir_nr, off, fi->offset); - - ctx->pos = ceph_make_fpos(frag, off); while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) { + u64 pos = ceph_make_fpos(frag, off); struct ceph_mds_reply_inode *in = rinfo->dir_in[off - fi->offset].in; struct ceph_vino vino; ino_t ino; dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n", - off, off - fi->offset, rinfo->dir_nr, ctx->pos, + off, off - fi->offset, rinfo->dir_nr, pos, rinfo->dir_dname_len[off - fi->offset], rinfo->dir_dname[off - fi->offset], in); BUG_ON(!in); @@ -393,15 +395,16 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx) vino.ino = le64_to_cpu(in->ino); vino.snap = le64_to_cpu(in->snapid); ino = ceph_vino_to_ino(vino); - if (!dir_emit(ctx, + if (filldir(dirent, rinfo->dir_dname[off - fi->offset], rinfo->dir_dname_len[off - fi->offset], - ceph_translate_ino(inode->i_sb, ino), ftype)) { + pos, + ceph_translate_ino(inode->i_sb, ino), ftype) < 0) { dout("filldir stopping us...\n"); return 0; } off++; - ctx->pos++; + filp->f_pos = pos + 1; } if (fi->last_name) { @@ -414,7 +417,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx) if (!ceph_frag_is_rightmost(frag)) { frag = ceph_frag_next(frag); off = 0; - ctx->pos = ceph_make_fpos(frag, off); + filp->f_pos = ceph_make_fpos(frag, off); dout("readdir next frag is %x\n", frag); goto more; } @@ -429,11 +432,11 @@ static int ceph_readdir(struct file *file, struct 
dir_context *ctx) if (atomic_read(&ci->i_release_count) == fi->dir_release_count) { dout(" marking %p complete\n", inode); __ceph_dir_set_complete(ci, fi->dir_release_count); - ci->i_max_offset = ctx->pos; + ci->i_max_offset = filp->f_pos; } spin_unlock(&ci->i_ceph_lock); - dout("readdir %p file %p done.\n", inode, file); + dout("readdir %p filp %p done.\n", inode, filp); return 0; } @@ -1265,7 +1268,7 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn) const struct file_operations ceph_dir_fops = { .read = ceph_read_dir, - .iterate = ceph_readdir, + .readdir = ceph_readdir, .llseek = ceph_dir_llseek, .open = ceph_open, .release = ceph_release, diff --git a/trunk/fs/ceph/locks.c b/trunk/fs/ceph/locks.c index ebbf680378e2..202dd3d68be0 100644 --- a/trunk/fs/ceph/locks.c +++ b/trunk/fs/ceph/locks.c @@ -191,23 +191,27 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count) } /** - * Encode the flock and fcntl locks for the given inode into the ceph_filelock - * array. Must be called with lock_flocks() already held. - * If we encounter more of a specific lock type than expected, return -ENOSPC. + * Encode the flock and fcntl locks for the given inode into the pagelist. + * Format is: #fcntl locks, sequential fcntl locks, #flock locks, + * sequential flock locks. + * Must be called with lock_flocks() already held. + * If we encounter more of a specific lock type than expected, + * we return the value 1. */ -int ceph_encode_locks_to_buffer(struct inode *inode, - struct ceph_filelock *flocks, - int num_fcntl_locks, int num_flock_locks) +int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist, + int num_fcntl_locks, int num_flock_locks) { struct file_lock *lock; + struct ceph_filelock cephlock; int err = 0; int seen_fcntl = 0; int seen_flock = 0; - int l = 0; dout("encoding %d flock and %d fcntl locks", num_flock_locks, num_fcntl_locks); - + err = ceph_pagelist_append(pagelist, &num_fcntl_locks, sizeof(u32)); + if (err) + goto fail; for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { if (lock->fl_flags & FL_POSIX) { ++seen_fcntl; @@ -215,12 +219,19 @@ int ceph_encode_locks_to_buffer(struct inode *inode, err = -ENOSPC; goto fail; } - err = lock_to_ceph_filelock(lock, &flocks[l]); + err = lock_to_ceph_filelock(lock, &cephlock); if (err) goto fail; - ++l; + err = ceph_pagelist_append(pagelist, &cephlock, + sizeof(struct ceph_filelock)); } + if (err) + goto fail; } + + err = ceph_pagelist_append(pagelist, &num_flock_locks, sizeof(u32)); + if (err) + goto fail; for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { if (lock->fl_flags & FL_FLOCK) { ++seen_flock; @@ -228,51 +239,19 @@ int ceph_encode_locks_to_buffer(struct inode *inode, err = -ENOSPC; goto fail; } - err = lock_to_ceph_filelock(lock, &flocks[l]); + err = lock_to_ceph_filelock(lock, &cephlock); if (err) goto fail; - ++l; + err = ceph_pagelist_append(pagelist, &cephlock, + sizeof(struct ceph_filelock)); } + if (err) + goto fail; } fail: return err; } -/** - * Copy the encoded flock and fcntl locks into the pagelist. - * Format is: #fcntl locks, sequential fcntl locks, #flock locks, - * sequential flock locks. - * Returns zero on success. 
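The ceph_encode_locks() and ceph_locks_to_pagelist() comments above describe a single wire layout: a 32-bit count of fcntl locks, the fcntl lock records, a 32-bit count of flock locks, then the flock records. A small sketch of that "count plus array, twice" encoding into a flat buffer; the record type is a stand-in rather than the real struct ceph_filelock, and the counts are left in host byte order here (the kernel code uses cpu_to_le32):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct lock_rec {                  /* placeholder for struct ceph_filelock */
    uint64_t start, length;
    uint32_t pid, type;
};

/* Append a u32 count followed by count records; returns bytes written. */
static size_t encode_counted(uint8_t *out, const struct lock_rec *recs,
                             uint32_t count)
{
    size_t off = 0;

    memcpy(out + off, &count, sizeof(count));
    off += sizeof(count);
    memcpy(out + off, recs, (size_t)count * sizeof(*recs));
    off += (size_t)count * sizeof(*recs);
    return off;
}

int main(void)
{
    struct lock_rec fcntl_locks[2] = { { 0, 10, 100, 1 }, { 20, 5, 100, 0 } };
    struct lock_rec flock_locks[1] = { { 0, 0, 101, 1 } };
    uint8_t buf[256];
    size_t off = 0;

    off += encode_counted(buf + off, fcntl_locks, 2);   /* fcntl section */
    off += encode_counted(buf + off, flock_locks, 1);   /* flock section */
    printf("encoded %zu bytes\n", off);
    return 0;
}

The two code paths in the diff differ mainly in where that buffer lives: the removed helpers encode into a preallocated flocks array and copy it to the pagelist afterwards, while the restored ceph_encode_locks() appends straight to the pagelist and the caller retries from a saved cursor on -ENOSPC.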
- */ -int ceph_locks_to_pagelist(struct ceph_filelock *flocks, - struct ceph_pagelist *pagelist, - int num_fcntl_locks, int num_flock_locks) -{ - int err = 0; - __le32 nlocks; - - nlocks = cpu_to_le32(num_fcntl_locks); - err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks)); - if (err) - goto out_fail; - - err = ceph_pagelist_append(pagelist, flocks, - num_fcntl_locks * sizeof(*flocks)); - if (err) - goto out_fail; - - nlocks = cpu_to_le32(num_flock_locks); - err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks)); - if (err) - goto out_fail; - - err = ceph_pagelist_append(pagelist, - &flocks[num_fcntl_locks], - num_flock_locks * sizeof(*flocks)); -out_fail: - return err; -} - /* * Given a pointer to a lock, convert it to a ceph filelock */ diff --git a/trunk/fs/ceph/mds_client.c b/trunk/fs/ceph/mds_client.c index 4d2920304be8..4f22671a5bd4 100644 --- a/trunk/fs/ceph/mds_client.c +++ b/trunk/fs/ceph/mds_client.c @@ -2478,44 +2478,39 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, if (recon_state->flock) { int num_fcntl_locks, num_flock_locks; - struct ceph_filelock *flocks; - -encode_again: - lock_flocks(); - ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); - unlock_flocks(); - flocks = kmalloc((num_fcntl_locks+num_flock_locks) * - sizeof(struct ceph_filelock), GFP_NOFS); - if (!flocks) { - err = -ENOMEM; - goto out_free; - } - lock_flocks(); - err = ceph_encode_locks_to_buffer(inode, flocks, - num_fcntl_locks, - num_flock_locks); - unlock_flocks(); - if (err) { - kfree(flocks); - if (err == -ENOSPC) - goto encode_again; - goto out_free; - } - /* - * number of encoded locks is stable, so copy to pagelist - */ - rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) + - (num_fcntl_locks+num_flock_locks) * - sizeof(struct ceph_filelock)); - err = ceph_pagelist_append(pagelist, &rec, reclen); - if (!err) - err = ceph_locks_to_pagelist(flocks, pagelist, - num_fcntl_locks, - num_flock_locks); - kfree(flocks); + struct ceph_pagelist_cursor trunc_point; + + ceph_pagelist_set_cursor(pagelist, &trunc_point); + do { + lock_flocks(); + ceph_count_locks(inode, &num_fcntl_locks, + &num_flock_locks); + rec.v2.flock_len = (2*sizeof(u32) + + (num_fcntl_locks+num_flock_locks) * + sizeof(struct ceph_filelock)); + unlock_flocks(); + + /* pre-alloc pagelist */ + ceph_pagelist_truncate(pagelist, &trunc_point); + err = ceph_pagelist_append(pagelist, &rec, reclen); + if (!err) + err = ceph_pagelist_reserve(pagelist, + rec.v2.flock_len); + + /* encode locks */ + if (!err) { + lock_flocks(); + err = ceph_encode_locks(inode, + pagelist, + num_fcntl_locks, + num_flock_locks); + unlock_flocks(); + } + } while (err == -ENOSPC); } else { err = ceph_pagelist_append(pagelist, &rec, reclen); } + out_free: kfree(path); out_dput: diff --git a/trunk/fs/ceph/super.h b/trunk/fs/ceph/super.h index 7ccfdb4aea2e..8696be2ff679 100644 --- a/trunk/fs/ceph/super.h +++ b/trunk/fs/ceph/super.h @@ -822,13 +822,8 @@ extern const struct export_operations ceph_export_ops; extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl); extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl); extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num); -extern int ceph_encode_locks_to_buffer(struct inode *inode, - struct ceph_filelock *flocks, - int num_fcntl_locks, - int num_flock_locks); -extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks, - struct ceph_pagelist *pagelist, - int num_fcntl_locks, int num_flock_locks); +extern int 
ceph_encode_locks(struct inode *i, struct ceph_pagelist *p, + int p_locks, int f_locks); extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c); /* debugfs.c */ diff --git a/trunk/fs/cifs/cifs_dfs_ref.c b/trunk/fs/cifs/cifs_dfs_ref.c index 58df174deb10..8e33ec65847b 100644 --- a/trunk/fs/cifs/cifs_dfs_ref.c +++ b/trunk/fs/cifs/cifs_dfs_ref.c @@ -18,7 +18,6 @@ #include #include #include -#include #include "cifsglob.h" #include "cifsproto.h" #include "cifsfs.h" @@ -49,74 +48,58 @@ void cifs_dfs_release_automount_timer(void) } /** - * cifs_build_devname - build a devicename from a UNC and optional prepath - * @nodename: pointer to UNC string - * @prepath: pointer to prefixpath (or NULL if there isn't one) + * cifs_get_share_name - extracts share name from UNC + * @node_name: pointer to UNC string * - * Build a new cifs devicename after chasing a DFS referral. Allocate a buffer - * big enough to hold the final thing. Copy the UNC from the nodename, and - * concatenate the prepath onto the end of it if there is one. - * - * Returns pointer to the built string, or a ERR_PTR. Caller is responsible - * for freeing the returned string. + * Extracts sharename form full UNC. + * i.e. strips from UNC trailing path that is not part of share + * name and fixup missing '\' in the beginning of DFS node refferal + * if necessary. + * Returns pointer to share name on success or ERR_PTR on error. + * Caller is responsible for freeing returned string. */ -static char * -cifs_build_devname(char *nodename, const char *prepath) +static char *cifs_get_share_name(const char *node_name) { - size_t pplen; - size_t unclen; - char *dev; - char *pos; - - /* skip over any preceding delimiters */ - nodename += strspn(nodename, "\\"); - if (!*nodename) - return ERR_PTR(-EINVAL); - - /* get length of UNC and set pos to last char */ - unclen = strlen(nodename); - pos = nodename + unclen - 1; + int len; + char *UNC; + char *pSep; + + len = strlen(node_name); + UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */, + GFP_KERNEL); + if (!UNC) + return ERR_PTR(-ENOMEM); - /* trim off any trailing delimiters */ - while (*pos == '\\') { - --pos; - --unclen; + /* get share name and server name */ + if (node_name[1] != '\\') { + UNC[0] = '\\'; + strncpy(UNC+1, node_name, len); + len++; + UNC[len] = 0; + } else { + strncpy(UNC, node_name, len); + UNC[len] = 0; } - /* allocate a buffer: - * +2 for preceding "//" - * +1 for delimiter between UNC and prepath - * +1 for trailing NULL - */ - pplen = prepath ? 
strlen(prepath) : 0; - dev = kmalloc(2 + unclen + 1 + pplen + 1, GFP_KERNEL); - if (!dev) - return ERR_PTR(-ENOMEM); - - pos = dev; - /* add the initial "//" */ - *pos = '/'; - ++pos; - *pos = '/'; - ++pos; - - /* copy in the UNC portion from referral */ - memcpy(pos, nodename, unclen); - pos += unclen; - - /* copy the prefixpath remainder (if there is one) */ - if (pplen) { - *pos = '/'; - ++pos; - memcpy(pos, prepath, pplen); - pos += pplen; + /* find server name end */ + pSep = memchr(UNC+2, '\\', len-2); + if (!pSep) { + cifs_dbg(VFS, "%s: no server name end in node name: %s\n", + __func__, node_name); + kfree(UNC); + return ERR_PTR(-EINVAL); } - /* NULL terminator */ - *pos = '\0'; + /* find sharename end */ + pSep++; + pSep = memchr(UNC+(pSep-UNC), '\\', len-(pSep-UNC)); + if (pSep) { + /* trim path up to sharename end + * now we have share name in UNC */ + *pSep = 0; + } - convert_delimiter(dev, '/'); - return dev; + return UNC; } @@ -140,7 +123,6 @@ char *cifs_compose_mount_options(const char *sb_mountdata, { int rc; char *mountdata = NULL; - const char *prepath = NULL; int md_len; char *tkn_e; char *srvIP = NULL; @@ -150,10 +132,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata, if (sb_mountdata == NULL) return ERR_PTR(-EINVAL); - if (strlen(fullpath) - ref->path_consumed) - prepath = fullpath + ref->path_consumed; - - *devname = cifs_build_devname(ref->node_name, prepath); + *devname = cifs_get_share_name(ref->node_name); if (IS_ERR(*devname)) { rc = PTR_ERR(*devname); *devname = NULL; @@ -167,14 +146,12 @@ char *cifs_compose_mount_options(const char *sb_mountdata, goto compose_mount_options_err; } - /* - * In most cases, we'll be building a shorter string than the original, - * but we do have to assume that the address in the ip= option may be - * much longer than the original. Add the max length of an address - * string to the length of the original string to allow for worst case. + /* md_len = strlen(...) 
+ 12 for 'sep+prefixpath=' + * assuming that we have 'unc=' and 'ip=' in + * the original sb_mountdata */ - md_len = strlen(sb_mountdata) + INET6_ADDRSTRLEN; - mountdata = kzalloc(md_len + 1, GFP_KERNEL); + md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12; + mountdata = kzalloc(md_len+1, GFP_KERNEL); if (mountdata == NULL) { rc = -ENOMEM; goto compose_mount_options_err; @@ -218,6 +195,26 @@ char *cifs_compose_mount_options(const char *sb_mountdata, strncat(mountdata, &sep, 1); strcat(mountdata, "ip="); strcat(mountdata, srvIP); + strncat(mountdata, &sep, 1); + strcat(mountdata, "unc="); + strcat(mountdata, *devname); + + /* find & copy prefixpath */ + tkn_e = strchr(ref->node_name + 2, '\\'); + if (tkn_e == NULL) { + /* invalid unc, missing share name*/ + rc = -EINVAL; + goto compose_mount_options_err; + } + + tkn_e = strchr(tkn_e + 1, '\\'); + if (tkn_e || (strlen(fullpath) - ref->path_consumed)) { + strncat(mountdata, &sep, 1); + strcat(mountdata, "prefixpath="); + if (tkn_e) + strcat(mountdata, tkn_e + 1); + strcat(mountdata, fullpath + ref->path_consumed); + } /*cifs_dbg(FYI, "%s: parent mountdata: %s\n", __func__, sb_mountdata);*/ /*cifs_dbg(FYI, "%s: submount mountdata: %s\n", __func__, mountdata );*/ diff --git a/trunk/fs/cifs/cifsfs.c b/trunk/fs/cifs/cifsfs.c index 540c1ccfcdb2..72e4efee1389 100644 --- a/trunk/fs/cifs/cifsfs.c +++ b/trunk/fs/cifs/cifsfs.c @@ -372,6 +372,9 @@ cifs_show_options(struct seq_file *s, struct dentry *root) cifs_show_security(s, tcon->ses->server); cifs_show_cache_flavor(s, cifs_sb); + seq_printf(s, ",unc="); + seq_escape(s, tcon->treeName, " \t\n\\"); + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) seq_printf(s, ",multiuser"); else if (tcon->ses->user_name) @@ -968,7 +971,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = { }; const struct file_operations cifs_dir_ops = { - .iterate = cifs_readdir, + .readdir = cifs_readdir, .release = cifs_closedir, .read = generic_read_dir, .unlocked_ioctl = cifs_ioctl, diff --git a/trunk/fs/cifs/cifsfs.h b/trunk/fs/cifs/cifsfs.h index d05b3028e3b9..0e32c3446ce9 100644 --- a/trunk/fs/cifs/cifsfs.h +++ b/trunk/fs/cifs/cifsfs.h @@ -101,7 +101,7 @@ extern int cifs_file_mmap(struct file * , struct vm_area_struct *); extern int cifs_file_strict_mmap(struct file * , struct vm_area_struct *); extern const struct file_operations cifs_dir_ops; extern int cifs_dir_open(struct inode *inode, struct file *file); -extern int cifs_readdir(struct file *file, struct dir_context *ctx); +extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir); /* Functions related to dir entries */ extern const struct dentry_operations cifs_dentry_ops; diff --git a/trunk/fs/cifs/connect.c b/trunk/fs/cifs/connect.c index e3bc39bb9d12..99eeaa17ee00 100644 --- a/trunk/fs/cifs/connect.c +++ b/trunk/fs/cifs/connect.c @@ -1061,7 +1061,6 @@ static int cifs_parse_security_flavors(char *value, #endif case Opt_sec_none: vol->nullauth = 1; - vol->secFlg |= CIFSSEC_MAY_NTLM; break; default: cifs_dbg(VFS, "bad security option: %s\n", value); @@ -1258,18 +1257,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, vol->backupuid_specified = false; /* no backup intent for a user */ vol->backupgid_specified = false; /* no backup intent for a group */ - switch (cifs_parse_devname(devname, vol)) { - case 0: - break; - case -ENOMEM: - cifs_dbg(VFS, "Unable to allocate memory for devname.\n"); - goto cifs_parse_mount_err; - case -EINVAL: - cifs_dbg(VFS, "Malformed UNC in devname.\n"); - goto 
cifs_parse_mount_err; - default: - cifs_dbg(VFS, "Unknown error parsing devname.\n"); - goto cifs_parse_mount_err; + /* + * For now, we ignore -EINVAL errors under the assumption that the + * unc= and prefixpath= options will be usable. + */ + if (cifs_parse_devname(devname, vol) == -ENOMEM) { + printk(KERN_ERR "CIFS: Unable to allocate memory to parse " + "device string.\n"); + goto out_nomem; } while ((data = strsep(&options, separator)) != NULL) { @@ -1831,7 +1826,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, } #endif if (!vol->UNC) { - cifs_dbg(VFS, "CIFS mount error: No usable UNC path provided in device string!\n"); + cifs_dbg(VFS, "CIFS mount error: No usable UNC path provided in device string or in unc= option!\n"); goto cifs_parse_mount_err; } @@ -3279,8 +3274,8 @@ build_unc_path_to_root(const struct smb_vol *vol, pos = full_path + unc_len; if (pplen) { - *pos = CIFS_DIR_SEP(cifs_sb); - strncpy(pos + 1, vol->prepath, pplen); + *pos++ = CIFS_DIR_SEP(cifs_sb); + strncpy(pos, vol->prepath, pplen); pos += pplen; } diff --git a/trunk/fs/cifs/dns_resolve.c b/trunk/fs/cifs/dns_resolve.c index 7ede7306599f..e7512e497611 100644 --- a/trunk/fs/cifs/dns_resolve.c +++ b/trunk/fs/cifs/dns_resolve.c @@ -34,7 +34,7 @@ /** * dns_resolve_server_name_to_ip - Resolve UNC server name to ip address. - * @unc: UNC path specifying the server (with '/' as delimiter) + * @unc: UNC path specifying the server * @ip_addr: Where to return the IP address. * * The IP address will be returned in string form, and the caller is @@ -64,7 +64,7 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) hostname = unc + 2; /* Search for server name delimiter */ - sep = memchr(hostname, '/', len); + sep = memchr(hostname, '\\', len); if (sep) len = sep - hostname; else diff --git a/trunk/fs/cifs/file.c b/trunk/fs/cifs/file.c index 4d8ba8d491e5..48b29d24c9f4 100644 --- a/trunk/fs/cifs/file.c +++ b/trunk/fs/cifs/file.c @@ -3546,12 +3546,11 @@ static int cifs_release_page(struct page *page, gfp_t gfp) return cifs_fscache_release_page(page, gfp); } -static void cifs_invalidate_page(struct page *page, unsigned int offset, - unsigned int length) +static void cifs_invalidate_page(struct page *page, unsigned long offset) { struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host); - if (offset == 0 && length == PAGE_CACHE_SIZE) + if (offset == 0) cifs_fscache_invalidate_page(page, &cifsi->vfs_inode); } diff --git a/trunk/fs/cifs/inode.c b/trunk/fs/cifs/inode.c index 20efd81266c6..fc3025199cb3 100644 --- a/trunk/fs/cifs/inode.c +++ b/trunk/fs/cifs/inode.c @@ -171,8 +171,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL) inode->i_flags |= S_AUTOMOUNT; - if (inode->i_state & I_NEW) - cifs_set_ops(inode); + cifs_set_ops(inode); } void diff --git a/trunk/fs/cifs/readdir.c b/trunk/fs/cifs/readdir.c index f1213799de1a..770d5a9781c1 100644 --- a/trunk/fs/cifs/readdir.c +++ b/trunk/fs/cifs/readdir.c @@ -537,14 +537,14 @@ static int cifs_save_resume_key(const char *current_entry, * every entry (do not increment for . or .. entry). 
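Earlier in this diff, the restored cifs_get_share_name() and the removed cifs_build_devname() both come down to the same string surgery on a UNC such as \\server\share\path: find the end of the server component, find the end of the share component, and cut there. The removed cifs_build_devname() also converted the delimiters to '/', which is why the dns_resolve_server_name_to_ip() hunk above goes back to searching for '\\' once it is gone. A hypothetical sketch of that trimming step in plain C, not the CIFS helpers themselves:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Copy "\\server\share" out of a longer UNC, dropping any trailing path.
 * Returns a malloc'd string, or NULL if the UNC has no share component.
 */
static char *unc_share_name(const char *unc)
{
    const char *share_end;
    size_t len;
    char *out;

    if (strncmp(unc, "\\\\", 2) != 0)
        return NULL;

    share_end = strchr(unc + 2, '\\');          /* end of server name */
    if (!share_end)
        return NULL;
    share_end = strchr(share_end + 1, '\\');    /* end of share name */
    len = share_end ? (size_t)(share_end - unc) : strlen(unc);

    out = malloc(len + 1);
    if (!out)
        return NULL;
    memcpy(out, unc, len);
    out[len] = '\0';
    return out;
}

int main(void)
{
    char *share = unc_share_name("\\\\srv1\\data\\projects\\a.txt");

    if (share) {
        printf("%s\n", share);    /* prints \\srv1\data */
        free(share);
    }
    return 0;
}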
*/ static int -find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, +find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, struct file *file, char **current_entry, int *num_to_ret) { __u16 search_flags; int rc = 0; int pos_in_buf = 0; loff_t first_entry_in_buffer; - loff_t index_to_find = pos; + loff_t index_to_find = file->f_pos; struct cifsFileInfo *cfile = file->private_data; struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); struct TCP_Server_Info *server = tcon->ses->server; @@ -659,9 +659,8 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, return rc; } -static int cifs_filldir(char *find_entry, struct file *file, - struct dir_context *ctx, - char *scratch_buf, unsigned int max_len) +static int cifs_filldir(char *find_entry, struct file *file, filldir_t filldir, + void *dirent, char *scratch_buf, unsigned int max_len) { struct cifsFileInfo *file_info = file->private_data; struct super_block *sb = file->f_path.dentry->d_sb; @@ -741,11 +740,13 @@ static int cifs_filldir(char *find_entry, struct file *file, cifs_prime_dcache(file->f_dentry, &name, &fattr); ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid); - return !dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype); + rc = filldir(dirent, name.name, name.len, file->f_pos, ino, + fattr.cf_dtype); + return rc; } -int cifs_readdir(struct file *file, struct dir_context *ctx) +int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) { int rc = 0; unsigned int xid; @@ -771,86 +772,103 @@ int cifs_readdir(struct file *file, struct dir_context *ctx) goto rddir2_exit; } - if (!dir_emit_dots(file, ctx)) - goto rddir2_exit; - - /* 1) If search is active, - is in current search buffer? - if it before then restart search - if after then keep searching till find it */ - - if (file->private_data == NULL) { - rc = -EINVAL; - goto rddir2_exit; - } - cifsFile = file->private_data; - if (cifsFile->srch_inf.endOfSearch) { - if (cifsFile->srch_inf.emptyDir) { - cifs_dbg(FYI, "End of search, empty dir\n"); - rc = 0; - goto rddir2_exit; + switch ((int) file->f_pos) { + case 0: + if (filldir(direntry, ".", 1, file->f_pos, + file_inode(file)->i_ino, DT_DIR) < 0) { + cifs_dbg(VFS, "Filldir for current dir failed\n"); + rc = -ENOMEM; + break; } - } /* else { - cifsFile->invalidHandle = true; - tcon->ses->server->close(xid, tcon, &cifsFile->fid); - } */ - - tcon = tlink_tcon(cifsFile->tlink); - rc = find_cifs_entry(xid, tcon, ctx->pos, file, ¤t_entry, - &num_to_fill); - if (rc) { - cifs_dbg(FYI, "fce error %d\n", rc); - goto rddir2_exit; - } else if (current_entry != NULL) { - cifs_dbg(FYI, "entry %lld found\n", ctx->pos); - } else { - cifs_dbg(FYI, "could not find entry\n"); - goto rddir2_exit; - } - cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n", - num_to_fill, cifsFile->srch_inf.ntwrk_buf_start); - max_len = tcon->ses->server->ops->calc_smb_size( - cifsFile->srch_inf.ntwrk_buf_start); - end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len; - - tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL); - if (tmp_buf == NULL) { - rc = -ENOMEM; - goto rddir2_exit; - } - - for (i = 0; i < num_to_fill; i++) { - if (current_entry == NULL) { - /* evaluate whether this case is an error */ - cifs_dbg(VFS, "past SMB end, num to fill %d i %d\n", - num_to_fill, i); + file->f_pos++; + case 1: + if (filldir(direntry, "..", 2, file->f_pos, + parent_ino(file->f_path.dentry), DT_DIR) < 0) { + cifs_dbg(VFS, "Filldir for parent dir failed\n"); + rc = -ENOMEM; break; } - 
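Most of the readdir churn in this diff, here in CIFS and below in coda, configfs and cramfs, is the same mechanical translation: the older interface hands the filesystem a filldir_t callback plus an opaque buffer and lets the filesystem bump file->f_pos itself (a negative return stops the walk), while the newer one wraps callback and position into a dir_context and emits entries through dir_emit()/dir_emit_dots(). A toy userspace sketch of the dir_context shape of that pattern; none of these names are the real VFS API:

#include <stdbool.h>
#include <stdio.h>

struct dir_ctx {
    /* actor returns false when the caller's buffer is full */
    bool (*actor)(struct dir_ctx *ctx, const char *name, unsigned ino);
    long long pos;
};

/* A filesystem-side iterator: emit ".", "..", then the regular entries. */
static void iterate(struct dir_ctx *ctx)
{
    static const char *names[] = { ".", "..", "a.txt", "b.txt" };
    size_t n = sizeof(names) / sizeof(names[0]);

    while ((size_t)ctx->pos < n) {
        if (!ctx->actor(ctx, names[ctx->pos], 100 + (unsigned)ctx->pos))
            return;        /* stop; pos still points at the unemitted entry */
        ctx->pos++;        /* only advance after a successful emit */
    }
}

static bool print_actor(struct dir_ctx *ctx, const char *name, unsigned ino)
{
    printf("%lld: %s (ino %u)\n", ctx->pos, name, ino);
    return true;
}

int main(void)
{
    struct dir_ctx ctx = { .actor = print_actor, .pos = 0 };

    iterate(&ctx);
    return 0;
}

Advancing pos only after a successful emit is what lets an interrupted listing resume at the entry that did not fit, which is the behaviour both versions of cifs_readdir() and ceph_readdir() arrange by hand.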
/* - * if buggy server returns . and .. late do we want to - * check for that here? - */ - rc = cifs_filldir(current_entry, file, ctx, - tmp_buf, max_len); - if (rc) { - if (rc > 0) + file->f_pos++; + default: + /* 1) If search is active, + is in current search buffer? + if it before then restart search + if after then keep searching till find it */ + + if (file->private_data == NULL) { + rc = -EINVAL; + free_xid(xid); + return rc; + } + cifsFile = file->private_data; + if (cifsFile->srch_inf.endOfSearch) { + if (cifsFile->srch_inf.emptyDir) { + cifs_dbg(FYI, "End of search, empty dir\n"); rc = 0; + break; + } + } /* else { + cifsFile->invalidHandle = true; + tcon->ses->server->close(xid, tcon, &cifsFile->fid); + } */ + + tcon = tlink_tcon(cifsFile->tlink); + rc = find_cifs_entry(xid, tcon, file, ¤t_entry, + &num_to_fill); + if (rc) { + cifs_dbg(FYI, "fce error %d\n", rc); + goto rddir2_exit; + } else if (current_entry != NULL) { + cifs_dbg(FYI, "entry %lld found\n", file->f_pos); + } else { + cifs_dbg(FYI, "could not find entry\n"); + goto rddir2_exit; + } + cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n", + num_to_fill, cifsFile->srch_inf.ntwrk_buf_start); + max_len = tcon->ses->server->ops->calc_smb_size( + cifsFile->srch_inf.ntwrk_buf_start); + end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len; + + tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL); + if (tmp_buf == NULL) { + rc = -ENOMEM; break; } - ctx->pos++; - if (ctx->pos == - cifsFile->srch_inf.index_of_last_entry) { - cifs_dbg(FYI, "last entry in buf at pos %lld %s\n", - ctx->pos, tmp_buf); - cifs_save_resume_key(current_entry, cifsFile); - break; - } else - current_entry = - nxt_dir_entry(current_entry, end_of_smb, - cifsFile->srch_inf.info_level); - } - kfree(tmp_buf); + for (i = 0; (i < num_to_fill) && (rc == 0); i++) { + if (current_entry == NULL) { + /* evaluate whether this case is an error */ + cifs_dbg(VFS, "past SMB end, num to fill %d i %d\n", + num_to_fill, i); + break; + } + /* + * if buggy server returns . and .. late do we want to + * check for that here? 
+ */ + rc = cifs_filldir(current_entry, file, filldir, + direntry, tmp_buf, max_len); + if (rc == -EOVERFLOW) { + rc = 0; + break; + } + + file->f_pos++; + if (file->f_pos == + cifsFile->srch_inf.index_of_last_entry) { + cifs_dbg(FYI, "last entry in buf at pos %lld %s\n", + file->f_pos, tmp_buf); + cifs_save_resume_key(current_entry, cifsFile); + break; + } else + current_entry = + nxt_dir_entry(current_entry, end_of_smb, + cifsFile->srch_inf.info_level); + } + kfree(tmp_buf); + break; + } /* end switch */ rddir2_exit: free_xid(xid); diff --git a/trunk/fs/coda/dir.c b/trunk/fs/coda/dir.c index 87e0ee9f4465..b7d3a05c062c 100644 --- a/trunk/fs/coda/dir.c +++ b/trunk/fs/coda/dir.c @@ -43,14 +43,15 @@ static int coda_rename(struct inode *old_inode, struct dentry *old_dentry, struct inode *new_inode, struct dentry *new_dentry); /* dir file-ops */ -static int coda_readdir(struct file *file, struct dir_context *ctx); +static int coda_readdir(struct file *file, void *buf, filldir_t filldir); /* dentry ops */ static int coda_dentry_revalidate(struct dentry *de, unsigned int flags); static int coda_dentry_delete(const struct dentry *); /* support routines */ -static int coda_venus_readdir(struct file *, struct dir_context *); +static int coda_venus_readdir(struct file *coda_file, void *buf, + filldir_t filldir); /* same as fs/bad_inode.c */ static int coda_return_EIO(void) @@ -84,7 +85,7 @@ const struct inode_operations coda_dir_inode_operations = const struct file_operations coda_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = coda_readdir, + .readdir = coda_readdir, .open = coda_open, .release = coda_release, .fsync = coda_fsync, @@ -377,7 +378,7 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry, /* file operations for directories */ -static int coda_readdir(struct file *coda_file, struct dir_context *ctx) +static int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir) { struct coda_file_info *cfi; struct file *host_file; @@ -390,19 +391,30 @@ static int coda_readdir(struct file *coda_file, struct dir_context *ctx) if (!host_file->f_op) return -ENOTDIR; - if (host_file->f_op->iterate) { + if (host_file->f_op->readdir) + { + /* potemkin case: we were handed a directory inode. + * We can't use vfs_readdir because we have to keep the file + * position in sync between the coda_file and the host_file. + * and as such we need grab the inode mutex. 
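The coda_readdir() comment restored just above spells out the constraint: the coda file and the host file each keep their own position, so before delegating the iteration the wrapper has to copy its cursor into the host file and copy the updated cursor back afterwards, all under the host inode's mutex. A stripped-down sketch of that delegate-and-resync shape, with toy structs and a pthread mutex standing in for the kernel objects:

#include <pthread.h>
#include <stdio.h>

struct toy_file {
    long long pos;
    pthread_mutex_t lock;
};

/* Pretend iterator on the host file: consumes up to 'budget' entries. */
static void host_iterate(struct toy_file *host, int budget)
{
    while (budget-- > 0)
        host->pos++;
}

static void wrapper_readdir(struct toy_file *wrapper, struct toy_file *host)
{
    pthread_mutex_lock(&host->lock);
    host->pos = wrapper->pos;    /* push our cursor down */
    host_iterate(host, 3);
    wrapper->pos = host->pos;    /* pull the new cursor back up */
    pthread_mutex_unlock(&host->lock);
}

int main(void)
{
    struct toy_file host = { 0, PTHREAD_MUTEX_INITIALIZER };
    struct toy_file wrapper = { 0, PTHREAD_MUTEX_INITIALIZER };

    wrapper_readdir(&wrapper, &host);
    wrapper_readdir(&wrapper, &host);
    printf("wrapper pos = %lld\n", wrapper.pos);    /* 6 */
    return 0;
}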
*/ struct inode *host_inode = file_inode(host_file); + mutex_lock(&host_inode->i_mutex); + host_file->f_pos = coda_file->f_pos; + ret = -ENOENT; if (!IS_DEADDIR(host_inode)) { - ret = host_file->f_op->iterate(host_file, ctx); + ret = host_file->f_op->readdir(host_file, buf, filldir); file_accessed(host_file); } + + coda_file->f_pos = host_file->f_pos; mutex_unlock(&host_inode->i_mutex); - return ret; } - /* Venus: we must read Venus dirents from a file */ - return coda_venus_readdir(coda_file, ctx); + else /* Venus: we must read Venus dirents from a file */ + ret = coda_venus_readdir(coda_file, buf, filldir); + + return ret; } static inline unsigned int CDT2DT(unsigned char cdt) @@ -425,8 +437,10 @@ static inline unsigned int CDT2DT(unsigned char cdt) } /* support routines */ -static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx) +static int coda_venus_readdir(struct file *coda_file, void *buf, + filldir_t filldir) { + int result = 0; /* # of entries returned */ struct coda_file_info *cfi; struct coda_inode_info *cii; struct file *host_file; @@ -448,12 +462,23 @@ static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx) vdir = kmalloc(sizeof(*vdir), GFP_KERNEL); if (!vdir) return -ENOMEM; - if (!dir_emit_dots(coda_file, ctx)) - goto out; - + if (coda_file->f_pos == 0) { + ret = filldir(buf, ".", 1, 0, de->d_inode->i_ino, DT_DIR); + if (ret < 0) + goto out; + result++; + coda_file->f_pos++; + } + if (coda_file->f_pos == 1) { + ret = filldir(buf, "..", 2, 1, parent_ino(de), DT_DIR); + if (ret < 0) + goto out; + result++; + coda_file->f_pos++; + } while (1) { /* read entries from the directory file */ - ret = kernel_read(host_file, ctx->pos - 2, (char *)vdir, + ret = kernel_read(host_file, coda_file->f_pos - 2, (char *)vdir, sizeof(*vdir)); if (ret < 0) { printk(KERN_ERR "coda readdir: read dir %s failed %d\n", @@ -482,7 +507,7 @@ static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx) /* Make sure we skip '.' and '..', we already got those */ if (name.name[0] == '.' && (name.len == 1 || - (name.name[1] == '.' && name.len == 2))) + (vdir->d_name[1] == '.' && name.len == 2))) vdir->d_fileno = name.len = 0; /* skip null entries */ @@ -495,16 +520,19 @@ static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx) if (!ino) ino = vdir->d_fileno; type = CDT2DT(vdir->d_type); - if (!dir_emit(ctx, name.name, name.len, ino, type)) - break; + ret = filldir(buf, name.name, name.len, + coda_file->f_pos, ino, type); + /* failure means no space for filling in this round */ + if (ret < 0) break; + result++; } /* we'll always have progress because d_reclen is unsigned and * we've already established it is non-zero. */ - ctx->pos += vdir->d_reclen; + coda_file->f_pos += vdir->d_reclen; } out: kfree(vdir); - return 0; + return result ? 
result : ret; } /* called when a cache lookup succeeds */ diff --git a/trunk/fs/compat.c b/trunk/fs/compat.c index 6af20de2c1a3..fc3b55dce184 100644 --- a/trunk/fs/compat.c +++ b/trunk/fs/compat.c @@ -832,7 +832,6 @@ struct compat_old_linux_dirent { }; struct compat_readdir_callback { - struct dir_context ctx; struct compat_old_linux_dirent __user *dirent; int result; }; @@ -874,15 +873,15 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd, { int error; struct fd f = fdget(fd); - struct compat_readdir_callback buf = { - .ctx.actor = compat_fillonedir, - .dirent = dirent - }; + struct compat_readdir_callback buf; if (!f.file) return -EBADF; - error = iterate_dir(f.file, &buf.ctx); + buf.result = 0; + buf.dirent = dirent; + + error = vfs_readdir(f.file, compat_fillonedir, &buf); if (buf.result) error = buf.result; @@ -898,7 +897,6 @@ struct compat_linux_dirent { }; struct compat_getdents_callback { - struct dir_context ctx; struct compat_linux_dirent __user *current_dir; struct compat_linux_dirent __user *previous; int count; @@ -953,11 +951,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd, { struct fd f; struct compat_linux_dirent __user * lastdirent; - struct compat_getdents_callback buf = { - .ctx.actor = compat_filldir, - .current_dir = dirent, - .count = count - }; + struct compat_getdents_callback buf; int error; if (!access_ok(VERIFY_WRITE, dirent, count)) @@ -967,12 +961,17 @@ asmlinkage long compat_sys_getdents(unsigned int fd, if (!f.file) return -EBADF; - error = iterate_dir(f.file, &buf.ctx); + buf.current_dir = dirent; + buf.previous = NULL; + buf.count = count; + buf.error = 0; + + error = vfs_readdir(f.file, compat_filldir, &buf); if (error >= 0) error = buf.error; lastdirent = buf.previous; if (lastdirent) { - if (put_user(buf.ctx.pos, &lastdirent->d_off)) + if (put_user(f.file->f_pos, &lastdirent->d_off)) error = -EFAULT; else error = count - buf.count; @@ -984,7 +983,6 @@ asmlinkage long compat_sys_getdents(unsigned int fd, #ifndef __ARCH_OMIT_COMPAT_SYS_GETDENTS64 struct compat_getdents_callback64 { - struct dir_context ctx; struct linux_dirent64 __user *current_dir; struct linux_dirent64 __user *previous; int count; @@ -1038,11 +1036,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd, { struct fd f; struct linux_dirent64 __user * lastdirent; - struct compat_getdents_callback64 buf = { - .ctx.actor = compat_filldir64, - .current_dir = dirent, - .count = count - }; + struct compat_getdents_callback64 buf; int error; if (!access_ok(VERIFY_WRITE, dirent, count)) @@ -1052,12 +1046,17 @@ asmlinkage long compat_sys_getdents64(unsigned int fd, if (!f.file) return -EBADF; - error = iterate_dir(f.file, &buf.ctx); + buf.current_dir = dirent; + buf.previous = NULL; + buf.count = count; + buf.error = 0; + + error = vfs_readdir(f.file, compat_filldir64, &buf); if (error >= 0) error = buf.error; lastdirent = buf.previous; if (lastdirent) { - typeof(lastdirent->d_off) d_off = buf.ctx.pos; + typeof(lastdirent->d_off) d_off = f.file->f_pos; if (__put_user_unaligned(d_off, &lastdirent->d_off)) error = -EFAULT; else diff --git a/trunk/fs/compat_ioctl.c b/trunk/fs/compat_ioctl.c index 5d19acfa7c6c..996cdc5abb85 100644 --- a/trunk/fs/compat_ioctl.c +++ b/trunk/fs/compat_ioctl.c @@ -66,6 +66,7 @@ #include #ifdef CONFIG_BLOCK +#include #include #include #include @@ -953,6 +954,8 @@ COMPATIBLE_IOCTL(MTIOCTOP) /* Socket level stuff */ COMPATIBLE_IOCTL(FIOQSIZE) #ifdef CONFIG_BLOCK +/* loop */ +IGNORE_IOCTL(LOOP_CLR_FD) /* md calls this on random blockdevs */ 
IGNORE_IOCTL(RAID_VERSION) /* qemu/qemu-img might call these two on plain files for probing */ diff --git a/trunk/fs/configfs/dir.c b/trunk/fs/configfs/dir.c index 64e5323cbbb0..7aabc6ad4e9b 100644 --- a/trunk/fs/configfs/dir.c +++ b/trunk/fs/configfs/dir.c @@ -1532,66 +1532,84 @@ static inline unsigned char dt_type(struct configfs_dirent *sd) return (sd->s_mode >> 12) & 15; } -static int configfs_readdir(struct file *file, struct dir_context *ctx) +static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir) { - struct dentry *dentry = file->f_path.dentry; + struct dentry *dentry = filp->f_path.dentry; struct super_block *sb = dentry->d_sb; struct configfs_dirent * parent_sd = dentry->d_fsdata; - struct configfs_dirent *cursor = file->private_data; + struct configfs_dirent *cursor = filp->private_data; struct list_head *p, *q = &cursor->s_sibling; ino_t ino = 0; + int i = filp->f_pos; - if (!dir_emit_dots(file, ctx)) - return 0; - if (ctx->pos == 2) { - spin_lock(&configfs_dirent_lock); - list_move(q, &parent_sd->s_children); - spin_unlock(&configfs_dirent_lock); - } - for (p = q->next; p != &parent_sd->s_children; p = p->next) { - struct configfs_dirent *next; - const char *name; - int len; - struct inode *inode = NULL; - - next = list_entry(p, struct configfs_dirent, s_sibling); - if (!next->s_element) - continue; - - name = configfs_get_name(next); - len = strlen(name); + switch (i) { + case 0: + ino = dentry->d_inode->i_ino; + if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) + break; + filp->f_pos++; + i++; + /* fallthrough */ + case 1: + ino = parent_ino(dentry); + if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0) + break; + filp->f_pos++; + i++; + /* fallthrough */ + default: + if (filp->f_pos == 2) { + spin_lock(&configfs_dirent_lock); + list_move(q, &parent_sd->s_children); + spin_unlock(&configfs_dirent_lock); + } + for (p=q->next; p!= &parent_sd->s_children; p=p->next) { + struct configfs_dirent *next; + const char * name; + int len; + struct inode *inode = NULL; - /* - * We'll have a dentry and an inode for - * PINNED items and for open attribute - * files. We lock here to prevent a race - * with configfs_d_iput() clearing - * s_dentry before calling iput(). - * - * Why do we go to the trouble? If - * someone has an attribute file open, - * the inode number should match until - * they close it. Beyond that, we don't - * care. - */ - spin_lock(&configfs_dirent_lock); - dentry = next->s_dentry; - if (dentry) - inode = dentry->d_inode; - if (inode) - ino = inode->i_ino; - spin_unlock(&configfs_dirent_lock); - if (!inode) - ino = iunique(sb, 2); + next = list_entry(p, struct configfs_dirent, + s_sibling); + if (!next->s_element) + continue; + + name = configfs_get_name(next); + len = strlen(name); + + /* + * We'll have a dentry and an inode for + * PINNED items and for open attribute + * files. We lock here to prevent a race + * with configfs_d_iput() clearing + * s_dentry before calling iput(). + * + * Why do we go to the trouble? If + * someone has an attribute file open, + * the inode number should match until + * they close it. Beyond that, we don't + * care. 
+ */ + spin_lock(&configfs_dirent_lock); + dentry = next->s_dentry; + if (dentry) + inode = dentry->d_inode; + if (inode) + ino = inode->i_ino; + spin_unlock(&configfs_dirent_lock); + if (!inode) + ino = iunique(sb, 2); - if (!dir_emit(ctx, name, len, ino, dt_type(next))) - return 0; + if (filldir(dirent, name, len, filp->f_pos, ino, + dt_type(next)) < 0) + return 0; - spin_lock(&configfs_dirent_lock); - list_move(q, p); - spin_unlock(&configfs_dirent_lock); - p = q; - ctx->pos++; + spin_lock(&configfs_dirent_lock); + list_move(q, p); + spin_unlock(&configfs_dirent_lock); + p = q; + filp->f_pos++; + } } return 0; } @@ -1643,7 +1661,7 @@ const struct file_operations configfs_dir_operations = { .release = configfs_dir_close, .llseek = configfs_dir_lseek, .read = generic_read_dir, - .iterate = configfs_readdir, + .readdir = configfs_readdir, }; int configfs_register_subsystem(struct configfs_subsystem *subsys) diff --git a/trunk/fs/cramfs/inode.c b/trunk/fs/cramfs/inode.c index e501ac3a49ff..35b1c7bd18b7 100644 --- a/trunk/fs/cramfs/inode.c +++ b/trunk/fs/cramfs/inode.c @@ -349,17 +349,18 @@ static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf) /* * Read a cramfs directory entry. */ -static int cramfs_readdir(struct file *file, struct dir_context *ctx) +static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; char *buf; unsigned int offset; + int copied; /* Offset within the thing. */ - if (ctx->pos >= inode->i_size) + offset = filp->f_pos; + if (offset >= inode->i_size) return 0; - offset = ctx->pos; /* Directory entries are always 4-byte aligned */ if (offset & 3) return -EINVAL; @@ -368,13 +369,14 @@ static int cramfs_readdir(struct file *file, struct dir_context *ctx) if (!buf) return -ENOMEM; + copied = 0; while (offset < inode->i_size) { struct cramfs_inode *de; unsigned long nextoffset; char *name; ino_t ino; umode_t mode; - int namelen; + int namelen, error; mutex_lock(&read_mutex); de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN); @@ -400,10 +402,13 @@ static int cramfs_readdir(struct file *file, struct dir_context *ctx) break; namelen--; } - if (!dir_emit(ctx, buf, namelen, ino, mode >> 12)) + error = filldir(dirent, buf, namelen, offset, ino, mode >> 12); + if (error) break; - ctx->pos = offset = nextoffset; + offset = nextoffset; + filp->f_pos = offset; + copied++; } kfree(buf); return 0; @@ -542,7 +547,7 @@ static const struct address_space_operations cramfs_aops = { static const struct file_operations cramfs_directory_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = cramfs_readdir, + .readdir = cramfs_readdir, }; static const struct inode_operations cramfs_dir_inode_operations = { diff --git a/trunk/fs/dcache.c b/trunk/fs/dcache.c index 5a23073138df..f09b9085f7d8 100644 --- a/trunk/fs/dcache.c +++ b/trunk/fs/dcache.c @@ -1612,10 +1612,6 @@ EXPORT_SYMBOL(d_obtain_alias); * If a dentry was found and moved, then it is returned. Otherwise NULL * is returned. This matches the expected return value of ->lookup. * - * Cluster filesystems may call this function with a negative, hashed dentry. - * In that case, we know that the inode will be a regular file, and also this - * will only occur during atomic_open. So we need to check for the dentry - * being already hashed only in the final case. 
*/ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) { @@ -1640,11 +1636,8 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) security_d_instantiate(dentry, inode); d_rehash(dentry); } - } else { - d_instantiate(dentry, inode); - if (d_unhashed(dentry)) - d_rehash(dentry); - } + } else + d_add(dentry, inode); return new; } EXPORT_SYMBOL(d_splice_alias); diff --git a/trunk/fs/dlm/config.c b/trunk/fs/dlm/config.c index 76feb4b60fa6..7d58d5b112b5 100644 --- a/trunk/fs/dlm/config.c +++ b/trunk/fs/dlm/config.c @@ -138,9 +138,8 @@ static ssize_t cluster_cluster_name_read(struct dlm_cluster *cl, char *buf) static ssize_t cluster_cluster_name_write(struct dlm_cluster *cl, const char *buf, size_t len) { - strlcpy(dlm_config.ci_cluster_name, buf, - sizeof(dlm_config.ci_cluster_name)); - strlcpy(cl->cl_cluster_name, buf, sizeof(cl->cl_cluster_name)); + strncpy(dlm_config.ci_cluster_name, buf, DLM_LOCKSPACE_LEN); + strncpy(cl->cl_cluster_name, buf, DLM_LOCKSPACE_LEN); return len; } diff --git a/trunk/fs/dlm/lock.c b/trunk/fs/dlm/lock.c index e223a911a834..1b1146670c4b 100644 --- a/trunk/fs/dlm/lock.c +++ b/trunk/fs/dlm/lock.c @@ -2038,8 +2038,8 @@ static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb, b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1]; if (b == 1) { int len = receive_extralen(ms); - if (len > r->res_ls->ls_lvblen) - len = r->res_ls->ls_lvblen; + if (len > DLM_RESNAME_MAXLEN) + len = DLM_RESNAME_MAXLEN; memcpy(lkb->lkb_lvbptr, ms->m_extra, len); lkb->lkb_lvbseq = ms->m_lvbseq; } @@ -3893,8 +3893,8 @@ static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb, if (!lkb->lkb_lvbptr) return -ENOMEM; len = receive_extralen(ms); - if (len > ls->ls_lvblen) - len = ls->ls_lvblen; + if (len > DLM_RESNAME_MAXLEN) + len = DLM_RESNAME_MAXLEN; memcpy(lkb->lkb_lvbptr, ms->m_extra, len); } return 0; diff --git a/trunk/fs/dlm/lockspace.c b/trunk/fs/dlm/lockspace.c index 88556dc0458e..3ca79d3253b9 100644 --- a/trunk/fs/dlm/lockspace.c +++ b/trunk/fs/dlm/lockspace.c @@ -883,24 +883,17 @@ int dlm_release_lockspace(void *lockspace, int force) void dlm_stop_lockspaces(void) { struct dlm_ls *ls; - int count; restart: - count = 0; spin_lock(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { - if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) { - count++; + if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) continue; - } spin_unlock(&lslist_lock); log_error(ls, "no userland control daemon, stopping lockspace"); dlm_ls_stop(ls); goto restart; } spin_unlock(&lslist_lock); - - if (count) - log_print("dlm user daemon left %d lockspaces", count); } diff --git a/trunk/fs/dlm/lowcomms.c b/trunk/fs/dlm/lowcomms.c index d90909ec6aa6..d0ccd2fd79eb 100644 --- a/trunk/fs/dlm/lowcomms.c +++ b/trunk/fs/dlm/lowcomms.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #include @@ -125,7 +126,6 @@ struct connection { struct connection *othercon; struct work_struct rwork; /* Receive workqueue */ struct work_struct swork; /* Send workqueue */ - bool try_new_addr; }; #define sock2con(x) ((struct connection *)(x)->sk_user_data) @@ -144,7 +144,6 @@ struct dlm_node_addr { struct list_head list; int nodeid; int addr_count; - int curr_addr_index; struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT]; }; @@ -311,7 +310,7 @@ static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y) } static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out, - struct sockaddr *sa_out, bool try_new_addr) + struct sockaddr *sa_out) { 
struct sockaddr_storage sas; struct dlm_node_addr *na; @@ -321,16 +320,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out, spin_lock(&dlm_node_addrs_spin); na = find_node_addr(nodeid); - if (na && na->addr_count) { - if (try_new_addr) { - na->curr_addr_index++; - if (na->curr_addr_index == na->addr_count) - na->curr_addr_index = 0; - } - - memcpy(&sas, na->addr[na->curr_addr_index ], - sizeof(struct sockaddr_storage)); - } + if (na && na->addr_count) + memcpy(&sas, na->addr[0], sizeof(struct sockaddr_storage)); spin_unlock(&dlm_node_addrs_spin); if (!na) @@ -362,22 +353,19 @@ static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid) { struct dlm_node_addr *na; int rv = -EEXIST; - int addr_i; spin_lock(&dlm_node_addrs_spin); list_for_each_entry(na, &dlm_node_addrs, list) { if (!na->addr_count) continue; - for (addr_i = 0; addr_i < na->addr_count; addr_i++) { - if (addr_compare(na->addr[addr_i], addr)) { - *nodeid = na->nodeid; - rv = 0; - goto unlock; - } - } + if (!addr_compare(na->addr[0], addr)) + continue; + + *nodeid = na->nodeid; + rv = 0; + break; } -unlock: spin_unlock(&dlm_node_addrs_spin); return rv; } @@ -573,23 +561,8 @@ static void sctp_send_shutdown(sctp_assoc_t associd) static void sctp_init_failed_foreach(struct connection *con) { - - /* - * Don't try to recover base con and handle race where the - * other node's assoc init creates a assoc and we get that - * notification, then we get a notification that our attempt - * failed due. This happens when we are still trying the primary - * address, but the other node has already tried secondary addrs - * and found one that worked. - */ - if (!con->nodeid || con->sctp_assoc) - return; - - log_print("Retrying SCTP association init for node %d\n", con->nodeid); - - con->try_new_addr = true; con->sctp_assoc = 0; - if (test_and_clear_bit(CF_INIT_PENDING, &con->flags)) { + if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) { if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) queue_work(send_workqueue, &con->swork); } @@ -606,56 +579,15 @@ static void sctp_init_failed(void) mutex_unlock(&connections_lock); } -static void retry_failed_sctp_send(struct connection *recv_con, - struct sctp_send_failed *sn_send_failed, - char *buf) -{ - int len = sn_send_failed->ssf_length - sizeof(struct sctp_send_failed); - struct dlm_mhandle *mh; - struct connection *con; - char *retry_buf; - int nodeid = sn_send_failed->ssf_info.sinfo_ppid; - - log_print("Retry sending %d bytes to node id %d", len, nodeid); - - con = nodeid2con(nodeid, 0); - if (!con) { - log_print("Could not look up con for nodeid %d\n", - nodeid); - return; - } - - mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &retry_buf); - if (!mh) { - log_print("Could not allocate buf for retry."); - return; - } - memcpy(retry_buf, buf + sizeof(struct sctp_send_failed), len); - dlm_lowcomms_commit_buffer(mh); - - /* - * If we got a assoc changed event before the send failed event then - * we only need to retry the send. 
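The dlm lowcomms hunks above remove try_new_addr and curr_addr_index, which together made nodeid_to_addr() step to a node's next known address each time an SCTP association attempt failed instead of always retrying the first one. A sketch of that round-robin-on-retry selection with toy types, not the dlm structures:

#include <stdio.h>

#define MAX_ADDRS 4

struct node_addrs {
    const char *addr[MAX_ADDRS];
    int addr_count;
    int curr;        /* index of the address used last time */
};

/* On a retry, advance to the next address, wrapping around. */
static const char *pick_addr(struct node_addrs *na, int try_new_addr)
{
    if (na->addr_count == 0)
        return NULL;
    if (try_new_addr)
        na->curr = (na->curr + 1) % na->addr_count;
    return na->addr[na->curr];
}

int main(void)
{
    struct node_addrs na = {
        .addr = { "10.0.0.1", "192.168.1.1" },
        .addr_count = 2,
        .curr = 0,
    };

    printf("%s\n", pick_addr(&na, 0));    /* first attempt: 10.0.0.1 */
    printf("%s\n", pick_addr(&na, 1));    /* retry: 192.168.1.1 */
    printf("%s\n", pick_addr(&na, 1));    /* retry again: back to 10.0.0.1 */
    return 0;
}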
- */ - if (con->sctp_assoc) { - if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) - queue_work(send_workqueue, &con->swork); - } else - sctp_init_failed_foreach(con); -} - /* Something happened to an association */ static void process_sctp_notification(struct connection *con, struct msghdr *msg, char *buf) { union sctp_notification *sn = (union sctp_notification *)buf; - switch (sn->sn_header.sn_type) { - case SCTP_SEND_FAILED: - retry_failed_sctp_send(con, &sn->sn_send_failed, buf); - break; - case SCTP_ASSOC_CHANGE: + if (sn->sn_header.sn_type == SCTP_ASSOC_CHANGE) { switch (sn->sn_assoc_change.sac_state) { + case SCTP_COMM_UP: case SCTP_RESTART: { @@ -730,11 +662,9 @@ static void process_sctp_notification(struct connection *con, log_print("connecting to %d sctp association %d", nodeid, (int)sn->sn_assoc_change.sac_assoc_id); - new_con->sctp_assoc = sn->sn_assoc_change.sac_assoc_id; - new_con->try_new_addr = false; /* Send any pending writes */ clear_bit(CF_CONNECT_PENDING, &new_con->flags); - clear_bit(CF_INIT_PENDING, &new_con->flags); + clear_bit(CF_INIT_PENDING, &con->flags); if (!test_and_set_bit(CF_WRITE_PENDING, &new_con->flags)) { queue_work(send_workqueue, &new_con->swork); } @@ -753,10 +683,14 @@ static void process_sctp_notification(struct connection *con, } break; + /* We don't know which INIT failed, so clear the PENDING flags + * on them all. if assoc_id is zero then it will then try + * again */ + case SCTP_CANT_STR_ASSOC: { - /* Will retry init when we get the send failed notification */ log_print("Can't start SCTP association - retrying"); + sctp_init_failed(); } break; @@ -765,8 +699,6 @@ static void process_sctp_notification(struct connection *con, (int)sn->sn_assoc_change.sac_assoc_id, sn->sn_assoc_change.sac_state); } - default: - ; /* fall through */ } } @@ -1026,24 +958,6 @@ static void free_entry(struct writequeue_entry *e) kfree(e); } -/* - * writequeue_entry_complete - try to delete and free write queue entry - * @e: write queue entry to try to delete - * @completed: bytes completed - * - * writequeue_lock must be held. - */ -static void writequeue_entry_complete(struct writequeue_entry *e, int completed) -{ - e->offset += completed; - e->len -= completed; - - if (e->len == 0 && e->users == 0) { - list_del(&e->list); - free_entry(e); - } -} - /* Initiate an SCTP association. 
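/*
 * Illustrative worked example, not part of the patch: the helper
 * removed above (writequeue_entry_complete) and the open-coded blocks
 * restored later in this file do the same bookkeeping after a partial
 * send -- advance the entry past the bytes the socket accepted and
 * free it once it is fully sent and no writer still holds it.
 * With hypothetical names:
 */
struct wq_entry_sketch {
	int offset;	/* bytes already handed to the socket */
	int len;	/* bytes still queued in this entry */
	int users;	/* writers still appending to the entry */
};

/*
 * e.g. len = 100, sendmsg() returns 40 -> offset 40, len 60, keep it;
 *      the next call returns 60 and users == 0 -> len hits 0, free it.
 */
static int sent_bytes(struct wq_entry_sketch *e, int sent)
{
	e->offset += sent;
	e->len -= sent;
	return e->len == 0 && e->users == 0;	/* non-zero: free the entry */
}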
This is a special case of send_to_sock() in that we don't yet have a peeled-off socket for this association, so we use the listening socket @@ -1063,14 +977,15 @@ static void sctp_init_assoc(struct connection *con) int addrlen; struct kvec iov[1]; - mutex_lock(&con->sock_mutex); if (test_and_set_bit(CF_INIT_PENDING, &con->flags)) - goto unlock; + return; + + if (con->retries++ > MAX_CONNECT_RETRIES) + return; - if (nodeid_to_addr(con->nodeid, NULL, (struct sockaddr *)&rem_addr, - con->try_new_addr)) { + if (nodeid_to_addr(con->nodeid, NULL, (struct sockaddr *)&rem_addr)) { log_print("no address for nodeid %d", con->nodeid); - goto unlock; + return; } base_con = nodeid2con(0, 0); BUG_ON(base_con == NULL); @@ -1088,25 +1003,17 @@ static void sctp_init_assoc(struct connection *con) if (list_empty(&con->writequeue)) { spin_unlock(&con->writequeue_lock); log_print("writequeue empty for nodeid %d", con->nodeid); - goto unlock; + return; } e = list_first_entry(&con->writequeue, struct writequeue_entry, list); len = e->len; offset = e->offset; + spin_unlock(&con->writequeue_lock); /* Send the first block off the write queue */ iov[0].iov_base = page_address(e->page)+offset; iov[0].iov_len = len; - spin_unlock(&con->writequeue_lock); - - if (rem_addr.ss_family == AF_INET) { - struct sockaddr_in *sin = (struct sockaddr_in *)&rem_addr; - log_print("Trying to connect to %pI4", &sin->sin_addr.s_addr); - } else { - struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&rem_addr; - log_print("Trying to connect to %pI6", &sin6->sin6_addr); - } cmsg = CMSG_FIRSTHDR(&outmessage); cmsg->cmsg_level = IPPROTO_SCTP; @@ -1114,9 +1021,8 @@ static void sctp_init_assoc(struct connection *con) cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); sinfo = CMSG_DATA(cmsg); memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo)); - sinfo->sinfo_ppid = cpu_to_le32(con->nodeid); + sinfo->sinfo_ppid = cpu_to_le32(dlm_our_nodeid()); outmessage.msg_controllen = cmsg->cmsg_len; - sinfo->sinfo_flags |= SCTP_ADDR_OVER; ret = kernel_sendmsg(base_con->sock, &outmessage, iov, 1, len); if (ret < 0) { @@ -1129,12 +1035,15 @@ static void sctp_init_assoc(struct connection *con) } else { spin_lock(&con->writequeue_lock); - writequeue_entry_complete(e, ret); + e->offset += ret; + e->len -= ret; + + if (e->len == 0 && e->users == 0) { + list_del(&e->list); + free_entry(e); + } spin_unlock(&con->writequeue_lock); } - -unlock: - mutex_unlock(&con->sock_mutex); } /* Connect a new socket to its peer */ @@ -1166,7 +1075,7 @@ static void tcp_connect_to_sock(struct connection *con) goto out_err; memset(&saddr, 0, sizeof(saddr)); - result = nodeid_to_addr(con->nodeid, &saddr, NULL, false); + result = nodeid_to_addr(con->nodeid, &saddr, NULL); if (result < 0) { log_print("no address for nodeid %d", con->nodeid); goto out_err; @@ -1345,7 +1254,6 @@ static int sctp_listen_for_all(void) int result = -EINVAL, num = 1, i, addr_len; struct connection *con = nodeid2con(0, GFP_NOFS); int bufsize = NEEDED_RMEM; - int one = 1; if (!con) return -ENOMEM; @@ -1380,11 +1288,6 @@ static int sctp_listen_for_all(void) goto create_delsock; } - result = kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one, - sizeof(one)); - if (result < 0) - log_print("Could not set SCTP NODELAY error %d\n", result); - /* Init con struct */ sock->sk->sk_user_data = con; con->sock = sock; @@ -1590,7 +1493,13 @@ static void send_to_sock(struct connection *con) } spin_lock(&con->writequeue_lock); - writequeue_entry_complete(e, ret); + e->offset += ret; + e->len -= ret; + + if 
(e->len == 0 && e->users == 0) { + list_del(&e->list); + free_entry(e); + } } spin_unlock(&con->writequeue_lock); out: diff --git a/trunk/fs/ecryptfs/file.c b/trunk/fs/ecryptfs/file.c index 9aa05e08060b..201f0a0d6b0a 100644 --- a/trunk/fs/ecryptfs/file.c +++ b/trunk/fs/ecryptfs/file.c @@ -68,9 +68,9 @@ static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb, } struct ecryptfs_getdents_callback { - struct dir_context ctx; - struct dir_context *caller; + void *dirent; struct dentry *dentry; + filldir_t filldir; int filldir_called; int entries_written; }; @@ -96,10 +96,9 @@ ecryptfs_filldir(void *dirent, const char *lower_name, int lower_namelen, rc); goto out; } - buf->caller->pos = buf->ctx.pos; - rc = !dir_emit(buf->caller, name, name_size, ino, d_type); + rc = buf->filldir(buf->dirent, name, name_size, offset, ino, d_type); kfree(name); - if (!rc) + if (rc >= 0) buf->entries_written++; out: return rc; @@ -108,23 +107,27 @@ ecryptfs_filldir(void *dirent, const char *lower_name, int lower_namelen, /** * ecryptfs_readdir * @file: The eCryptfs directory file - * @ctx: The actor to feed the entries to + * @dirent: Directory entry handle + * @filldir: The filldir callback function */ -static int ecryptfs_readdir(struct file *file, struct dir_context *ctx) +static int ecryptfs_readdir(struct file *file, void *dirent, filldir_t filldir) { int rc; struct file *lower_file; struct inode *inode; - struct ecryptfs_getdents_callback buf = { - .ctx.actor = ecryptfs_filldir, - .caller = ctx, - .dentry = file->f_path.dentry - }; + struct ecryptfs_getdents_callback buf; + lower_file = ecryptfs_file_to_lower(file); - lower_file->f_pos = ctx->pos; + lower_file->f_pos = file->f_pos; inode = file_inode(file); - rc = iterate_dir(lower_file, &buf.ctx); - ctx->pos = buf.ctx.pos; + memset(&buf, 0, sizeof(buf)); + buf.dirent = dirent; + buf.dentry = file->f_path.dentry; + buf.filldir = filldir; + buf.filldir_called = 0; + buf.entries_written = 0; + rc = vfs_readdir(lower_file, ecryptfs_filldir, (void *)&buf); + file->f_pos = lower_file->f_pos; if (rc < 0) goto out; if (buf.filldir_called && !buf.entries_written) @@ -292,12 +295,6 @@ static int ecryptfs_release(struct inode *inode, struct file *file) static int ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { - int rc; - - rc = filemap_write_and_wait(file->f_mapping); - if (rc) - return rc; - return vfs_fsync(ecryptfs_file_to_lower(file), datasync); } @@ -341,7 +338,7 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) #endif const struct file_operations ecryptfs_dir_fops = { - .iterate = ecryptfs_readdir, + .readdir = ecryptfs_readdir, .read = generic_read_dir, .unlocked_ioctl = ecryptfs_unlocked_ioctl, #ifdef CONFIG_COMPAT @@ -362,7 +359,7 @@ const struct file_operations ecryptfs_main_fops = { .aio_read = ecryptfs_read_update_atime, .write = do_sync_write, .aio_write = generic_file_aio_write, - .iterate = ecryptfs_readdir, + .readdir = ecryptfs_readdir, .unlocked_ioctl = ecryptfs_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ecryptfs_compat_ioctl, diff --git a/trunk/fs/efivarfs/file.c b/trunk/fs/efivarfs/file.c index 8dd524f32284..bfb531564319 100644 --- a/trunk/fs/efivarfs/file.c +++ b/trunk/fs/efivarfs/file.c @@ -44,11 +44,8 @@ static ssize_t efivarfs_file_write(struct file *file, bytes = efivar_entry_set_get_size(var, attributes, &datasize, data, &set); - if (!set && bytes) { - if (bytes == -ENOENT) - bytes = -EIO; + if (!set && bytes) goto out; - } if (bytes == -ENOENT) { drop_nlink(inode); @@ 
-79,14 +76,7 @@ static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf, int err; err = efivar_entry_size(var, &datasize); - - /* - * efivarfs represents uncommitted variables with - * zero-length files. Reading them should return EOF. - */ - if (err == -ENOENT) - return 0; - else if (err) + if (err) return err; data = kmalloc(datasize + sizeof(attributes), GFP_KERNEL); diff --git a/trunk/fs/efs/dir.c b/trunk/fs/efs/dir.c index b72307ccdf7a..055a9e9ca747 100644 --- a/trunk/fs/efs/dir.c +++ b/trunk/fs/efs/dir.c @@ -7,38 +7,40 @@ #include #include "efs.h" -static int efs_readdir(struct file *, struct dir_context *); +static int efs_readdir(struct file *, void *, filldir_t); const struct file_operations efs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = efs_readdir, + .readdir = efs_readdir, }; const struct inode_operations efs_dir_inode_operations = { .lookup = efs_lookup, }; -static int efs_readdir(struct file *file, struct dir_context *ctx) -{ - struct inode *inode = file_inode(file); +static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) { + struct inode *inode = file_inode(filp); + struct buffer_head *bh; + + struct efs_dir *dirblock; + struct efs_dentry *dirslot; + efs_ino_t inodenum; efs_block_t block; - int slot; + int slot, namelen; + char *nameptr; if (inode->i_size & (EFS_DIRBSIZE-1)) printk(KERN_WARNING "EFS: WARNING: readdir(): directory size not a multiple of EFS_DIRBSIZE\n"); /* work out where this entry can be found */ - block = ctx->pos >> EFS_DIRBSIZE_BITS; + block = filp->f_pos >> EFS_DIRBSIZE_BITS; /* each block contains at most 256 slots */ - slot = ctx->pos & 0xff; + slot = filp->f_pos & 0xff; /* look at all blocks */ while (block < inode->i_blocks) { - struct efs_dir *dirblock; - struct buffer_head *bh; - /* read the dir block */ bh = sb_bread(inode->i_sb, efs_bmap(inode, block)); @@ -55,14 +57,11 @@ static int efs_readdir(struct file *file, struct dir_context *ctx) break; } - for (; slot < dirblock->slots; slot++) { - struct efs_dentry *dirslot; - efs_ino_t inodenum; - const char *nameptr; - int namelen; - - if (dirblock->space[slot] == 0) + while (slot < dirblock->slots) { + if (dirblock->space[slot] == 0) { + slot++; continue; + } dirslot = (struct efs_dentry *) (((char *) bh->b_data) + EFS_SLOTAT(dirblock, slot)); @@ -73,29 +72,39 @@ static int efs_readdir(struct file *file, struct dir_context *ctx) #ifdef DEBUG printk(KERN_DEBUG "EFS: readdir(): block %d slot %d/%d: inode %u, name \"%s\", namelen %u\n", block, slot, dirblock->slots-1, inodenum, nameptr, namelen); #endif - if (!namelen) - continue; - /* found the next entry */ - ctx->pos = (block << EFS_DIRBSIZE_BITS) | slot; - - /* sanity check */ - if (nameptr - (char *) dirblock + namelen > EFS_DIRBSIZE) { - printk(KERN_WARNING "EFS: directory entry %d exceeds directory block\n", slot); - continue; - } - - /* copy filename and data in dirslot */ - if (!dir_emit(ctx, nameptr, namelen, inodenum, DT_UNKNOWN)) { + if (namelen > 0) { + /* found the next entry */ + filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot; + + /* copy filename and data in dirslot */ + filldir(dirent, nameptr, namelen, filp->f_pos, inodenum, DT_UNKNOWN); + + /* sanity check */ + if (nameptr - (char *) dirblock + namelen > EFS_DIRBSIZE) { + printk(KERN_WARNING "EFS: directory entry %d exceeds directory block\n", slot); + slot++; + continue; + } + + /* store position of next slot */ + if (++slot == dirblock->slots) { + slot = 0; + block++; + } brelse(bh); - return 0; 
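/*
 * Illustrative sketch, not part of the patch: the directory hunks in
 * this series (ecryptfs, exportfs, efs, exofs, ext2, ext3, ext4) move
 * from the ->iterate()/dir_emit() interface back to
 * ->readdir()/filldir().  The emit convention flips: dir_emit()
 * returns true to keep going, while a filldir callback returns 0 to
 * keep going and non-zero once the caller's buffer is full, and the
 * position is passed explicitly instead of through ctx->pos.
 * A hypothetical minimal ->readdir() under that convention:
 */
static int examplefs_readdir(struct file *filp, void *dirent,
			     filldir_t filldir)
{
	static const char *names[] = { ".", "..", "hello.txt" };

	while (filp->f_pos < (loff_t)ARRAY_SIZE(names)) {
		const char *name = names[filp->f_pos];

		/* non-zero from filldir: buffer full, stop for now */
		if (filldir(dirent, name, strlen(name), filp->f_pos,
			    filp->f_pos + 1 /* made-up inode number */,
			    DT_UNKNOWN))
			break;
		filp->f_pos++;
	}
	return 0;
}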
+ filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot; + goto out; } + slot++; } brelse(bh); slot = 0; block++; } - ctx->pos = (block << EFS_DIRBSIZE_BITS) | slot; + + filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot; +out: return 0; } diff --git a/trunk/fs/exec.c b/trunk/fs/exec.c index ffd7a813ad3d..643019585574 100644 --- a/trunk/fs/exec.c +++ b/trunk/fs/exec.c @@ -1135,6 +1135,13 @@ void setup_new_exec(struct linux_binprm * bprm) set_dumpable(current->mm, suid_dumpable); } + /* + * Flush performance counters when crossing a + * security domain: + */ + if (!get_dumpable(current->mm)) + perf_event_exit_task(current); + /* An exec changes our domain. We are no longer part of the thread group */ @@ -1198,15 +1205,6 @@ void install_exec_creds(struct linux_binprm *bprm) commit_creds(bprm->cred); bprm->cred = NULL; - - /* - * Disable monitoring for regular users - * when executing setuid binaries. Must - * wait until new credentials are committed - * by commit_creds() above - */ - if (get_dumpable(current->mm) != SUID_DUMP_USER) - perf_event_exit_task(current); /* * cred_guard_mutex must be held at least to this point to prevent * ptrace_attach() from altering our determination of the task's diff --git a/trunk/fs/exofs/dir.c b/trunk/fs/exofs/dir.c index 49f51ab4caac..46375896cfc0 100644 --- a/trunk/fs/exofs/dir.c +++ b/trunk/fs/exofs/dir.c @@ -239,19 +239,22 @@ void exofs_set_de_type(struct exofs_dir_entry *de, struct inode *inode) } static int -exofs_readdir(struct file *file, struct dir_context *ctx) +exofs_readdir(struct file *filp, void *dirent, filldir_t filldir) { - loff_t pos = ctx->pos; - struct inode *inode = file_inode(file); + loff_t pos = filp->f_pos; + struct inode *inode = file_inode(filp); unsigned int offset = pos & ~PAGE_CACHE_MASK; unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned long npages = dir_pages(inode); unsigned chunk_mask = ~(exofs_chunk_size(inode)-1); - int need_revalidate = (file->f_version != inode->i_version); + unsigned char *types = NULL; + int need_revalidate = (filp->f_version != inode->i_version); if (pos > inode->i_size - EXOFS_DIR_REC_LEN(1)) return 0; + types = exofs_filetype_table; + for ( ; n < npages; n++, offset = 0) { char *kaddr, *limit; struct exofs_dir_entry *de; @@ -260,7 +263,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx) if (IS_ERR(page)) { EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n", inode->i_ino); - ctx->pos += PAGE_CACHE_SIZE - offset; + filp->f_pos += PAGE_CACHE_SIZE - offset; return PTR_ERR(page); } kaddr = page_address(page); @@ -268,9 +271,9 @@ exofs_readdir(struct file *file, struct dir_context *ctx) if (offset) { offset = exofs_validate_entry(kaddr, offset, chunk_mask); - ctx->pos = (n<f_pos = (n<f_version = inode->i_version; + filp->f_version = inode->i_version; need_revalidate = 0; } de = (struct exofs_dir_entry *)(kaddr + offset); @@ -285,24 +288,27 @@ exofs_readdir(struct file *file, struct dir_context *ctx) return -EIO; } if (de->inode_no) { - unsigned char t; + int over; + unsigned char d_type = DT_UNKNOWN; - if (de->file_type < EXOFS_FT_MAX) - t = exofs_filetype_table[de->file_type]; - else - t = DT_UNKNOWN; + if (types && de->file_type < EXOFS_FT_MAX) + d_type = types[de->file_type]; - if (!dir_emit(ctx, de->name, de->name_len, + offset = (char *)de - kaddr; + over = filldir(dirent, de->name, de->name_len, + (n<inode_no), - t)) { + d_type); + if (over) { exofs_put_page(page); return 0; } } - ctx->pos += le16_to_cpu(de->rec_len); + filp->f_pos += le16_to_cpu(de->rec_len); } exofs_put_page(page); } + 
return 0; } @@ -663,5 +669,5 @@ int exofs_empty_dir(struct inode *inode) const struct file_operations exofs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = exofs_readdir, + .readdir = exofs_readdir, }; diff --git a/trunk/fs/exofs/inode.c b/trunk/fs/exofs/inode.c index 2ec8eb1ab269..d1f80abd8828 100644 --- a/trunk/fs/exofs/inode.c +++ b/trunk/fs/exofs/inode.c @@ -953,11 +953,9 @@ static int exofs_releasepage(struct page *page, gfp_t gfp) return 0; } -static void exofs_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +static void exofs_invalidatepage(struct page *page, unsigned long offset) { - EXOFS_DBGMSG("page 0x%lx offset 0x%x length 0x%x\n", - page->index, offset, length); + EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset); WARN_ON(1); } diff --git a/trunk/fs/exportfs/expfs.c b/trunk/fs/exportfs/expfs.c index 293bc2e47a73..262fc9940982 100644 --- a/trunk/fs/exportfs/expfs.c +++ b/trunk/fs/exportfs/expfs.c @@ -212,7 +212,6 @@ reconnect_path(struct vfsmount *mnt, struct dentry *target_dir, char *nbuf) } struct getdents_callback { - struct dir_context ctx; char *name; /* name that was found. It already points to a buffer NAME_MAX+1 is size */ unsigned long ino; /* the inum we are looking for */ @@ -255,11 +254,7 @@ static int get_name(const struct path *path, char *name, struct dentry *child) struct inode *dir = path->dentry->d_inode; int error; struct file *file; - struct getdents_callback buffer = { - .ctx.actor = filldir_one, - .name = name, - .ino = child->d_inode->i_ino - }; + struct getdents_callback buffer; error = -ENOTDIR; if (!dir || !S_ISDIR(dir->i_mode)) @@ -276,14 +271,17 @@ static int get_name(const struct path *path, char *name, struct dentry *child) goto out; error = -EINVAL; - if (!file->f_op->iterate) + if (!file->f_op->readdir) goto out_close; + buffer.name = name; + buffer.ino = child->d_inode->i_ino; + buffer.found = 0; buffer.sequence = 0; while (1) { int old_seq = buffer.sequence; - error = iterate_dir(file, &buffer.ctx); + error = vfs_readdir(file, filldir_one, &buffer); if (buffer.found) { error = 0; break; diff --git a/trunk/fs/ext2/dir.c b/trunk/fs/ext2/dir.c index 6e1d4ab09d72..4237722bfd27 100644 --- a/trunk/fs/ext2/dir.c +++ b/trunk/fs/ext2/dir.c @@ -287,17 +287,17 @@ static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode) } static int -ext2_readdir(struct file *file, struct dir_context *ctx) +ext2_readdir (struct file * filp, void * dirent, filldir_t filldir) { - loff_t pos = ctx->pos; - struct inode *inode = file_inode(file); + loff_t pos = filp->f_pos; + struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; unsigned int offset = pos & ~PAGE_CACHE_MASK; unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned long npages = dir_pages(inode); unsigned chunk_mask = ~(ext2_chunk_size(inode)-1); unsigned char *types = NULL; - int need_revalidate = file->f_version != inode->i_version; + int need_revalidate = filp->f_version != inode->i_version; if (pos > inode->i_size - EXT2_DIR_REC_LEN(1)) return 0; @@ -314,16 +314,16 @@ ext2_readdir(struct file *file, struct dir_context *ctx) ext2_error(sb, __func__, "bad page in #%lu", inode->i_ino); - ctx->pos += PAGE_CACHE_SIZE - offset; + filp->f_pos += PAGE_CACHE_SIZE - offset; return PTR_ERR(page); } kaddr = page_address(page); if (unlikely(need_revalidate)) { if (offset) { offset = ext2_validate_entry(kaddr, offset, chunk_mask); - ctx->pos = (n<f_pos = (n<f_version = inode->i_version; + filp->f_version 
= inode->i_version; need_revalidate = 0; } de = (ext2_dirent *)(kaddr+offset); @@ -336,19 +336,22 @@ ext2_readdir(struct file *file, struct dir_context *ctx) return -EIO; } if (de->inode) { + int over; unsigned char d_type = DT_UNKNOWN; if (types && de->file_type < EXT2_FT_MAX) d_type = types[de->file_type]; - if (!dir_emit(ctx, de->name, de->name_len, - le32_to_cpu(de->inode), - d_type)) { + offset = (char *)de - kaddr; + over = filldir(dirent, de->name, de->name_len, + (n<inode), d_type); + if (over) { ext2_put_page(page); return 0; } } - ctx->pos += ext2_rec_len_from_disk(de->rec_len); + filp->f_pos += ext2_rec_len_from_disk(de->rec_len); } ext2_put_page(page); } @@ -721,7 +724,7 @@ int ext2_empty_dir (struct inode * inode) const struct file_operations ext2_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = ext2_readdir, + .readdir = ext2_readdir, .unlocked_ioctl = ext2_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext2_compat_ioctl, diff --git a/trunk/fs/ext3/dir.c b/trunk/fs/ext3/dir.c index f522425aaa24..87eccbbca255 100644 --- a/trunk/fs/ext3/dir.c +++ b/trunk/fs/ext3/dir.c @@ -28,7 +28,8 @@ static unsigned char ext3_filetype_table[] = { DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK }; -static int ext3_dx_readdir(struct file *, struct dir_context *); +static int ext3_dx_readdir(struct file * filp, + void * dirent, filldir_t filldir); static unsigned char get_dtype(struct super_block *sb, int filetype) { @@ -90,30 +91,36 @@ int ext3_check_dir_entry (const char * function, struct inode * dir, return error_msg == NULL ? 1 : 0; } -static int ext3_readdir(struct file *file, struct dir_context *ctx) +static int ext3_readdir(struct file * filp, + void * dirent, filldir_t filldir) { + int error = 0; unsigned long offset; - int i; + int i, stored; struct ext3_dir_entry_2 *de; int err; - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; + int ret = 0; int dir_has_error = 0; if (is_dx_dir(inode)) { - err = ext3_dx_readdir(file, ctx); - if (err != ERR_BAD_DX_DIR) - return err; + err = ext3_dx_readdir(filp, dirent, filldir); + if (err != ERR_BAD_DX_DIR) { + ret = err; + goto out; + } /* * We don't set the inode dirty flag since it's not * critical that it get flushed back to the disk. 
*/ - EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL; + EXT3_I(file_inode(filp))->i_flags &= ~EXT3_INDEX_FL; } - offset = ctx->pos & (sb->s_blocksize - 1); + stored = 0; + offset = filp->f_pos & (sb->s_blocksize - 1); - while (ctx->pos < inode->i_size) { - unsigned long blk = ctx->pos >> EXT3_BLOCK_SIZE_BITS(sb); + while (!error && !stored && filp->f_pos < inode->i_size) { + unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb); struct buffer_head map_bh; struct buffer_head *bh = NULL; @@ -122,12 +129,12 @@ static int ext3_readdir(struct file *file, struct dir_context *ctx) if (err > 0) { pgoff_t index = map_bh.b_blocknr >> (PAGE_CACHE_SHIFT - inode->i_blkbits); - if (!ra_has_index(&file->f_ra, index)) + if (!ra_has_index(&filp->f_ra, index)) page_cache_sync_readahead( sb->s_bdev->bd_inode->i_mapping, - &file->f_ra, file, + &filp->f_ra, filp, index, 1); - file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; + filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; bh = ext3_bread(NULL, inode, blk, 0, &err); } @@ -139,21 +146,22 @@ static int ext3_readdir(struct file *file, struct dir_context *ctx) if (!dir_has_error) { ext3_error(sb, __func__, "directory #%lu " "contains a hole at offset %lld", - inode->i_ino, ctx->pos); + inode->i_ino, filp->f_pos); dir_has_error = 1; } /* corrupt size? Maybe no more blocks to read */ - if (ctx->pos > inode->i_blocks << 9) + if (filp->f_pos > inode->i_blocks << 9) break; - ctx->pos += sb->s_blocksize - offset; + filp->f_pos += sb->s_blocksize - offset; continue; } +revalidate: /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid * dirent right now. Scan from the start of the block * to make sure. */ - if (offset && file->f_version != inode->i_version) { + if (filp->f_version != inode->i_version) { for (i = 0; i < sb->s_blocksize && i < offset; ) { de = (struct ext3_dir_entry_2 *) (bh->b_data + i); @@ -169,40 +177,53 @@ static int ext3_readdir(struct file *file, struct dir_context *ctx) i += ext3_rec_len_from_disk(de->rec_len); } offset = i; - ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1)) + filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1)) | offset; - file->f_version = inode->i_version; + filp->f_version = inode->i_version; } - while (ctx->pos < inode->i_size + while (!error && filp->f_pos < inode->i_size && offset < sb->s_blocksize) { de = (struct ext3_dir_entry_2 *) (bh->b_data + offset); if (!ext3_check_dir_entry ("ext3_readdir", inode, de, bh, offset)) { - /* On error, skip the to the + /* On error, skip the f_pos to the next block. */ - ctx->pos = (ctx->pos | + filp->f_pos = (filp->f_pos | (sb->s_blocksize - 1)) + 1; - break; + brelse (bh); + ret = stored; + goto out; } offset += ext3_rec_len_from_disk(de->rec_len); if (le32_to_cpu(de->inode)) { - if (!dir_emit(ctx, de->name, de->name_len, - le32_to_cpu(de->inode), - get_dtype(sb, de->file_type))) { - brelse(bh); - return 0; - } + /* We might block in the next section + * if the data destination is + * currently swapped out. So, use a + * version stamp to detect whether or + * not the directory has been modified + * during the copy operation. 
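/*
 * Illustrative sketch, not part of the patch: the comment and version
 * check restored above exist because filldir() may sleep while copying
 * the name to user space, and the directory block can change under us
 * in the meantime, which is why the emit step snapshots
 * filp->f_version first.  A hypothetical helper showing the shape of
 * the check (the -EAGAIN "please rescan" convention is made up here):
 */
static int emit_and_check(struct file *filp, void *dirent,
			  filldir_t filldir, struct ext3_dir_entry_2 *de,
			  struct super_block *sb)
{
	u64 version = filp->f_version;	/* snapshot before we may sleep */
	int error = filldir(dirent, de->name, de->name_len, filp->f_pos,
			    le32_to_cpu(de->inode),
			    get_dtype(sb, de->file_type));

	if (error)
		return error;		/* caller stops emitting entries */
	/* a mismatch means the block must be re-scanned from the start */
	return version != filp->f_version ? -EAGAIN : 0;
}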
+ */ + u64 version = filp->f_version; + + error = filldir(dirent, de->name, + de->name_len, + filp->f_pos, + le32_to_cpu(de->inode), + get_dtype(sb, de->file_type)); + if (error) + break; + if (version != filp->f_version) + goto revalidate; + stored ++; } - ctx->pos += ext3_rec_len_from_disk(de->rec_len); + filp->f_pos += ext3_rec_len_from_disk(de->rec_len); } offset = 0; brelse (bh); - if (ctx->pos < inode->i_size) - if (!dir_relax(inode)) - return 0; } - return 0; +out: + return ret; } static inline int is_32bit_api(void) @@ -431,54 +452,62 @@ int ext3_htree_store_dirent(struct file *dir_file, __u32 hash, * for all entres on the fname linked list. (Normally there is only * one entry on the linked list, unless there are 62 bit hash collisions.) */ -static bool call_filldir(struct file *file, struct dir_context *ctx, - struct fname *fname) +static int call_filldir(struct file * filp, void * dirent, + filldir_t filldir, struct fname *fname) { - struct dir_private_info *info = file->private_data; - struct inode *inode = file_inode(file); - struct super_block *sb = inode->i_sb; + struct dir_private_info *info = filp->private_data; + loff_t curr_pos; + struct inode *inode = file_inode(filp); + struct super_block * sb; + int error; + + sb = inode->i_sb; if (!fname) { printk("call_filldir: called with null fname?!?\n"); - return true; + return 0; } - ctx->pos = hash2pos(file, fname->hash, fname->minor_hash); + curr_pos = hash2pos(filp, fname->hash, fname->minor_hash); while (fname) { - if (!dir_emit(ctx, fname->name, fname->name_len, + error = filldir(dirent, fname->name, + fname->name_len, curr_pos, fname->inode, - get_dtype(sb, fname->file_type))) { + get_dtype(sb, fname->file_type)); + if (error) { + filp->f_pos = curr_pos; info->extra_fname = fname; - return false; + return error; } fname = fname->next; } - return true; + return 0; } -static int ext3_dx_readdir(struct file *file, struct dir_context *ctx) +static int ext3_dx_readdir(struct file * filp, + void * dirent, filldir_t filldir) { - struct dir_private_info *info = file->private_data; - struct inode *inode = file_inode(file); + struct dir_private_info *info = filp->private_data; + struct inode *inode = file_inode(filp); struct fname *fname; int ret; if (!info) { - info = ext3_htree_create_dir_info(file, ctx->pos); + info = ext3_htree_create_dir_info(filp, filp->f_pos); if (!info) return -ENOMEM; - file->private_data = info; + filp->private_data = info; } - if (ctx->pos == ext3_get_htree_eof(file)) + if (filp->f_pos == ext3_get_htree_eof(filp)) return 0; /* EOF */ /* Some one has messed with f_pos; reset the world */ - if (info->last_pos != ctx->pos) { + if (info->last_pos != filp->f_pos) { free_rb_tree_fname(&info->root); info->curr_node = NULL; info->extra_fname = NULL; - info->curr_hash = pos2maj_hash(file, ctx->pos); - info->curr_minor_hash = pos2min_hash(file, ctx->pos); + info->curr_hash = pos2maj_hash(filp, filp->f_pos); + info->curr_minor_hash = pos2min_hash(filp, filp->f_pos); } /* @@ -486,7 +515,7 @@ static int ext3_dx_readdir(struct file *file, struct dir_context *ctx) * chain, return them first. */ if (info->extra_fname) { - if (!call_filldir(file, ctx, info->extra_fname)) + if (call_filldir(filp, dirent, filldir, info->extra_fname)) goto finished; info->extra_fname = NULL; goto next_node; @@ -500,17 +529,17 @@ static int ext3_dx_readdir(struct file *file, struct dir_context *ctx) * cached entries. 
*/ if ((!info->curr_node) || - (file->f_version != inode->i_version)) { + (filp->f_version != inode->i_version)) { info->curr_node = NULL; free_rb_tree_fname(&info->root); - file->f_version = inode->i_version; - ret = ext3_htree_fill_tree(file, info->curr_hash, + filp->f_version = inode->i_version; + ret = ext3_htree_fill_tree(filp, info->curr_hash, info->curr_minor_hash, &info->next_hash); if (ret < 0) return ret; if (ret == 0) { - ctx->pos = ext3_get_htree_eof(file); + filp->f_pos = ext3_get_htree_eof(filp); break; } info->curr_node = rb_first(&info->root); @@ -519,7 +548,7 @@ static int ext3_dx_readdir(struct file *file, struct dir_context *ctx) fname = rb_entry(info->curr_node, struct fname, rb_hash); info->curr_hash = fname->hash; info->curr_minor_hash = fname->minor_hash; - if (!call_filldir(file, ctx, fname)) + if (call_filldir(filp, dirent, filldir, fname)) break; next_node: info->curr_node = rb_next(info->curr_node); @@ -530,7 +559,7 @@ static int ext3_dx_readdir(struct file *file, struct dir_context *ctx) info->curr_minor_hash = fname->minor_hash; } else { if (info->next_hash == ~0) { - ctx->pos = ext3_get_htree_eof(file); + filp->f_pos = ext3_get_htree_eof(filp); break; } info->curr_hash = info->next_hash; @@ -538,7 +567,7 @@ static int ext3_dx_readdir(struct file *file, struct dir_context *ctx) } } finished: - info->last_pos = ctx->pos; + info->last_pos = filp->f_pos; return 0; } @@ -553,7 +582,7 @@ static int ext3_release_dir (struct inode * inode, struct file * filp) const struct file_operations ext3_dir_operations = { .llseek = ext3_dir_llseek, .read = generic_read_dir, - .iterate = ext3_readdir, + .readdir = ext3_readdir, .unlocked_ioctl = ext3_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext3_compat_ioctl, diff --git a/trunk/fs/ext3/inode.c b/trunk/fs/ext3/inode.c index f67668f724ba..23c712825640 100644 --- a/trunk/fs/ext3/inode.c +++ b/trunk/fs/ext3/inode.c @@ -1825,20 +1825,19 @@ ext3_readpages(struct file *file, struct address_space *mapping, return mpage_readpages(mapping, pages, nr_pages, ext3_get_block); } -static void ext3_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +static void ext3_invalidatepage(struct page *page, unsigned long offset) { journal_t *journal = EXT3_JOURNAL(page->mapping->host); - trace_ext3_invalidatepage(page, offset, length); + trace_ext3_invalidatepage(page, offset); /* * If it's a full truncate we just forget about the pending dirtying */ - if (offset == 0 && length == PAGE_CACHE_SIZE) + if (offset == 0) ClearPageChecked(page); - journal_invalidatepage(journal, page, offset, length); + journal_invalidatepage(journal, page, offset); } static int ext3_releasepage(struct page *page, gfp_t wait) diff --git a/trunk/fs/ext3/namei.c b/trunk/fs/ext3/namei.c index cea8ecf3e76e..692de13e3596 100644 --- a/trunk/fs/ext3/namei.c +++ b/trunk/fs/ext3/namei.c @@ -576,8 +576,11 @@ static int htree_dirblock_to_tree(struct file *dir_file, if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh, (block<i_sb)) +((char *)de - bh->b_data))) { - /* silently ignore the rest of the block */ - break; + /* On error, skip the f_pos to the next block. 
*/ + dir_file->f_pos = (dir_file->f_pos | + (dir->i_sb->s_blocksize - 1)) + 1; + brelse (bh); + return count; } ext3fs_dirhash(de->name, de->name_len, hinfo); if ((hinfo->hash < start_hash) || diff --git a/trunk/fs/ext4/balloc.c b/trunk/fs/ext4/balloc.c index 58339393fa6e..d0f13eada0ed 100644 --- a/trunk/fs/ext4/balloc.c +++ b/trunk/fs/ext4/balloc.c @@ -682,15 +682,11 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb) static inline int test_root(ext4_group_t a, int b) { - while (1) { - if (a < b) - return 0; - if (a == b) - return 1; - if ((a % b) != 0) - return 0; - a = a / b; - } + int num = b; + + while (a > num) + num *= b; + return num == a; } static int ext4_group_sparse(ext4_group_t group) diff --git a/trunk/fs/ext4/dir.c b/trunk/fs/ext4/dir.c index 3c7d288ae94c..f8d56e4254e0 100644 --- a/trunk/fs/ext4/dir.c +++ b/trunk/fs/ext4/dir.c @@ -29,7 +29,8 @@ #include "ext4.h" #include "xattr.h" -static int ext4_dx_readdir(struct file *, struct dir_context *); +static int ext4_dx_readdir(struct file *filp, + void *dirent, filldir_t filldir); /** * Check if the given dir-inode refers to an htree-indexed directory @@ -102,56 +103,60 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, return 1; } -static int ext4_readdir(struct file *file, struct dir_context *ctx) +static int ext4_readdir(struct file *filp, + void *dirent, filldir_t filldir) { + int error = 0; unsigned int offset; int i, stored; struct ext4_dir_entry_2 *de; int err; - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; + int ret = 0; int dir_has_error = 0; if (is_dx_dir(inode)) { - err = ext4_dx_readdir(file, ctx); + err = ext4_dx_readdir(filp, dirent, filldir); if (err != ERR_BAD_DX_DIR) { - return err; + ret = err; + goto out; } /* * We don't set the inode dirty flag since it's not * critical that it get flushed back to the disk. */ - ext4_clear_inode_flag(file_inode(file), + ext4_clear_inode_flag(file_inode(filp), EXT4_INODE_INDEX); } if (ext4_has_inline_data(inode)) { int has_inline_data = 1; - int ret = ext4_read_inline_dir(file, ctx, + ret = ext4_read_inline_dir(filp, dirent, filldir, &has_inline_data); if (has_inline_data) return ret; } stored = 0; - offset = ctx->pos & (sb->s_blocksize - 1); + offset = filp->f_pos & (sb->s_blocksize - 1); - while (ctx->pos < inode->i_size) { + while (!error && !stored && filp->f_pos < inode->i_size) { struct ext4_map_blocks map; struct buffer_head *bh = NULL; - map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb); + map.m_lblk = filp->f_pos >> EXT4_BLOCK_SIZE_BITS(sb); map.m_len = 1; err = ext4_map_blocks(NULL, inode, &map, 0); if (err > 0) { pgoff_t index = map.m_pblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits); - if (!ra_has_index(&file->f_ra, index)) + if (!ra_has_index(&filp->f_ra, index)) page_cache_sync_readahead( sb->s_bdev->bd_inode->i_mapping, - &file->f_ra, file, + &filp->f_ra, filp, index, 1); - file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; + filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; bh = ext4_bread(NULL, inode, map.m_lblk, 0, &err); } @@ -161,16 +166,16 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) */ if (!bh) { if (!dir_has_error) { - EXT4_ERROR_FILE(file, 0, + EXT4_ERROR_FILE(filp, 0, "directory contains a " "hole at offset %llu", - (unsigned long long) ctx->pos); + (unsigned long long) filp->f_pos); dir_has_error = 1; } /* corrupt size? 
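/*
 * Illustrative note, not part of the patch: the fs/ext4/balloc.c hunk
 * above restores the multiplicative form of test_root(), which answers
 * "is a a power of b?" for locating sparse_super backup descriptors.
 * Worked examples for the restored loop:
 *
 *   test_root(49, 7): num = 7 -> 49; loop stops; 49 == 49 -> 1
 *   test_root(50, 7): num = 7 -> 49 -> 343;      343 == 50 -> 0
 *   test_root(1, 7):  loop never runs (1 > 7 is false); 7 == 1 -> 0
 *
 * A standalone copy that can be exercised outside the kernel:
 */
static int test_root_sketch(unsigned int a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}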
Maybe no more blocks to read */ - if (ctx->pos > inode->i_blocks << 9) + if (filp->f_pos > inode->i_blocks << 9) break; - ctx->pos += sb->s_blocksize - offset; + filp->f_pos += sb->s_blocksize - offset; continue; } @@ -178,20 +183,21 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) if (!buffer_verified(bh) && !ext4_dirent_csum_verify(inode, (struct ext4_dir_entry *)bh->b_data)) { - EXT4_ERROR_FILE(file, 0, "directory fails checksum " + EXT4_ERROR_FILE(filp, 0, "directory fails checksum " "at offset %llu", - (unsigned long long)ctx->pos); - ctx->pos += sb->s_blocksize - offset; + (unsigned long long)filp->f_pos); + filp->f_pos += sb->s_blocksize - offset; brelse(bh); continue; } set_buffer_verified(bh); +revalidate: /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid * dirent right now. Scan from the start of the block * to make sure. */ - if (file->f_version != inode->i_version) { + if (filp->f_version != inode->i_version) { for (i = 0; i < sb->s_blocksize && i < offset; ) { de = (struct ext4_dir_entry_2 *) (bh->b_data + i); @@ -208,46 +214,57 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) sb->s_blocksize); } offset = i; - ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1)) + filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1)) | offset; - file->f_version = inode->i_version; + filp->f_version = inode->i_version; } - while (ctx->pos < inode->i_size + while (!error && filp->f_pos < inode->i_size && offset < sb->s_blocksize) { de = (struct ext4_dir_entry_2 *) (bh->b_data + offset); - if (ext4_check_dir_entry(inode, file, de, bh, + if (ext4_check_dir_entry(inode, filp, de, bh, bh->b_data, bh->b_size, offset)) { /* - * On error, skip to the next block + * On error, skip the f_pos to the next block */ - ctx->pos = (ctx->pos | + filp->f_pos = (filp->f_pos | (sb->s_blocksize - 1)) + 1; - break; + brelse(bh); + ret = stored; + goto out; } offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); if (le32_to_cpu(de->inode)) { - if (!dir_emit(ctx, de->name, + /* We might block in the next section + * if the data destination is + * currently swapped out. So, use a + * version stamp to detect whether or + * not the directory has been modified + * during the copy operation. + */ + u64 version = filp->f_version; + + error = filldir(dirent, de->name, de->name_len, + filp->f_pos, le32_to_cpu(de->inode), - get_dtype(sb, de->file_type))) { - brelse(bh); - return 0; - } + get_dtype(sb, de->file_type)); + if (error) + break; + if (version != filp->f_version) + goto revalidate; + stored++; } - ctx->pos += ext4_rec_len_from_disk(de->rec_len, + filp->f_pos += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); } offset = 0; brelse(bh); - if (ctx->pos < inode->i_size) { - if (!dir_relax(inode)) - return 0; - } } - return 0; +out: + return ret; } static inline int is_32bit_api(void) @@ -475,12 +492,16 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash, * for all entres on the fname linked list. (Normally there is only * one entry on the linked list, unless there are 62 bit hash collisions.) 
*/ -static int call_filldir(struct file *file, struct dir_context *ctx, - struct fname *fname) +static int call_filldir(struct file *filp, void *dirent, + filldir_t filldir, struct fname *fname) { - struct dir_private_info *info = file->private_data; - struct inode *inode = file_inode(file); - struct super_block *sb = inode->i_sb; + struct dir_private_info *info = filp->private_data; + loff_t curr_pos; + struct inode *inode = file_inode(filp); + struct super_block *sb; + int error; + + sb = inode->i_sb; if (!fname) { ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: comm %s: " @@ -488,44 +509,47 @@ static int call_filldir(struct file *file, struct dir_context *ctx, inode->i_ino, current->comm); return 0; } - ctx->pos = hash2pos(file, fname->hash, fname->minor_hash); + curr_pos = hash2pos(filp, fname->hash, fname->minor_hash); while (fname) { - if (!dir_emit(ctx, fname->name, - fname->name_len, + error = filldir(dirent, fname->name, + fname->name_len, curr_pos, fname->inode, - get_dtype(sb, fname->file_type))) { + get_dtype(sb, fname->file_type)); + if (error) { + filp->f_pos = curr_pos; info->extra_fname = fname; - return 1; + return error; } fname = fname->next; } return 0; } -static int ext4_dx_readdir(struct file *file, struct dir_context *ctx) +static int ext4_dx_readdir(struct file *filp, + void *dirent, filldir_t filldir) { - struct dir_private_info *info = file->private_data; - struct inode *inode = file_inode(file); + struct dir_private_info *info = filp->private_data; + struct inode *inode = file_inode(filp); struct fname *fname; int ret; if (!info) { - info = ext4_htree_create_dir_info(file, ctx->pos); + info = ext4_htree_create_dir_info(filp, filp->f_pos); if (!info) return -ENOMEM; - file->private_data = info; + filp->private_data = info; } - if (ctx->pos == ext4_get_htree_eof(file)) + if (filp->f_pos == ext4_get_htree_eof(filp)) return 0; /* EOF */ /* Some one has messed with f_pos; reset the world */ - if (info->last_pos != ctx->pos) { + if (info->last_pos != filp->f_pos) { free_rb_tree_fname(&info->root); info->curr_node = NULL; info->extra_fname = NULL; - info->curr_hash = pos2maj_hash(file, ctx->pos); - info->curr_minor_hash = pos2min_hash(file, ctx->pos); + info->curr_hash = pos2maj_hash(filp, filp->f_pos); + info->curr_minor_hash = pos2min_hash(filp, filp->f_pos); } /* @@ -533,7 +557,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx) * chain, return them first. */ if (info->extra_fname) { - if (call_filldir(file, ctx, info->extra_fname)) + if (call_filldir(filp, dirent, filldir, info->extra_fname)) goto finished; info->extra_fname = NULL; goto next_node; @@ -547,17 +571,17 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx) * cached entries. 
*/ if ((!info->curr_node) || - (file->f_version != inode->i_version)) { + (filp->f_version != inode->i_version)) { info->curr_node = NULL; free_rb_tree_fname(&info->root); - file->f_version = inode->i_version; - ret = ext4_htree_fill_tree(file, info->curr_hash, + filp->f_version = inode->i_version; + ret = ext4_htree_fill_tree(filp, info->curr_hash, info->curr_minor_hash, &info->next_hash); if (ret < 0) return ret; if (ret == 0) { - ctx->pos = ext4_get_htree_eof(file); + filp->f_pos = ext4_get_htree_eof(filp); break; } info->curr_node = rb_first(&info->root); @@ -566,7 +590,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx) fname = rb_entry(info->curr_node, struct fname, rb_hash); info->curr_hash = fname->hash; info->curr_minor_hash = fname->minor_hash; - if (call_filldir(file, ctx, fname)) + if (call_filldir(filp, dirent, filldir, fname)) break; next_node: info->curr_node = rb_next(info->curr_node); @@ -577,7 +601,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx) info->curr_minor_hash = fname->minor_hash; } else { if (info->next_hash == ~0) { - ctx->pos = ext4_get_htree_eof(file); + filp->f_pos = ext4_get_htree_eof(filp); break; } info->curr_hash = info->next_hash; @@ -585,7 +609,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx) } } finished: - info->last_pos = ctx->pos; + info->last_pos = filp->f_pos; return 0; } @@ -600,7 +624,7 @@ static int ext4_release_dir(struct inode *inode, struct file *filp) const struct file_operations ext4_dir_operations = { .llseek = ext4_dir_llseek, .read = generic_read_dir, - .iterate = ext4_readdir, + .readdir = ext4_readdir, .unlocked_ioctl = ext4_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext4_compat_ioctl, diff --git a/trunk/fs/ext4/ext4.h b/trunk/fs/ext4/ext4.h index b577e45425b0..0aabb344b02e 100644 --- a/trunk/fs/ext4/ext4.h +++ b/trunk/fs/ext4/ext4.h @@ -176,23 +176,34 @@ struct ext4_map_blocks { unsigned int m_flags; }; +/* + * For delayed allocation tracking + */ +struct mpage_da_data { + struct inode *inode; + sector_t b_blocknr; /* start block number of extent */ + size_t b_size; /* size of extent */ + unsigned long b_state; /* state of the extent */ + unsigned long first_page, next_page; /* extent of pages */ + struct writeback_control *wbc; + int io_done; + int pages_written; + int retval; +}; + /* * Flags for ext4_io_end->flags */ #define EXT4_IO_END_UNWRITTEN 0x0001 -#define EXT4_IO_END_DIRECT 0x0002 +#define EXT4_IO_END_ERROR 0x0002 +#define EXT4_IO_END_DIRECT 0x0004 /* - * For converting uninitialized extents on a work queue. 'handle' is used for - * buffered writeback. + * For converting uninitialized extents on a work queue. 
*/ typedef struct ext4_io_end { struct list_head list; /* per-file finished IO list */ - handle_t *handle; /* handle reserved for extent - * conversion */ struct inode *inode; /* file being written to */ - struct bio *bio; /* Linked list of completed - * bios covering the extent */ unsigned int flag; /* unwritten or not */ loff_t offset; /* offset in the file */ ssize_t size; /* size of the extent */ @@ -570,6 +581,11 @@ enum { #define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010 #define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020 +/* + * Flags used by ext4_discard_partial_page_buffers + */ +#define EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED 0x0001 + /* * ioctl commands */ @@ -864,7 +880,6 @@ struct ext4_inode_info { rwlock_t i_es_lock; struct list_head i_es_lru; unsigned int i_es_lru_nr; /* protected by i_es_lock */ - unsigned long i_touch_when; /* jiffies of last accessing */ /* ialloc */ ext4_group_t i_last_alloc_group; @@ -889,22 +904,12 @@ struct ext4_inode_info { qsize_t i_reserved_quota; #endif - /* Lock protecting lists below */ + /* completed IOs that might need unwritten extents handling */ + struct list_head i_completed_io_list; spinlock_t i_completed_io_lock; - /* - * Completed IOs that need unwritten extents handling and have - * transaction reserved - */ - struct list_head i_rsv_conversion_list; - /* - * Completed IOs that need unwritten extents handling and don't have - * transaction reserved - */ - struct list_head i_unrsv_conversion_list; atomic_t i_ioend_count; /* Number of outstanding io_end structs */ atomic_t i_unwritten; /* Nr. of inflight conversions pending */ - struct work_struct i_rsv_conversion_work; - struct work_struct i_unrsv_conversion_work; + struct work_struct i_unwritten_work; /* deferred extent conversion */ spinlock_t i_block_reservation_lock; @@ -1241,6 +1246,7 @@ struct ext4_sb_info { unsigned int s_mb_stats; unsigned int s_mb_order2_reqs; unsigned int s_mb_group_prealloc; + unsigned int s_max_writeback_mb_bump; unsigned int s_max_dir_size_kb; /* where last allocation was done - for stream allocation */ unsigned long s_mb_last_group; @@ -1276,10 +1282,8 @@ struct ext4_sb_info { struct flex_groups *s_flex_groups; ext4_group_t s_flex_groups_allocated; - /* workqueue for unreserved extent convertions (dio) */ - struct workqueue_struct *unrsv_conversion_wq; - /* workqueue for reserved extent conversions (buffered io) */ - struct workqueue_struct *rsv_conversion_wq; + /* workqueue for dio unwritten */ + struct workqueue_struct *dio_unwritten_wq; /* timer for periodic error stats printing */ struct timer_list s_err_report; @@ -1304,7 +1308,6 @@ struct ext4_sb_info { /* Reclaim extents from extent status tree */ struct shrinker s_es_shrinker; struct list_head s_es_lru; - unsigned long s_es_last_sorted; struct percpu_counter s_extent_cache_cnt; spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp; }; @@ -1340,9 +1343,6 @@ static inline void ext4_set_io_unwritten_flag(struct inode *inode, struct ext4_io_end *io_end) { if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { - /* Writeback has to have coversion transaction reserved */ - WARN_ON(EXT4_SB(inode->i_sb)->s_journal && !io_end->handle && - !(io_end->flag & EXT4_IO_END_DIRECT)); io_end->flag |= EXT4_IO_END_UNWRITTEN; atomic_inc(&EXT4_I(inode)->i_unwritten); } @@ -2000,6 +2000,7 @@ static inline unsigned char get_dtype(struct super_block *sb, int filetype) /* fsync.c */ extern int ext4_sync_file(struct file *, loff_t, loff_t, int); +extern int ext4_flush_unwritten_io(struct inode *); /* hash.c */ extern int 
ext4fs_dirhash(const char *name, int len, struct @@ -2088,7 +2089,7 @@ extern int ext4_change_inode_journal_flag(struct inode *, int); extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); extern int ext4_can_truncate(struct inode *inode); extern void ext4_truncate(struct inode *); -extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length); +extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length); extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks); extern void ext4_set_inode_flags(struct inode *); extern void ext4_get_inode_flags(struct ext4_inode_info *); @@ -2096,12 +2097,9 @@ extern int ext4_alloc_da_blocks(struct inode *inode); extern void ext4_set_aops(struct inode *inode); extern int ext4_writepage_trans_blocks(struct inode *); extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); -extern int ext4_block_truncate_page(handle_t *handle, - struct address_space *mapping, loff_t from); -extern int ext4_block_zero_page_range(handle_t *handle, - struct address_space *mapping, loff_t from, loff_t length); -extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, - loff_t lstart, loff_t lend); +extern int ext4_discard_partial_page_buffers(handle_t *handle, + struct address_space *mapping, loff_t from, + loff_t length, int flags); extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); extern qsize_t *ext4_get_reserved_space(struct inode *inode); extern void ext4_da_update_reserve_space(struct inode *inode, @@ -2114,7 +2112,7 @@ extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs); extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock); -extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks); +extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk); extern void ext4_ind_truncate(handle_t *, struct inode *inode); extern int ext4_free_hole_blocks(handle_t *handle, struct inode *inode, ext4_lblk_t first, ext4_lblk_t stop); @@ -2169,96 +2167,42 @@ extern int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup); extern const char *ext4_decode_error(struct super_block *sb, int errno, char nbuf[16]); - extern __printf(4, 5) void __ext4_error(struct super_block *, const char *, unsigned int, const char *, ...); +#define ext4_error(sb, message...) __ext4_error(sb, __func__, \ + __LINE__, ## message) extern __printf(5, 6) -void __ext4_error_inode(struct inode *, const char *, unsigned int, ext4_fsblk_t, +void ext4_error_inode(struct inode *, const char *, unsigned int, ext4_fsblk_t, const char *, ...); extern __printf(5, 6) -void __ext4_error_file(struct file *, const char *, unsigned int, ext4_fsblk_t, +void ext4_error_file(struct file *, const char *, unsigned int, ext4_fsblk_t, const char *, ...); extern void __ext4_std_error(struct super_block *, const char *, unsigned int, int); extern __printf(4, 5) void __ext4_abort(struct super_block *, const char *, unsigned int, const char *, ...); +#define ext4_abort(sb, message...) __ext4_abort(sb, __func__, \ + __LINE__, ## message) extern __printf(4, 5) void __ext4_warning(struct super_block *, const char *, unsigned int, const char *, ...); +#define ext4_warning(sb, message...) 
__ext4_warning(sb, __func__, \ + __LINE__, ## message) extern __printf(3, 4) -void __ext4_msg(struct super_block *, const char *, const char *, ...); +void ext4_msg(struct super_block *, const char *, const char *, ...); extern void __dump_mmp_msg(struct super_block *, struct mmp_struct *mmp, const char *, unsigned int, const char *); +#define dump_mmp_msg(sb, mmp, msg) __dump_mmp_msg(sb, mmp, __func__, \ + __LINE__, msg) extern __printf(7, 8) void __ext4_grp_locked_error(const char *, unsigned int, struct super_block *, ext4_group_t, unsigned long, ext4_fsblk_t, const char *, ...); - -#ifdef CONFIG_PRINTK - -#define ext4_error_inode(inode, func, line, block, fmt, ...) \ - __ext4_error_inode(inode, func, line, block, fmt, ##__VA_ARGS__) -#define ext4_error_file(file, func, line, block, fmt, ...) \ - __ext4_error_file(file, func, line, block, fmt, ##__VA_ARGS__) -#define ext4_error(sb, fmt, ...) \ - __ext4_error(sb, __func__, __LINE__, fmt, ##__VA_ARGS__) -#define ext4_abort(sb, fmt, ...) \ - __ext4_abort(sb, __func__, __LINE__, fmt, ##__VA_ARGS__) -#define ext4_warning(sb, fmt, ...) \ - __ext4_warning(sb, __func__, __LINE__, fmt, ##__VA_ARGS__) -#define ext4_msg(sb, level, fmt, ...) \ - __ext4_msg(sb, level, fmt, ##__VA_ARGS__) -#define dump_mmp_msg(sb, mmp, msg) \ - __dump_mmp_msg(sb, mmp, __func__, __LINE__, msg) -#define ext4_grp_locked_error(sb, grp, ino, block, fmt, ...) \ - __ext4_grp_locked_error(__func__, __LINE__, sb, grp, ino, block, \ - fmt, ##__VA_ARGS__) - -#else - -#define ext4_error_inode(inode, func, line, block, fmt, ...) \ -do { \ - no_printk(fmt, ##__VA_ARGS__); \ - __ext4_error_inode(inode, "", 0, block, " "); \ -} while (0) -#define ext4_error_file(file, func, line, block, fmt, ...) \ -do { \ - no_printk(fmt, ##__VA_ARGS__); \ - __ext4_error_file(file, "", 0, block, " "); \ -} while (0) -#define ext4_error(sb, fmt, ...) \ -do { \ - no_printk(fmt, ##__VA_ARGS__); \ - __ext4_error(sb, "", 0, " "); \ -} while (0) -#define ext4_abort(sb, fmt, ...) \ -do { \ - no_printk(fmt, ##__VA_ARGS__); \ - __ext4_abort(sb, "", 0, " "); \ -} while (0) -#define ext4_warning(sb, fmt, ...) \ -do { \ - no_printk(fmt, ##__VA_ARGS__); \ - __ext4_warning(sb, "", 0, " "); \ -} while (0) -#define ext4_msg(sb, level, fmt, ...) \ -do { \ - no_printk(fmt, ##__VA_ARGS__); \ - __ext4_msg(sb, "", " "); \ -} while (0) -#define dump_mmp_msg(sb, mmp, msg) \ - __dump_mmp_msg(sb, mmp, "", 0, "") -#define ext4_grp_locked_error(sb, grp, ino, block, fmt, ...) \ -do { \ - no_printk(fmt, ##__VA_ARGS__); \ - __ext4_grp_locked_error("", 0, sb, grp, ino, block, " "); \ -} while (0) - -#endif - +#define ext4_grp_locked_error(sb, grp, message...) 
\ + __ext4_grp_locked_error(__func__, __LINE__, (sb), (grp), ## message) extern void ext4_update_dynamic_rev(struct super_block *sb); extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb, __u32 compat); @@ -2369,7 +2313,6 @@ struct ext4_group_info *ext4_get_group_info(struct super_block *sb, { struct ext4_group_info ***grp_info; long indexv, indexh; - BUG_ON(group >= EXT4_SB(sb)->s_groups_count); grp_info = EXT4_SB(sb)->s_group_info; indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb)); indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1); @@ -2573,7 +2516,7 @@ extern int ext4_try_create_inline_dir(handle_t *handle, struct inode *parent, struct inode *inode); extern int ext4_read_inline_dir(struct file *filp, - struct dir_context *ctx, + void *dirent, filldir_t filldir, int *has_inline_data); extern int htree_inlinedir_to_tree(struct file *dir_file, struct inode *dir, ext4_lblk_t block, @@ -2656,7 +2599,8 @@ struct ext4_extent; extern int ext4_ext_tree_init(handle_t *handle, struct inode *); extern int ext4_ext_writepage_trans_blocks(struct inode *, int); -extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents); +extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, + int chunk); extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags); extern void ext4_ext_truncate(handle_t *, struct inode *); @@ -2666,8 +2610,8 @@ extern void ext4_ext_init(struct super_block *); extern void ext4_ext_release(struct super_block *); extern long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len); -extern int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, - loff_t offset, ssize_t len); +extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, + ssize_t len); extern int ext4_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags); extern int ext4_ext_calc_metadata_amount(struct inode *inode, @@ -2708,14 +2652,14 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp, /* page-io.c */ extern int __init ext4_init_pageio(void); extern void ext4_exit_pageio(void); +extern void ext4_ioend_shutdown(struct inode *); extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags); extern ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end); extern int ext4_put_io_end(ext4_io_end_t *io_end); extern void ext4_put_io_end_defer(ext4_io_end_t *io_end); extern void ext4_io_submit_init(struct ext4_io_submit *io, struct writeback_control *wbc); -extern void ext4_end_io_rsv_work(struct work_struct *work); -extern void ext4_end_io_unrsv_work(struct work_struct *work); +extern void ext4_end_io_work(struct work_struct *work); extern void ext4_io_submit(struct ext4_io_submit *io); extern int ext4_bio_write_page(struct ext4_io_submit *io, struct page *page, @@ -2728,17 +2672,20 @@ extern void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp); extern int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp); -/* - * Note that these flags will never ever appear in a buffer_head's state flag. - * See EXT4_MAP_... to see where this is used. - */ +/* BH_Uninit flag: blocks are allocated but uninitialized on disk */ enum ext4_state_bits { BH_Uninit /* blocks are allocated but uninitialized on disk */ - = BH_JBDPrivateStart, + = BH_JBDPrivateStart, BH_AllocFromCluster, /* allocated blocks were part of already - * allocated cluster. */ + * allocated cluster. 
Note that this flag will + * never, ever appear in a buffer_head's state + * flag. See EXT4_MAP_FROM_CLUSTER to see where + * this is used. */ }; +BUFFER_FNS(Uninit, uninit) +TAS_BUFFER_FNS(Uninit, uninit) + /* * Add new method to test whether block and inode bitmaps are properly * initialized. With uninit_bg reading the block from disk is not enough diff --git a/trunk/fs/ext4/ext4_jbd2.c b/trunk/fs/ext4/ext4_jbd2.c index 72a3600aedbd..451eb4045330 100644 --- a/trunk/fs/ext4/ext4_jbd2.c +++ b/trunk/fs/ext4/ext4_jbd2.c @@ -38,43 +38,31 @@ static void ext4_put_nojournal(handle_t *handle) /* * Wrappers for jbd2_journal_start/end. */ -static int ext4_journal_check_start(struct super_block *sb) +handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line, + int type, int nblocks) { journal_t *journal; might_sleep(); + + trace_ext4_journal_start(sb, nblocks, _RET_IP_); if (sb->s_flags & MS_RDONLY) - return -EROFS; + return ERR_PTR(-EROFS); + WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE); journal = EXT4_SB(sb)->s_journal; + if (!journal) + return ext4_get_nojournal(); /* * Special case here: if the journal has aborted behind our * backs (eg. EIO in the commit thread), then we still need to * take the FS itself readonly cleanly. */ - if (journal && is_journal_aborted(journal)) { + if (is_journal_aborted(journal)) { ext4_abort(sb, "Detected aborted journal"); - return -EROFS; + return ERR_PTR(-EROFS); } - return 0; -} - -handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line, - int type, int blocks, int rsv_blocks) -{ - journal_t *journal; - int err; - - trace_ext4_journal_start(sb, blocks, rsv_blocks, _RET_IP_); - err = ext4_journal_check_start(sb); - if (err < 0) - return ERR_PTR(err); - - journal = EXT4_SB(sb)->s_journal; - if (!journal) - return ext4_get_nojournal(); - return jbd2__journal_start(journal, blocks, rsv_blocks, GFP_NOFS, - type, line); + return jbd2__journal_start(journal, nblocks, GFP_NOFS, type, line); } int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle) @@ -98,30 +86,6 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle) return err; } -handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line, - int type) -{ - struct super_block *sb; - int err; - - if (!ext4_handle_valid(handle)) - return ext4_get_nojournal(); - - sb = handle->h_journal->j_private; - trace_ext4_journal_start_reserved(sb, handle->h_buffer_credits, - _RET_IP_); - err = ext4_journal_check_start(sb); - if (err < 0) { - jbd2_journal_free_reserved(handle); - return ERR_PTR(err); - } - - err = jbd2_journal_start_reserved(handle, type, line); - if (err < 0) - return ERR_PTR(err); - return handle; -} - void ext4_journal_abort_handle(const char *caller, unsigned int line, const char *err_fn, struct buffer_head *bh, handle_t *handle, int err) diff --git a/trunk/fs/ext4/ext4_jbd2.h b/trunk/fs/ext4/ext4_jbd2.h index 2877258d9497..c8c6885406db 100644 --- a/trunk/fs/ext4/ext4_jbd2.h +++ b/trunk/fs/ext4/ext4_jbd2.h @@ -134,8 +134,7 @@ static inline int ext4_jbd2_credits_xattr(struct inode *inode) #define EXT4_HT_MIGRATE 8 #define EXT4_HT_MOVE_EXTENTS 9 #define EXT4_HT_XATTR 10 -#define EXT4_HT_EXT_CONVERT 11 -#define EXT4_HT_MAX 12 +#define EXT4_HT_MAX 11 /** * struct ext4_journal_cb_entry - Base structure for callback information. 
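ERR_PTR(-EROFS) in the journal-start hunk above folds an errno value into the returned handle pointer, so a single return value can carry either a valid handle or an error that callers test with IS_ERR()/PTR_ERR(). Below is a rough userspace model of that encoding; it is a simplification (the kernel reserves the top 4095 pointer values for this), and the handle type and start routine are made up for the example.

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    /* Encode a small negative errno in a pointer value. */
    static inline void *ERR_PTR(long error)     { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    struct handle { int credits; };

    /* Model of a start routine: refuse on a read-only flag, else allocate. */
    static struct handle *start_handle(int read_only, int credits)
    {
            struct handle *h;

            if (read_only)
                    return ERR_PTR(-30 /* EROFS */);
            h = malloc(sizeof(*h));
            if (!h)
                    return ERR_PTR(-12 /* ENOMEM */);
            h->credits = credits;
            return h;
    }

    int main(void)
    {
            struct handle *h = start_handle(1, 8);

            if (IS_ERR(h))
                    printf("start failed: %ld\n", PTR_ERR(h));
            else
                    free(h);
            return 0;
    }
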
@@ -266,7 +265,7 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line, __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb)) handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line, - int type, int blocks, int rsv_blocks); + int type, int nblocks); int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle); #define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096) @@ -301,37 +300,21 @@ static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed) } #define ext4_journal_start_sb(sb, type, nblocks) \ - __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks), 0) + __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks)) #define ext4_journal_start(inode, type, nblocks) \ - __ext4_journal_start((inode), __LINE__, (type), (nblocks), 0) - -#define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks) \ - __ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks)) + __ext4_journal_start((inode), __LINE__, (type), (nblocks)) static inline handle_t *__ext4_journal_start(struct inode *inode, unsigned int line, int type, - int blocks, int rsv_blocks) + int nblocks) { - return __ext4_journal_start_sb(inode->i_sb, line, type, blocks, - rsv_blocks); + return __ext4_journal_start_sb(inode->i_sb, line, type, nblocks); } #define ext4_journal_stop(handle) \ __ext4_journal_stop(__func__, __LINE__, (handle)) -#define ext4_journal_start_reserved(handle, type) \ - __ext4_journal_start_reserved((handle), __LINE__, (type)) - -handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line, - int type); - -static inline void ext4_journal_free_reserved(handle_t *handle) -{ - if (ext4_handle_valid(handle)) - jbd2_journal_free_reserved(handle); -} - static inline handle_t *ext4_journal_current_handle(void) { return journal_current_handle(); diff --git a/trunk/fs/ext4/extents.c b/trunk/fs/ext4/extents.c index 7097b0f680e6..107936db244e 100644 --- a/trunk/fs/ext4/extents.c +++ b/trunk/fs/ext4/extents.c @@ -2125,8 +2125,7 @@ static int ext4_fill_fiemap_extents(struct inode *inode, next_del = ext4_find_delayed_extent(inode, &es); if (!exists && next_del) { exists = 1; - flags |= (FIEMAP_EXTENT_DELALLOC | - FIEMAP_EXTENT_UNKNOWN); + flags |= FIEMAP_EXTENT_DELALLOC; } up_read(&EXT4_I(inode)->i_data_sem); @@ -2329,15 +2328,17 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, } /* - * How many index/leaf blocks need to change/allocate to add @extents extents? + * How many index/leaf blocks need to change/allocate to modify nrblocks? * - * If we add a single extent, then in the worse case, each tree level - * index/leaf need to be changed in case of the tree split. + * if nrblocks are fit in a single extent (chunk flag is 1), then + * in the worse case, each tree level index/leaf need to be changed + * if the tree split due to insert a new extent, then the old tree + * index/leaf need to be updated too * - * If more extents are inserted, they could cause the whole tree split more - * than once, but this is really rare. + * If the nrblocks are discontiguous, they could cause + * the whole tree split more than once, but this is really rare. 
*/ -int ext4_ext_index_trans_blocks(struct inode *inode, int extents) +int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) { int index; int depth; @@ -2348,7 +2349,7 @@ int ext4_ext_index_trans_blocks(struct inode *inode, int extents) depth = ext_depth(inode); - if (extents <= 1) + if (chunk) index = depth * 2; else index = depth * 3; @@ -2356,24 +2357,20 @@ int ext4_ext_index_trans_blocks(struct inode *inode, int extents) return index; } -static inline int get_default_free_blocks_flags(struct inode *inode) -{ - if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) - return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; - else if (ext4_should_journal_data(inode)) - return EXT4_FREE_BLOCKS_FORGET; - return 0; -} - static int ext4_remove_blocks(handle_t *handle, struct inode *inode, struct ext4_extent *ex, - long long *partial_cluster, + ext4_fsblk_t *partial_cluster, ext4_lblk_t from, ext4_lblk_t to) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); unsigned short ee_len = ext4_ext_get_actual_len(ex); ext4_fsblk_t pblk; - int flags = get_default_free_blocks_flags(inode); + int flags = 0; + + if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) + flags |= EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; + else if (ext4_should_journal_data(inode)) + flags |= EXT4_FREE_BLOCKS_FORGET; /* * For bigalloc file systems, we never free a partial cluster @@ -2391,8 +2388,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, * partial cluster here. */ pblk = ext4_ext_pblock(ex) + ee_len - 1; - if ((*partial_cluster > 0) && - (EXT4_B2C(sbi, pblk) != *partial_cluster)) { + if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) { ext4_free_blocks(handle, inode, NULL, EXT4_C2B(sbi, *partial_cluster), sbi->s_cluster_ratio, flags); @@ -2418,46 +2414,41 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { /* tail removal */ ext4_lblk_t num; - unsigned int unaligned; num = le32_to_cpu(ex->ee_block) + ee_len - from; pblk = ext4_ext_pblock(ex) + ee_len - num; - /* - * Usually we want to free partial cluster at the end of the - * extent, except for the situation when the cluster is still - * used by any other extent (partial_cluster is negative). - */ - if (*partial_cluster < 0 && - -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1)) - flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER; - - ext_debug("free last %u blocks starting %llu partial %lld\n", - num, pblk, *partial_cluster); + ext_debug("free last %u blocks starting %llu\n", num, pblk); ext4_free_blocks(handle, inode, NULL, pblk, num, flags); /* * If the block range to be freed didn't start at the * beginning of a cluster, and we removed the entire - * extent and the cluster is not used by any other extent, - * save the partial cluster here, since we might need to - * delete if we determine that the truncate operation has - * removed all of the blocks in the cluster. - * - * On the other hand, if we did not manage to free the whole - * extent, we have to mark the cluster as used (store negative - * cluster number in partial_cluster). + * extent, save the partial cluster here, since we + * might need to delete if we determine that the + * truncate operation has removed all of the blocks in + * the cluster. 
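ext4_ext_index_trans_blocks() in the hunk above turns the extent-tree depth into a journal-credit estimate: roughly two metadata blocks per level when the insertion is a single contiguous chunk, and three per level when the blocks are scattered and splits may touch extra nodes. A standalone sketch of that arithmetic, purely illustrative:

    #include <stdio.h>

    /*
     * Worst-case index/leaf blocks dirtied when inserting into an extent
     * tree of the given depth: two per level for one contiguous chunk,
     * three per level when the blocks are discontiguous.
     */
    static int index_trans_blocks(int depth, int contiguous_chunk)
    {
            return contiguous_chunk ? depth * 2 : depth * 3;
    }

    int main(void)
    {
            printf("depth 3, one chunk : %d\n", index_trans_blocks(3, 1));
            printf("depth 3, scattered : %d\n", index_trans_blocks(3, 0));
            return 0;
    }
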
*/ - unaligned = pblk & (sbi->s_cluster_ratio - 1); - if (unaligned && (ee_len == num) && - (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) + if (pblk & (sbi->s_cluster_ratio - 1) && + (ee_len == num)) *partial_cluster = EXT4_B2C(sbi, pblk); - else if (unaligned) - *partial_cluster = -((long long)EXT4_B2C(sbi, pblk)); - else if (*partial_cluster > 0) + else *partial_cluster = 0; - } else - ext4_error(sbi->s_sb, "strange request: removal(2) " - "%u-%u from %u:%u\n", - from, to, le32_to_cpu(ex->ee_block), ee_len); + } else if (from == le32_to_cpu(ex->ee_block) + && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { + /* head removal */ + ext4_lblk_t num; + ext4_fsblk_t start; + + num = to - from; + start = ext4_ext_pblock(ex); + + ext_debug("free first %u blocks starting %llu\n", num, start); + ext4_free_blocks(handle, inode, NULL, start, num, flags); + + } else { + printk(KERN_INFO "strange request: removal(2) " + "%u-%u from %u:%u\n", + from, to, le32_to_cpu(ex->ee_block), ee_len); + } return 0; } @@ -2470,16 +2461,12 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, * @handle: The journal handle * @inode: The files inode * @path: The path to the leaf - * @partial_cluster: The cluster which we'll have to free if all extents - * has been released from it. It gets negative in case - * that the cluster is still used. * @start: The first block to remove * @end: The last block to remove */ static int ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, - struct ext4_ext_path *path, - long long *partial_cluster, + struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster, ext4_lblk_t start, ext4_lblk_t end) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); @@ -2492,7 +2479,6 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, unsigned short ex_ee_len; unsigned uninitialized = 0; struct ext4_extent *ex; - ext4_fsblk_t pblk; /* the header must be checked already in ext4_ext_remove_space() */ ext_debug("truncate since %u in leaf to %u\n", start, end); @@ -2504,9 +2490,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, return -EIO; } /* find where to start removing */ - ex = path[depth].p_ext; - if (!ex) - ex = EXT_LAST_EXTENT(eh); + ex = EXT_LAST_EXTENT(eh); ex_ee_block = le32_to_cpu(ex->ee_block); ex_ee_len = ext4_ext_get_actual_len(ex); @@ -2533,16 +2517,6 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, /* If this extent is beyond the end of the hole, skip it */ if (end < ex_ee_block) { - /* - * We're going to skip this extent and move to another, - * so if this extent is not cluster aligned we have - * to mark the current cluster as used to avoid - * accidentally freeing it later on - */ - pblk = ext4_ext_pblock(ex); - if (pblk & (sbi->s_cluster_ratio - 1)) - *partial_cluster = - -((long long)EXT4_B2C(sbi, pblk)); ex--; ex_ee_block = le32_to_cpu(ex->ee_block); ex_ee_len = ext4_ext_get_actual_len(ex); @@ -2618,7 +2592,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, sizeof(struct ext4_extent)); } le16_add_cpu(&eh->eh_entries, -1); - } else if (*partial_cluster > 0) + } else *partial_cluster = 0; err = ext4_ext_dirty(handle, inode, path + depth); @@ -2636,13 +2610,17 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, err = ext4_ext_correct_indexes(handle, inode, path); /* - * Free the partial cluster only if the current extent does not - * reference it. Otherwise we might free used cluster. + * If there is still a entry in the leaf node, check to see if + * it references the partial cluster. 
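The partial-cluster bookkeeping above leans on the bigalloc cluster ratio being a power of two: a physical block maps to its cluster with a right shift (as EXT4_B2C does), and pblk & (cluster_ratio - 1) is non-zero exactly when the block is not the first block of its cluster. A self-contained sketch of that arithmetic, with a made-up geometry struct standing in for the superblock info:

    #include <stdio.h>
    #include <stdint.h>

    struct fs_geometry {
            unsigned int cluster_bits;   /* log2(blocks per cluster) */
    };

    /* Block-to-cluster conversion, in the spirit of EXT4_B2C(). */
    static uint64_t block_to_cluster(const struct fs_geometry *g, uint64_t pblk)
    {
            return pblk >> g->cluster_bits;
    }

    /* Non-zero when pblk is not aligned to the start of its cluster. */
    static unsigned int cluster_offset(const struct fs_geometry *g, uint64_t pblk)
    {
            unsigned int ratio = 1u << g->cluster_bits;

            return pblk & (ratio - 1);
    }

    int main(void)
    {
            struct fs_geometry g = { .cluster_bits = 4 };  /* 16 blocks/cluster */
            uint64_t pblk = 100;

            printf("block %llu -> cluster %llu, offset-in-cluster %u\n",
                   (unsigned long long)pblk,
                   (unsigned long long)block_to_cluster(&g, pblk),
                   cluster_offset(&g, pblk));
            return 0;
    }
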
This is the only place + * where it could; if it doesn't, we can free the cluster. */ - if (*partial_cluster > 0 && + if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) && (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) != *partial_cluster)) { - int flags = get_default_free_blocks_flags(inode); + int flags = EXT4_FREE_BLOCKS_FORGET; + + if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) + flags |= EXT4_FREE_BLOCKS_METADATA; ext4_free_blocks(handle, inode, NULL, EXT4_C2B(sbi, *partial_cluster), @@ -2686,7 +2664,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, struct super_block *sb = inode->i_sb; int depth = ext_depth(inode); struct ext4_ext_path *path = NULL; - long long partial_cluster = 0; + ext4_fsblk_t partial_cluster = 0; handle_t *handle; int i = 0, err = 0; @@ -2698,7 +2676,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, return PTR_ERR(handle); again: - trace_ext4_ext_remove_space(inode, start, end, depth); + trace_ext4_ext_remove_space(inode, start, depth); /* * Check if we are removing extents inside the extent tree. If that @@ -2866,14 +2844,17 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, } } - trace_ext4_ext_remove_space_done(inode, start, end, depth, - partial_cluster, path->p_hdr->eh_entries); + trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster, + path->p_hdr->eh_entries); /* If we still have something in the partial cluster and we have removed * even the first extent, then we should free the blocks in the partial * cluster as well. */ - if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) { - int flags = get_default_free_blocks_flags(inode); + if (partial_cluster && path->p_hdr->eh_entries == 0) { + int flags = EXT4_FREE_BLOCKS_FORGET; + + if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) + flags |= EXT4_FREE_BLOCKS_METADATA; ext4_free_blocks(handle, inode, NULL, EXT4_C2B(EXT4_SB(sb), partial_cluster), @@ -3661,7 +3642,7 @@ int ext4_find_delalloc_range(struct inode *inode, { struct extent_status es; - ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es); + ext4_es_find_delayed_extent(inode, lblk_start, &es); if (es.es_len == 0) return 0; /* there is no delay extent in this tree */ else if (es.es_lblk <= lblk_start && @@ -4382,7 +4363,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, } out3: - trace_ext4_ext_map_blocks_exit(inode, flags, map, err ? err : allocated); + trace_ext4_ext_map_blocks_exit(inode, map, err ? err : allocated); return err ? err : allocated; } @@ -4465,7 +4446,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) return -EOPNOTSUPP; if (mode & FALLOC_FL_PUNCH_HOLE) - return ext4_punch_hole(inode, offset, len); + return ext4_punch_hole(file, offset, len); ret = ext4_convert_inline_data(inode); if (ret) @@ -4567,9 +4548,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) * function, to convert the fallocated extents after IO is completed. * Returns 0 on success. 
*/ -int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, - loff_t offset, ssize_t len) +int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, + ssize_t len) { + handle_t *handle; unsigned int max_blocks; int ret = 0; int ret2 = 0; @@ -4584,32 +4566,16 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - map.m_lblk); /* - * This is somewhat ugly but the idea is clear: When transaction is - * reserved, everything goes into it. Otherwise we rather start several - * smaller transactions for conversion of each extent separately. + * credits to insert 1 extent into extent tree */ - if (handle) { - handle = ext4_journal_start_reserved(handle, - EXT4_HT_EXT_CONVERT); - if (IS_ERR(handle)) - return PTR_ERR(handle); - credits = 0; - } else { - /* - * credits to insert 1 extent into extent tree - */ - credits = ext4_chunk_trans_blocks(inode, max_blocks); - } + credits = ext4_chunk_trans_blocks(inode, max_blocks); while (ret >= 0 && ret < max_blocks) { map.m_lblk += ret; map.m_len = (max_blocks -= ret); - if (credits) { - handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, - credits); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - break; - } + handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + break; } ret = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_IO_CONVERT_EXT); @@ -4620,13 +4586,10 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, inode->i_ino, map.m_lblk, map.m_len, ret); ext4_mark_inode_dirty(handle, inode); - if (credits) - ret2 = ext4_journal_stop(handle); - if (ret <= 0 || ret2) + ret2 = ext4_journal_stop(handle); + if (ret <= 0 || ret2 ) break; } - if (!credits) - ret2 = ext4_journal_stop(handle); return ret > 0 ? ret2 : ret; } @@ -4645,10 +4608,9 @@ static int ext4_find_delayed_extent(struct inode *inode, struct extent_status es; ext4_lblk_t block, next_del; - if (newes->es_pblk == 0) { - ext4_es_find_delayed_extent_range(inode, newes->es_lblk, - newes->es_lblk + newes->es_len - 1, &es); + ext4_es_find_delayed_extent(inode, newes->es_lblk, &es); + if (newes->es_pblk == 0) { /* * No extent in extent-tree contains block @newes->es_pblk, * then the block may stay in 1)a hole or 2)delayed-extent. @@ -4668,7 +4630,7 @@ static int ext4_find_delayed_extent(struct inode *inode, } block = newes->es_lblk + newes->es_len; - ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es); + ext4_es_find_delayed_extent(inode, block, &es); if (es.es_len == 0) next_del = EXT_MAX_BLOCKS; else @@ -4696,7 +4658,7 @@ static int ext4_xattr_fiemap(struct inode *inode, error = ext4_get_inode_loc(inode, &iloc); if (error) return error; - physical = (__u64)iloc.bh->b_blocknr << blockbits; + physical = iloc.bh->b_blocknr << blockbits; offset = EXT4_GOOD_OLD_INODE_SIZE + EXT4_I(inode)->i_extra_isize; physical += offset; @@ -4704,7 +4666,7 @@ static int ext4_xattr_fiemap(struct inode *inode, flags |= FIEMAP_EXTENT_DATA_INLINE; brelse(iloc.bh); } else { /* external block */ - physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; + physical = EXT4_I(inode)->i_file_acl << blockbits; length = inode->i_sb->s_blocksize; } diff --git a/trunk/fs/ext4/extents_status.c b/trunk/fs/ext4/extents_status.c index ee018d5f397e..fe3337a85ede 100644 --- a/trunk/fs/ext4/extents_status.c +++ b/trunk/fs/ext4/extents_status.c @@ -10,7 +10,6 @@ * Ext4 extents status tree core functions. 
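The conversion routine above sizes its work by turning the byte range [offset, offset + len) into whole filesystem blocks: the starting logical block is offset >> blkbits, and the count is the block-aligned end minus that start. A quick standalone check of the same arithmetic, assuming the alignment macro rounds up to the next block boundary:

    #include <stdio.h>
    #include <stdint.h>

    /* Round len up to the next multiple of the block size (1 << blkbits). */
    static uint64_t block_align(uint64_t len, unsigned int blkbits)
    {
            uint64_t mask = (1ULL << blkbits) - 1;

            return (len + mask) & ~mask;
    }

    /* Number of blocks touched by the byte range [offset, offset + len). */
    static uint64_t blocks_spanned(uint64_t offset, uint64_t len,
                                   unsigned int blkbits)
    {
            uint64_t first = offset >> blkbits;
            uint64_t end   = block_align(offset + len, blkbits) >> blkbits;

            return end - first;
    }

    int main(void)
    {
            /* 4 KiB blocks: a 100-byte range at offset 4090 spans 2 blocks. */
            printf("%llu\n", (unsigned long long)blocks_spanned(4090, 100, 12));
            return 0;
    }
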
*/ #include -#include #include "ext4.h" #include "extents_status.h" #include "ext4_extents.h" @@ -233,16 +232,14 @@ static struct extent_status *__es_tree_search(struct rb_root *root, } /* - * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering - * @es->lblk if it exists, otherwise, the next extent after @es->lblk. + * ext4_es_find_delayed_extent: find the 1st delayed extent covering @es->lblk + * if it exists, otherwise, the next extent after @es->lblk. * * @inode: the inode which owns delayed extents * @lblk: the offset where we start to search - * @end: the offset where we stop to search * @es: delayed extent that we found */ -void ext4_es_find_delayed_extent_range(struct inode *inode, - ext4_lblk_t lblk, ext4_lblk_t end, +void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk, struct extent_status *es) { struct ext4_es_tree *tree = NULL; @@ -250,8 +247,7 @@ void ext4_es_find_delayed_extent_range(struct inode *inode, struct rb_node *node; BUG_ON(es == NULL); - BUG_ON(end < lblk); - trace_ext4_es_find_delayed_extent_range_enter(inode, lblk); + trace_ext4_es_find_delayed_extent_enter(inode, lblk); read_lock(&EXT4_I(inode)->i_es_lock); tree = &EXT4_I(inode)->i_es_tree; @@ -274,10 +270,6 @@ void ext4_es_find_delayed_extent_range(struct inode *inode, if (es1 && !ext4_es_is_delayed(es1)) { while ((node = rb_next(&es1->rb_node)) != NULL) { es1 = rb_entry(node, struct extent_status, rb_node); - if (es1->es_lblk > end) { - es1 = NULL; - break; - } if (ext4_es_is_delayed(es1)) break; } @@ -292,7 +284,8 @@ void ext4_es_find_delayed_extent_range(struct inode *inode, read_unlock(&EXT4_I(inode)->i_es_lock); - trace_ext4_es_find_delayed_extent_range_exit(inode, es); + ext4_es_lru_add(inode); + trace_ext4_es_find_delayed_extent_exit(inode, es); } static struct extent_status * @@ -672,6 +665,7 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, error: write_unlock(&EXT4_I(inode)->i_es_lock); + ext4_es_lru_add(inode); ext4_es_print_tree(inode); return err; @@ -733,6 +727,7 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, read_unlock(&EXT4_I(inode)->i_es_lock); + ext4_es_lru_add(inode); trace_ext4_es_lookup_extent_exit(inode, es, found); return found; } @@ -876,28 +871,12 @@ int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex) EXTENT_STATUS_WRITTEN); } -static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a, - struct list_head *b) -{ - struct ext4_inode_info *eia, *eib; - eia = list_entry(a, struct ext4_inode_info, i_es_lru); - eib = list_entry(b, struct ext4_inode_info, i_es_lru); - - if (eia->i_touch_when == eib->i_touch_when) - return 0; - if (time_after(eia->i_touch_when, eib->i_touch_when)) - return 1; - else - return -1; -} - static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) { struct ext4_sb_info *sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker); struct ext4_inode_info *ei; - struct list_head *cur, *tmp; - LIST_HEAD(skiped); + struct list_head *cur, *tmp, scanned; int nr_to_scan = sc->nr_to_scan; int ret, nr_shrunk = 0; @@ -907,41 +886,23 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) if (!nr_to_scan) return ret; - spin_lock(&sbi->s_es_lru_lock); - - /* - * If the inode that is at the head of LRU list is newer than - * last_sorted time, that means that we need to sort this list. 
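The comparator being removed above orders inodes by i_touch_when using time_after(), which stays correct across a jiffies wraparound because it compares the signed difference rather than the raw counter values. A minimal userspace illustration of that wrap-safe comparison, with a fixed 32-bit width for clarity:

    #include <stdio.h>
    #include <stdint.h>

    /* Wrap-safe "a is later than b", in the spirit of time_after(). */
    static int time_after32(uint32_t a, uint32_t b)
    {
            return (int32_t)(b - a) < 0;
    }

    /* Comparator suitable for sorting an LRU list by touch time. */
    static int touch_time_cmp(uint32_t a, uint32_t b)
    {
            if (a == b)
                    return 0;
            return time_after32(a, b) ? 1 : -1;
    }

    int main(void)
    {
            uint32_t just_before_wrap = 0xfffffff0u;
            uint32_t just_after_wrap  = 0x00000010u;

            /* The later timestamp still compares as later across the wrap. */
            printf("%d\n", touch_time_cmp(just_after_wrap, just_before_wrap));
            printf("%d\n", touch_time_cmp(just_before_wrap, just_after_wrap));
            return 0;
    }
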
- */ - ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info, i_es_lru); - if (sbi->s_es_last_sorted < ei->i_touch_when) { - list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp); - sbi->s_es_last_sorted = jiffies; - } + INIT_LIST_HEAD(&scanned); + spin_lock(&sbi->s_es_lru_lock); list_for_each_safe(cur, tmp, &sbi->s_es_lru) { - /* - * If we have already reclaimed all extents from extent - * status tree, just stop the loop immediately. - */ - if (percpu_counter_read_positive(&sbi->s_extent_cache_cnt) == 0) - break; + list_move_tail(cur, &scanned); ei = list_entry(cur, struct ext4_inode_info, i_es_lru); - /* Skip the inode that is newer than the last_sorted time */ - if (sbi->s_es_last_sorted < ei->i_touch_when) { - list_move_tail(cur, &skiped); + read_lock(&ei->i_es_lock); + if (ei->i_es_lru_nr == 0) { + read_unlock(&ei->i_es_lock); continue; } - - if (ei->i_es_lru_nr == 0) - continue; + read_unlock(&ei->i_es_lock); write_lock(&ei->i_es_lock); ret = __es_try_to_reclaim_extents(ei, nr_to_scan); - if (ei->i_es_lru_nr == 0) - list_del_init(&ei->i_es_lru); write_unlock(&ei->i_es_lock); nr_shrunk += ret; @@ -949,9 +910,7 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) if (nr_to_scan == 0) break; } - - /* Move the newer inodes into the tail of the LRU list. */ - list_splice_tail(&skiped, &sbi->s_es_lru); + list_splice_tail(&scanned, &sbi->s_es_lru); spin_unlock(&sbi->s_es_lru_lock); ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt); @@ -959,19 +918,21 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) return ret; } -void ext4_es_register_shrinker(struct ext4_sb_info *sbi) +void ext4_es_register_shrinker(struct super_block *sb) { + struct ext4_sb_info *sbi; + + sbi = EXT4_SB(sb); INIT_LIST_HEAD(&sbi->s_es_lru); spin_lock_init(&sbi->s_es_lru_lock); - sbi->s_es_last_sorted = 0; sbi->s_es_shrinker.shrink = ext4_es_shrink; sbi->s_es_shrinker.seeks = DEFAULT_SEEKS; register_shrinker(&sbi->s_es_shrinker); } -void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi) +void ext4_es_unregister_shrinker(struct super_block *sb) { - unregister_shrinker(&sbi->s_es_shrinker); + unregister_shrinker(&EXT4_SB(sb)->s_es_shrinker); } void ext4_es_lru_add(struct inode *inode) @@ -979,14 +940,11 @@ void ext4_es_lru_add(struct inode *inode) struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); - ei->i_touch_when = jiffies; - - if (!list_empty(&ei->i_es_lru)) - return; - spin_lock(&sbi->s_es_lru_lock); if (list_empty(&ei->i_es_lru)) list_add_tail(&ei->i_es_lru, &sbi->s_es_lru); + else + list_move_tail(&ei->i_es_lru, &sbi->s_es_lru); spin_unlock(&sbi->s_es_lru_lock); } diff --git a/trunk/fs/ext4/extents_status.h b/trunk/fs/ext4/extents_status.h index e936730cc5b0..d8e2d4dc311e 100644 --- a/trunk/fs/ext4/extents_status.h +++ b/trunk/fs/ext4/extents_status.h @@ -39,7 +39,6 @@ EXTENT_STATUS_DELAYED | \ EXTENT_STATUS_HOLE) -struct ext4_sb_info; struct ext4_extent; struct extent_status { @@ -63,8 +62,7 @@ extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, unsigned long long status); extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len); -extern void ext4_es_find_delayed_extent_range(struct inode *inode, - ext4_lblk_t lblk, ext4_lblk_t end, +extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk, struct extent_status *es); extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, struct extent_status *es); @@ 
-120,8 +118,8 @@ static inline void ext4_es_store_status(struct extent_status *es, es->es_pblk = block; } -extern void ext4_es_register_shrinker(struct ext4_sb_info *sbi); -extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi); +extern void ext4_es_register_shrinker(struct super_block *sb); +extern void ext4_es_unregister_shrinker(struct super_block *sb); extern void ext4_es_lru_add(struct inode *inode); extern void ext4_es_lru_del(struct inode *inode); diff --git a/trunk/fs/ext4/file.c b/trunk/fs/ext4/file.c index b19f0a457f32..4959e29573b6 100644 --- a/trunk/fs/ext4/file.c +++ b/trunk/fs/ext4/file.c @@ -312,7 +312,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, blkbits = inode->i_sb->s_blocksize_bits; startoff = *offset; lastoff = startoff; - endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits; + endoff = (map->m_lblk + map->m_len) << blkbits; index = startoff >> PAGE_CACHE_SHIFT; end = endoff >> PAGE_CACHE_SHIFT; @@ -457,7 +457,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) ret = ext4_map_blocks(NULL, inode, &map, 0); if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { if (last != start) - dataoff = (loff_t)last << blkbits; + dataoff = last << blkbits; break; } @@ -465,10 +465,10 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) * If there is a delay extent at this offset, * it will be as a data. */ - ext4_es_find_delayed_extent_range(inode, last, last, &es); + ext4_es_find_delayed_extent(inode, last, &es); if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { if (last != start) - dataoff = (loff_t)last << blkbits; + dataoff = last << blkbits; break; } @@ -486,7 +486,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) } last++; - dataoff = (loff_t)last << blkbits; + dataoff = last << blkbits; } while (last <= end); mutex_unlock(&inode->i_mutex); @@ -540,7 +540,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) ret = ext4_map_blocks(NULL, inode, &map, 0); if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { last += ret; - holeoff = (loff_t)last << blkbits; + holeoff = last << blkbits; continue; } @@ -548,10 +548,10 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) * If there is a delay extent at this offset, * we will skip this extent. */ - ext4_es_find_delayed_extent_range(inode, last, last, &es); + ext4_es_find_delayed_extent(inode, last, &es); if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { last = es.es_lblk + es.es_len; - holeoff = (loff_t)last << blkbits; + holeoff = last << blkbits; continue; } @@ -566,7 +566,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) &map, &holeoff); if (!unwritten) { last += ret; - holeoff = (loff_t)last << blkbits; + holeoff = last << blkbits; continue; } } diff --git a/trunk/fs/ext4/fsync.c b/trunk/fs/ext4/fsync.c index a8bc47f75fa0..e0ba8a408def 100644 --- a/trunk/fs/ext4/fsync.c +++ b/trunk/fs/ext4/fsync.c @@ -73,6 +73,32 @@ static int ext4_sync_parent(struct inode *inode) return ret; } +/** + * __sync_file - generic_file_fsync without the locking and filemap_write + * @inode: inode to sync + * @datasync: only sync essential metadata if true + * + * This is just generic_file_fsync without the locking. This is needed for + * nojournal mode to make sure this inodes data/metadata makes it to disk + * properly. The i_mutex should be held already. 
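The casts stripped from the file.c hunk above, such as (loff_t)last << blkbits, matter because a shift is evaluated in the type of its left operand: a 32-bit block number shifted by the block-size bits overflows before the result is widened to 64 bits. A small demonstration of the difference:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t block = 0x00200000;      /* block number >= 2^21 */
            unsigned int blkbits = 12;        /* 4 KiB blocks */

            /* Shift happens in 32 bits, high bits are lost, then widened. */
            uint64_t wrong = block << blkbits;

            /* Widen first, then shift in 64 bits. */
            uint64_t right = (uint64_t)block << blkbits;

            printf("without cast: 0x%llx\n", (unsigned long long)wrong);
            printf("with cast   : 0x%llx\n", (unsigned long long)right);
            return 0;
    }
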
+ */ +static int __sync_inode(struct inode *inode, int datasync) +{ + int err; + int ret; + + ret = sync_mapping_buffers(inode->i_mapping); + if (!(inode->i_state & I_DIRTY)) + return ret; + if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) + return ret; + + err = sync_inode_metadata(inode, 1); + if (ret == 0) + ret = err; + return ret; +} + /* * akpm: A new design for ext4_sync_file(). * @@ -90,7 +116,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) struct inode *inode = file->f_mapping->host; struct ext4_inode_info *ei = EXT4_I(inode); journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; - int ret = 0, err; + int ret, err; tid_t commit_tid; bool needs_barrier = false; @@ -98,24 +124,25 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) trace_ext4_sync_file_enter(file, datasync); - if (inode->i_sb->s_flags & MS_RDONLY) { - /* Make sure that we read updated s_mount_flags value */ - smp_rmb(); - if (EXT4_SB(inode->i_sb)->s_mount_flags & EXT4_MF_FS_ABORTED) - ret = -EROFS; + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret) + return ret; + mutex_lock(&inode->i_mutex); + + if (inode->i_sb->s_flags & MS_RDONLY) + goto out; + + ret = ext4_flush_unwritten_io(inode); + if (ret < 0) goto out; - } if (!journal) { - ret = generic_file_fsync(file, start, end, datasync); + ret = __sync_inode(inode, datasync); if (!ret && !hlist_empty(&inode->i_dentry)) ret = ext4_sync_parent(inode); goto out; } - ret = filemap_write_and_wait_range(inode->i_mapping, start, end); - if (ret) - return ret; /* * data=writeback,ordered: * The caller's filemap_fdatawrite()/wait will sync the data. @@ -145,7 +172,8 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) if (!ret) ret = err; } -out: + out: + mutex_unlock(&inode->i_mutex); trace_ext4_sync_file_exit(inode, ret); return ret; } diff --git a/trunk/fs/ext4/ialloc.c b/trunk/fs/ext4/ialloc.c index f03598c6ffd3..00a818d67b54 100644 --- a/trunk/fs/ext4/ialloc.c +++ b/trunk/fs/ext4/ialloc.c @@ -747,8 +747,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir, if (!handle) { BUG_ON(nblocks <= 0); handle = __ext4_journal_start_sb(dir->i_sb, line_no, - handle_type, nblocks, - 0); + handle_type, nblocks); if (IS_ERR(handle)) { err = PTR_ERR(handle); ext4_std_error(sb, err); diff --git a/trunk/fs/ext4/indirect.c b/trunk/fs/ext4/indirect.c index 87b30cd357e7..b8d5d351e24f 100644 --- a/trunk/fs/ext4/indirect.c +++ b/trunk/fs/ext4/indirect.c @@ -624,7 +624,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, partial--; } out: - trace_ext4_ind_map_blocks_exit(inode, flags, map, err); + trace_ext4_ind_map_blocks_exit(inode, map, err); return err; } @@ -675,6 +675,11 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, retry: if (rw == READ && ext4_should_dioread_nolock(inode)) { + if (unlikely(atomic_read(&EXT4_I(inode)->i_unwritten))) { + mutex_lock(&inode->i_mutex); + ext4_flush_unwritten_io(inode); + mutex_unlock(&inode->i_mutex); + } /* * Nolock dioread optimization may be dynamically disabled * via ext4_inode_block_unlocked_dio(). 
Check inode's state @@ -774,18 +779,27 @@ int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock) return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1; } -/* - * Calculate number of indirect blocks touched by mapping @nrblocks logically - * contiguous blocks - */ -int ext4_ind_trans_blocks(struct inode *inode, int nrblocks) +int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk) { + int indirects; + + /* if nrblocks are contiguous */ + if (chunk) { + /* + * With N contiguous data blocks, we need at most + * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks, + * 2 dindirect blocks, and 1 tindirect block + */ + return DIV_ROUND_UP(nrblocks, + EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4; + } /* - * With N contiguous data blocks, we need at most - * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks, - * 2 dindirect blocks, and 1 tindirect block + * if nrblocks are not contiguous, worse case, each block touch + * a indirect block, and each indirect block touch a double indirect + * block, plus a triple indirect block */ - return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4; + indirects = nrblocks * 2 + 1; + return indirects; } /* @@ -926,13 +940,11 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode, __le32 *last) { __le32 *p; - int flags = EXT4_FREE_BLOCKS_VALIDATED; + int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED; int err; if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) - flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA; - else if (ext4_should_journal_data(inode)) - flags |= EXT4_FREE_BLOCKS_FORGET; + flags |= EXT4_FREE_BLOCKS_METADATA; if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free, count)) { diff --git a/trunk/fs/ext4/inline.c b/trunk/fs/ext4/inline.c index d9ecbf1113a7..3e2bf873e8a8 100644 --- a/trunk/fs/ext4/inline.c +++ b/trunk/fs/ext4/inline.c @@ -72,7 +72,7 @@ static int get_max_inline_xattr_value_size(struct inode *inode, entry = (struct ext4_xattr_entry *) ((void *)raw_inode + EXT4_I(inode)->i_inline_off); - free += EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)); + free += le32_to_cpu(entry->e_value_size); goto out; } @@ -1404,15 +1404,16 @@ int htree_inlinedir_to_tree(struct file *dir_file, * offset as if '.' and '..' really take place. * */ -int ext4_read_inline_dir(struct file *file, - struct dir_context *ctx, +int ext4_read_inline_dir(struct file *filp, + void *dirent, filldir_t filldir, int *has_inline_data) { + int error = 0; unsigned int offset, parent_ino; - int i; + int i, stored; struct ext4_dir_entry_2 *de; struct super_block *sb; - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); int ret, inline_size = 0; struct ext4_iloc iloc; void *dir_buf = NULL; @@ -1443,8 +1444,9 @@ int ext4_read_inline_dir(struct file *file, goto out; sb = inode->i_sb; + stored = 0; parent_ino = le32_to_cpu(((struct ext4_dir_entry_2 *)dir_buf)->inode); - offset = ctx->pos; + offset = filp->f_pos; /* * dotdot_offset and dotdot_size is the real offset and @@ -1458,74 +1460,104 @@ int ext4_read_inline_dir(struct file *file, extra_offset = dotdot_size - EXT4_INLINE_DOTDOT_SIZE; extra_size = extra_offset + inline_size; - /* - * If the version has changed since the last call to - * readdir(2), then we might be pointing to an invalid - * dirent right now. Scan from the start of the inline - * dir to make sure. - */ - if (file->f_version != inode->i_version) { - for (i = 0; i < extra_size && i < offset;) { - /* - * "." 
is with offset 0 and - * ".." is dotdot_offset. - */ - if (!i) { - i = dotdot_offset; - continue; - } else if (i == dotdot_offset) { - i = dotdot_size; - continue; + while (!error && !stored && filp->f_pos < extra_size) { +revalidate: + /* + * If the version has changed since the last call to + * readdir(2), then we might be pointing to an invalid + * dirent right now. Scan from the start of the inline + * dir to make sure. + */ + if (filp->f_version != inode->i_version) { + for (i = 0; i < extra_size && i < offset;) { + /* + * "." is with offset 0 and + * ".." is dotdot_offset. + */ + if (!i) { + i = dotdot_offset; + continue; + } else if (i == dotdot_offset) { + i = dotdot_size; + continue; + } + /* for other entry, the real offset in + * the buf has to be tuned accordingly. + */ + de = (struct ext4_dir_entry_2 *) + (dir_buf + i - extra_offset); + /* It's too expensive to do a full + * dirent test each time round this + * loop, but we do have to test at + * least that it is non-zero. A + * failure will be detected in the + * dirent test below. */ + if (ext4_rec_len_from_disk(de->rec_len, + extra_size) < EXT4_DIR_REC_LEN(1)) + break; + i += ext4_rec_len_from_disk(de->rec_len, + extra_size); } - /* for other entry, the real offset in - * the buf has to be tuned accordingly. - */ - de = (struct ext4_dir_entry_2 *) - (dir_buf + i - extra_offset); - /* It's too expensive to do a full - * dirent test each time round this - * loop, but we do have to test at - * least that it is non-zero. A - * failure will be detected in the - * dirent test below. */ - if (ext4_rec_len_from_disk(de->rec_len, extra_size) - < EXT4_DIR_REC_LEN(1)) - break; - i += ext4_rec_len_from_disk(de->rec_len, - extra_size); + offset = i; + filp->f_pos = offset; + filp->f_version = inode->i_version; } - offset = i; - ctx->pos = offset; - file->f_version = inode->i_version; - } - while (ctx->pos < extra_size) { - if (ctx->pos == 0) { - if (!dir_emit(ctx, ".", 1, inode->i_ino, DT_DIR)) - goto out; - ctx->pos = dotdot_offset; - continue; - } + while (!error && filp->f_pos < extra_size) { + if (filp->f_pos == 0) { + error = filldir(dirent, ".", 1, 0, inode->i_ino, + DT_DIR); + if (error) + break; + stored++; + filp->f_pos = dotdot_offset; + continue; + } - if (ctx->pos == dotdot_offset) { - if (!dir_emit(ctx, "..", 2, parent_ino, DT_DIR)) - goto out; - ctx->pos = dotdot_size; - continue; - } + if (filp->f_pos == dotdot_offset) { + error = filldir(dirent, "..", 2, + dotdot_offset, + parent_ino, DT_DIR); + if (error) + break; + stored++; - de = (struct ext4_dir_entry_2 *) - (dir_buf + ctx->pos - extra_offset); - if (ext4_check_dir_entry(inode, file, de, iloc.bh, dir_buf, - extra_size, ctx->pos)) - goto out; - if (le32_to_cpu(de->inode)) { - if (!dir_emit(ctx, de->name, de->name_len, - le32_to_cpu(de->inode), - get_dtype(sb, de->file_type))) + filp->f_pos = dotdot_size; + continue; + } + + de = (struct ext4_dir_entry_2 *) + (dir_buf + filp->f_pos - extra_offset); + if (ext4_check_dir_entry(inode, filp, de, + iloc.bh, dir_buf, + extra_size, filp->f_pos)) { + ret = stored; goto out; + } + if (le32_to_cpu(de->inode)) { + /* We might block in the next section + * if the data destination is + * currently swapped out. So, use a + * version stamp to detect whether or + * not the directory has been modified + * during the copy operation. 
+ */ + u64 version = filp->f_version; + + error = filldir(dirent, de->name, + de->name_len, + filp->f_pos, + le32_to_cpu(de->inode), + get_dtype(sb, de->file_type)); + if (error) + break; + if (version != filp->f_version) + goto revalidate; + stored++; + } + filp->f_pos += ext4_rec_len_from_disk(de->rec_len, + extra_size); } - ctx->pos += ext4_rec_len_from_disk(de->rec_len, extra_size); } out: kfree(dir_buf); @@ -1810,7 +1842,7 @@ int ext4_inline_data_fiemap(struct inode *inode, if (error) goto out; - physical = (__u64)iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits; + physical = iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits; physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data; physical += offsetof(struct ext4_inode, i_block); length = i_size_read(inode); diff --git a/trunk/fs/ext4/inode.c b/trunk/fs/ext4/inode.c index 0188e65e1f58..0723774bdfb5 100644 --- a/trunk/fs/ext4/inode.c +++ b/trunk/fs/ext4/inode.c @@ -132,12 +132,12 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode, new_size); } -static void ext4_invalidatepage(struct page *page, unsigned int offset, - unsigned int length); +static void ext4_invalidatepage(struct page *page, unsigned long offset); static int __ext4_journalled_writepage(struct page *page, unsigned int len); static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh); -static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, - int pextents); +static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle, + struct inode *inode, struct page *page, loff_t from, + loff_t length, int flags); /* * Test whether an inode is a fast symlink. @@ -215,8 +215,7 @@ void ext4_evict_inode(struct inode *inode) filemap_write_and_wait(&inode->i_data); } truncate_inode_pages(&inode->i_data, 0); - - WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count)); + ext4_ioend_shutdown(inode); goto no_delete; } @@ -226,8 +225,8 @@ void ext4_evict_inode(struct inode *inode) if (ext4_should_order_data(inode)) ext4_begin_ordered_truncate(inode, 0); truncate_inode_pages(&inode->i_data, 0); + ext4_ioend_shutdown(inode); - WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count)); if (is_bad_inode(inode)) goto no_delete; @@ -424,6 +423,66 @@ static int __check_block_validity(struct inode *inode, const char *func, #define check_block_validity(inode, map) \ __check_block_validity((inode), __func__, __LINE__, (map)) +/* + * Return the number of contiguous dirty pages in a given inode + * starting at page frame idx. 
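The restored inline-directory readdir loop guards against concurrent modification with a version stamp: it records the file's version before an operation that may block, and if the inode's version has moved by the time it returns, the cached offset is treated as stale and the scan is redone. The toy below only captures that optimistic check-and-rescan shape (a real implementation re-validates its position rather than blindly rescanning, so entries are not reported twice); all names here are invented for the example.

    #include <stdio.h>

    struct dir_state {
            unsigned long version;   /* bumped whenever entries change */
            int nentries;
            const char *names[8];
    };

    /* Pretend something modified the directory while we were "blocked". */
    static void concurrent_modification(struct dir_state *d)
    {
            d->names[d->nentries++] = "newfile";
            d->version++;
    }

    static void list_dir(struct dir_state *d)
    {
            int i = 0;
            int modified_once = 0;

            while (i < d->nentries) {
                    unsigned long seen = d->version;

                    printf("entry %d: %s\n", i, d->names[i]);

                    /* emulate blocking once, during which the dir changes */
                    if (!modified_once) {
                            modified_once = 1;
                            concurrent_modification(d);
                    }

                    if (seen != d->version) {
                            /* our offset may be stale: rescan from the start */
                            i = 0;
                            continue;
                    }
                    i++;
            }
    }

    int main(void)
    {
            struct dir_state d = { .version = 1, .nentries = 2,
                                   .names = { "a", "b" } };
            list_dir(&d);
            return 0;
    }
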
+ */ +static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx, + unsigned int max_pages) +{ + struct address_space *mapping = inode->i_mapping; + pgoff_t index; + struct pagevec pvec; + pgoff_t num = 0; + int i, nr_pages, done = 0; + + if (max_pages == 0) + return 0; + pagevec_init(&pvec, 0); + while (!done) { + index = idx; + nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, + PAGECACHE_TAG_DIRTY, + (pgoff_t)PAGEVEC_SIZE); + if (nr_pages == 0) + break; + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + struct buffer_head *bh, *head; + + lock_page(page); + if (unlikely(page->mapping != mapping) || + !PageDirty(page) || + PageWriteback(page) || + page->index != idx) { + done = 1; + unlock_page(page); + break; + } + if (page_has_buffers(page)) { + bh = head = page_buffers(page); + do { + if (!buffer_delay(bh) && + !buffer_unwritten(bh)) + done = 1; + bh = bh->b_this_page; + } while (!done && (bh != head)); + } + unlock_page(page); + if (done) + break; + idx++; + num++; + if (num >= max_pages) { + done = 1; + break; + } + } + pagevec_release(&pvec); + } + return num; +} + #ifdef ES_AGGRESSIVE_TEST static void ext4_map_blocks_es_recheck(handle_t *handle, struct inode *inode, @@ -514,8 +573,6 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, "logical block %lu\n", inode->i_ino, flags, map->m_len, (unsigned long) map->m_lblk); - ext4_es_lru_add(inode); - /* Lookup extent status tree firstly */ if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) { if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) { @@ -1061,13 +1118,10 @@ static int ext4_write_end(struct file *file, } } - if (ext4_has_inline_data(inode)) { - ret = ext4_write_inline_data_end(inode, pos, len, - copied, page); - if (ret < 0) - goto errout; - copied = ret; - } else + if (ext4_has_inline_data(inode)) + copied = ext4_write_inline_data_end(inode, pos, len, + copied, page); + else copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); @@ -1103,6 +1157,8 @@ static int ext4_write_end(struct file *file, if (i_size_changed) ext4_mark_inode_dirty(handle, inode); + if (copied < 0) + ret = copied; if (pos + len > inode->i_size && ext4_can_truncate(inode)) /* if we have allocated more blocks and copied * less. 
We will have blocks allocated outside @@ -1359,28 +1415,21 @@ static void ext4_da_release_space(struct inode *inode, int to_free) } static void ext4_da_page_release_reservation(struct page *page, - unsigned int offset, - unsigned int length) + unsigned long offset) { int to_release = 0; struct buffer_head *head, *bh; unsigned int curr_off = 0; struct inode *inode = page->mapping->host; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); - unsigned int stop = offset + length; int num_clusters; ext4_fsblk_t lblk; - BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); - head = page_buffers(page); bh = head; do { unsigned int next_off = curr_off + bh->b_size; - if (next_off > stop) - break; - if ((offset <= curr_off) && (buffer_delay(bh))) { to_release++; clear_buffer_delay(bh); @@ -1411,43 +1460,145 @@ static void ext4_da_page_release_reservation(struct page *page, * Delayed allocation stuff */ -struct mpage_da_data { - struct inode *inode; - struct writeback_control *wbc; +/* + * mpage_da_submit_io - walks through extent of pages and try to write + * them with writepage() call back + * + * @mpd->inode: inode + * @mpd->first_page: first page of the extent + * @mpd->next_page: page after the last page of the extent + * + * By the time mpage_da_submit_io() is called we expect all blocks + * to be allocated. this may be wrong if allocation failed. + * + * As pages are already locked by write_cache_pages(), we can't use it + */ +static int mpage_da_submit_io(struct mpage_da_data *mpd, + struct ext4_map_blocks *map) +{ + struct pagevec pvec; + unsigned long index, end; + int ret = 0, err, nr_pages, i; + struct inode *inode = mpd->inode; + struct address_space *mapping = inode->i_mapping; + loff_t size = i_size_read(inode); + unsigned int len, block_start; + struct buffer_head *bh, *page_bufs = NULL; + sector_t pblock = 0, cur_logical = 0; + struct ext4_io_submit io_submit; - pgoff_t first_page; /* The first page to write */ - pgoff_t next_page; /* Current page to examine */ - pgoff_t last_page; /* Last page to examine */ + BUG_ON(mpd->next_page <= mpd->first_page); + ext4_io_submit_init(&io_submit, mpd->wbc); + io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS); + if (!io_submit.io_end) + return -ENOMEM; /* - * Extent to map - this can be after first_page because that can be - * fully mapped. We somewhat abuse m_flags to store whether the extent - * is delalloc or unwritten. + * We need to start from the first_page to the next_page - 1 + * to make sure we also write the mapped dirty buffer_heads. + * If we look at mpd->b_blocknr we would only be looking + * at the currently mapped buffer_heads. 
*/ - struct ext4_map_blocks map; - struct ext4_io_submit io_submit; /* IO submission data */ -}; + index = mpd->first_page; + end = mpd->next_page - 1; + + pagevec_init(&pvec, 0); + while (index <= end) { + nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); + if (nr_pages == 0) + break; + for (i = 0; i < nr_pages; i++) { + int skip_page = 0; + struct page *page = pvec.pages[i]; + + index = page->index; + if (index > end) + break; + + if (index == size >> PAGE_CACHE_SHIFT) + len = size & ~PAGE_CACHE_MASK; + else + len = PAGE_CACHE_SIZE; + if (map) { + cur_logical = index << (PAGE_CACHE_SHIFT - + inode->i_blkbits); + pblock = map->m_pblk + (cur_logical - + map->m_lblk); + } + index++; + + BUG_ON(!PageLocked(page)); + BUG_ON(PageWriteback(page)); + + bh = page_bufs = page_buffers(page); + block_start = 0; + do { + if (map && (cur_logical >= map->m_lblk) && + (cur_logical <= (map->m_lblk + + (map->m_len - 1)))) { + if (buffer_delay(bh)) { + clear_buffer_delay(bh); + bh->b_blocknr = pblock; + } + if (buffer_unwritten(bh) || + buffer_mapped(bh)) + BUG_ON(bh->b_blocknr != pblock); + if (map->m_flags & EXT4_MAP_UNINIT) + set_buffer_uninit(bh); + clear_buffer_unwritten(bh); + } + + /* + * skip page if block allocation undone and + * block is dirty + */ + if (ext4_bh_delay_or_unwritten(NULL, bh)) + skip_page = 1; + bh = bh->b_this_page; + block_start += bh->b_size; + cur_logical++; + pblock++; + } while (bh != page_bufs); + + if (skip_page) { + unlock_page(page); + continue; + } + + clear_page_dirty_for_io(page); + err = ext4_bio_write_page(&io_submit, page, len, + mpd->wbc); + if (!err) + mpd->pages_written++; + /* + * In error case, we have to continue because + * remaining pages are still locked + */ + if (ret == 0) + ret = err; + } + pagevec_release(&pvec); + } + ext4_io_submit(&io_submit); + /* Drop io_end reference we got from init */ + ext4_put_io_end_defer(io_submit.io_end); + return ret; +} -static void mpage_release_unused_pages(struct mpage_da_data *mpd, - bool invalidate) +static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd) { int nr_pages, i; pgoff_t index, end; struct pagevec pvec; struct inode *inode = mpd->inode; struct address_space *mapping = inode->i_mapping; - - /* This is necessary when next_page == 0. 
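Two pieces of arithmetic in the submit path above are easy to miss: the last page of the file is written with a partial length (size & ~PAGE_MASK) rather than a full page, and a buffer's physical block is derived from its offset inside the mapped extent (m_pblk + (logical - m_lblk)). A standalone sketch of both, with simplified types and a made-up block_map struct:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1u << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    struct block_map {
            uint64_t m_lblk;        /* first logical block covered */
            uint64_t m_pblk;        /* physical block it maps to */
            unsigned int m_len;
    };

    /* How many bytes of this page actually hold file data. */
    static unsigned int bytes_in_page(uint64_t file_size, uint64_t page_index)
    {
            if (page_index == file_size >> PAGE_SHIFT)
                    return file_size & ~PAGE_MASK;
            return PAGE_SIZE;
    }

    /* Physical block backing a logical block inside a mapped extent. */
    static uint64_t logical_to_physical(const struct block_map *m, uint64_t lblk)
    {
            return m->m_pblk + (lblk - m->m_lblk);
    }

    int main(void)
    {
            struct block_map m = { .m_lblk = 100, .m_pblk = 5000, .m_len = 8 };
            uint64_t size = 3 * PAGE_SIZE + 100;   /* 100 bytes into page 3 */

            printf("page 3 carries %u bytes\n", bytes_in_page(size, 3));
            printf("logical 103 -> physical %llu\n",
                   (unsigned long long)logical_to_physical(&m, 103));
            return 0;
    }
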
*/ - if (mpd->first_page >= mpd->next_page) - return; + ext4_lblk_t start, last; index = mpd->first_page; end = mpd->next_page - 1; - if (invalidate) { - ext4_lblk_t start, last; - start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); - last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits); - ext4_es_remove_extent(inode, start, last - start + 1); - } + + start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits); + ext4_es_remove_extent(inode, start, last - start + 1); pagevec_init(&pvec, 0); while (index <= end) { @@ -1460,15 +1611,14 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd, break; BUG_ON(!PageLocked(page)); BUG_ON(PageWriteback(page)); - if (invalidate) { - block_invalidatepage(page, 0, PAGE_CACHE_SIZE); - ClearPageUptodate(page); - } + block_invalidatepage(page, 0); + ClearPageUptodate(page); unlock_page(page); } index = pvec.pages[nr_pages - 1]->index + 1; pagevec_release(&pvec); } + return; } static void ext4_print_free_blocks(struct inode *inode) @@ -1497,6 +1647,215 @@ static void ext4_print_free_blocks(struct inode *inode) return; } +/* + * mpage_da_map_and_submit - go through given space, map them + * if necessary, and then submit them for I/O + * + * @mpd - bh describing space + * + * The function skips space we know is already mapped to disk blocks. + * + */ +static void mpage_da_map_and_submit(struct mpage_da_data *mpd) +{ + int err, blks, get_blocks_flags; + struct ext4_map_blocks map, *mapp = NULL; + sector_t next = mpd->b_blocknr; + unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; + loff_t disksize = EXT4_I(mpd->inode)->i_disksize; + handle_t *handle = NULL; + + /* + * If the blocks are mapped already, or we couldn't accumulate + * any blocks, then proceed immediately to the submission stage. + */ + if ((mpd->b_size == 0) || + ((mpd->b_state & (1 << BH_Mapped)) && + !(mpd->b_state & (1 << BH_Delay)) && + !(mpd->b_state & (1 << BH_Unwritten)))) + goto submit_io; + + handle = ext4_journal_current_handle(); + BUG_ON(!handle); + + /* + * Call ext4_map_blocks() to allocate any delayed allocation + * blocks, or to convert an uninitialized extent to be + * initialized (in the case where we have written into + * one or more preallocated blocks). + * + * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to + * indicate that we are on the delayed allocation path. This + * affects functions in many different parts of the allocation + * call path. This flag exists primarily because we don't + * want to change *many* call functions, so ext4_map_blocks() + * will set the EXT4_STATE_DELALLOC_RESERVED flag once the + * inode's allocation semaphore is taken. + * + * If the blocks in questions were delalloc blocks, set + * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting + * variables are updated after the blocks have been allocated. + */ + map.m_lblk = next; + map.m_len = max_blocks; + /* + * We're in delalloc path and it is possible that we're going to + * need more metadata blocks than previously reserved. However + * we must not fail because we're in writeback and there is + * nothing we can do about it so it might result in data loss. + * So use reserved blocks to allocate metadata if possible. 
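When a page range is invalidated above, the matching extent-status records are dropped by converting page indexes to logical blocks with a shift of (PAGE_CACHE_SHIFT - blkbits). The sketch below shows that conversion for a whole page range, assuming the block size never exceeds the page size; it covers the full last page, which is the general form, whereas the hunk above uses the first block of the last page as its endpoint.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* Logical-block range covered by pages [first_page, last_page]. */
    static void page_range_to_blocks(uint64_t first_page, uint64_t last_page,
                                     unsigned int blkbits,
                                     uint64_t *start_blk, uint64_t *blk_count)
    {
            unsigned int shift = PAGE_SHIFT - blkbits;
            uint64_t start = first_page << shift;
            uint64_t last  = ((last_page + 1) << shift) - 1;

            *start_blk = start;
            *blk_count = last - start + 1;
    }

    int main(void)
    {
            uint64_t start, count;

            /* 1 KiB blocks (blkbits = 10): 4 blocks per 4 KiB page. */
            page_range_to_blocks(2, 4, 10, &start, &count);
            printf("blocks %llu, count %llu\n",
                   (unsigned long long)start, (unsigned long long)count);
            return 0;
    }
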
+ */ + get_blocks_flags = EXT4_GET_BLOCKS_CREATE | + EXT4_GET_BLOCKS_METADATA_NOFAIL; + if (ext4_should_dioread_nolock(mpd->inode)) + get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; + if (mpd->b_state & (1 << BH_Delay)) + get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; + + + blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags); + if (blks < 0) { + struct super_block *sb = mpd->inode->i_sb; + + err = blks; + /* + * If get block returns EAGAIN or ENOSPC and there + * appears to be free blocks we will just let + * mpage_da_submit_io() unlock all of the pages. + */ + if (err == -EAGAIN) + goto submit_io; + + if (err == -ENOSPC && ext4_count_free_clusters(sb)) { + mpd->retval = err; + goto submit_io; + } + + /* + * get block failure will cause us to loop in + * writepages, because a_ops->writepage won't be able + * to make progress. The page will be redirtied by + * writepage and writepages will again try to write + * the same. + */ + if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) { + ext4_msg(sb, KERN_CRIT, + "delayed block allocation failed for inode %lu " + "at logical offset %llu with max blocks %zd " + "with error %d", mpd->inode->i_ino, + (unsigned long long) next, + mpd->b_size >> mpd->inode->i_blkbits, err); + ext4_msg(sb, KERN_CRIT, + "This should not happen!! Data will be lost"); + if (err == -ENOSPC) + ext4_print_free_blocks(mpd->inode); + } + /* invalidate all the pages */ + ext4_da_block_invalidatepages(mpd); + + /* Mark this page range as having been completed */ + mpd->io_done = 1; + return; + } + BUG_ON(blks == 0); + + mapp = ↦ + if (map.m_flags & EXT4_MAP_NEW) { + struct block_device *bdev = mpd->inode->i_sb->s_bdev; + int i; + + for (i = 0; i < map.m_len; i++) + unmap_underlying_metadata(bdev, map.m_pblk + i); + } + + /* + * Update on-disk size along with block allocation. + */ + disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; + if (disksize > i_size_read(mpd->inode)) + disksize = i_size_read(mpd->inode); + if (disksize > EXT4_I(mpd->inode)->i_disksize) { + ext4_update_i_disksize(mpd->inode, disksize); + err = ext4_mark_inode_dirty(handle, mpd->inode); + if (err) + ext4_error(mpd->inode->i_sb, + "Failed to mark inode %lu dirty", + mpd->inode->i_ino); + } + +submit_io: + mpage_da_submit_io(mpd, mapp); + mpd->io_done = 1; +} + +#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ + (1 << BH_Delay) | (1 << BH_Unwritten)) + +/* + * mpage_add_bh_to_extent - try to add one more block to extent of blocks + * + * @mpd->lbh - extent of blocks + * @logical - logical number of the block in the file + * @b_state - b_state of the buffer head added + * + * the function is used to collect contig. blocks in same state + */ +static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, sector_t logical, + unsigned long b_state) +{ + sector_t next; + int blkbits = mpd->inode->i_blkbits; + int nrblocks = mpd->b_size >> blkbits; + + /* + * XXX Don't go larger than mballoc is willing to allocate + * This is a stopgap solution. We eventually need to fold + * mpage_da_submit_io() into this function and then call + * ext4_map_blocks() multiple times in a loop + */ + if (nrblocks >= (8*1024*1024 >> blkbits)) + goto flush_it; + + /* check if the reserved journal credits might overflow */ + if (!ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS)) { + if (nrblocks >= EXT4_MAX_TRANS_DATA) { + /* + * With non-extent format we are limited by the journal + * credit available. 
Total credit needed to insert + * nrblocks contiguous blocks is dependent on the + * nrblocks. So limit nrblocks. + */ + goto flush_it; + } + } + /* + * First block in the extent + */ + if (mpd->b_size == 0) { + mpd->b_blocknr = logical; + mpd->b_size = 1 << blkbits; + mpd->b_state = b_state & BH_FLAGS; + return; + } + + next = mpd->b_blocknr + nrblocks; + /* + * Can we merge the block to our big extent? + */ + if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) { + mpd->b_size += 1 << blkbits; + return; + } + +flush_it: + /* + * We couldn't merge the block to our extent, so we + * need to flush current extent and start new one + */ + mpage_da_map_and_submit(mpd); + return; +} + static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) { return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh); @@ -1529,8 +1888,6 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, "logical block %lu\n", inode->i_ino, map->m_len, (unsigned long) map->m_lblk); - ext4_es_lru_add(inode); - /* Lookup extent status tree firstly */ if (ext4_es_lookup_extent(inode, iblock, &es)) { @@ -1804,7 +2161,7 @@ static int __ext4_journalled_writepage(struct page *page, * lock so we have to do some magic. * * This function can get called via... - * - ext4_writepages after taking page lock (have journal handle) + * - ext4_da_writepages after taking page lock (have journal handle) * - journal_submit_inode_data_buffers (no journal handle) * - shrink_page_list via the kswapd/direct reclaim (no journal handle) * - grab_page_cache when doing write_begin (have journal handle) @@ -1859,428 +2216,106 @@ static int ext4_writepage(struct page *page, * journal_submit_inode_data_buffers() and in that case we must write * allocated buffers to achieve data=ordered mode guarantees. */ - if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL, - ext4_bh_delay_or_unwritten)) { - redirty_page_for_writepage(wbc, page); - if (current->flags & PF_MEMALLOC) { - /* - * For memory cleaning there's no point in writing only - * some buffers. So just bail out. Warn if we came here - * from direct reclaim. - */ - WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) - == PF_MEMALLOC); - unlock_page(page); - return 0; - } - } - - if (PageChecked(page) && ext4_should_journal_data(inode)) - /* - * It's mmapped pagecache. Add buffers and journal it. There - * doesn't seem much point in redirtying the page here. - */ - return __ext4_journalled_writepage(page, len); - - ext4_io_submit_init(&io_submit, wbc); - io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS); - if (!io_submit.io_end) { - redirty_page_for_writepage(wbc, page); - unlock_page(page); - return -ENOMEM; - } - ret = ext4_bio_write_page(&io_submit, page, len, wbc); - ext4_io_submit(&io_submit); - /* Drop io_end reference we got from init */ - ext4_put_io_end_defer(io_submit.io_end); - return ret; -} - -#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay)) - -/* - * mballoc gives us at most this number of blocks... - * XXX: That seems to be only a limitation of ext4_mb_normalize_request(). - * The rest of mballoc seems to handle chunks upto full group size. - */ -#define MAX_WRITEPAGES_EXTENT_LEN 2048 - -/* - * mpage_add_bh_to_extent - try to add bh to extent of blocks to map - * - * @mpd - extent of blocks - * @lblk - logical number of the block in the file - * @b_state - b_state of the buffer head added - * - * the function is used to collect contig. 
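/*
 * A userspace sketch of the accumulate-or-flush rule implemented by
 * mpage_add_bh_to_extent() above: start a new extent when none is
 * pending, grow it while blocks stay contiguous with matching state,
 * otherwise flush what was collected.  The struct and the flush()
 * stub are illustrative assumptions, not kernel API.
 */
#include <stdio.h>

struct pending { unsigned long start, len, state; };

static void flush(struct pending *p)
{
	if (p->len)
		printf("map and submit %lu+%lu (state %#lx)\n",
		       p->start, p->len, p->state);
	p->len = 0;
}

static void add_block(struct pending *p, unsigned long logical,
		      unsigned long state)
{
	if (p->len == 0) {			/* first block of the extent */
		p->start = logical;
		p->len = 1;
		p->state = state;
	} else if (logical == p->start + p->len && state == p->state) {
		p->len++;			/* contiguous, same state */
	} else {
		flush(p);			/* cannot merge: submit */
	}
}

int main(void)
{
	struct pending p = { 0, 0, 0 };

	add_block(&p, 10, 0x1);
	add_block(&p, 11, 0x1);		/* merged into 10+2 */
	add_block(&p, 20, 0x1);		/* gap: flushes 10+2; the kernel
					   re-drives this block from the
					   caller via mpd->io_done */
	flush(&p);
	return 0;
}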
blocks in same state - */ -static int mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk, - unsigned long b_state) -{ - struct ext4_map_blocks *map = &mpd->map; - - /* Don't go larger than mballoc is willing to allocate */ - if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN) - return 0; - - /* First block in the extent? */ - if (map->m_len == 0) { - map->m_lblk = lblk; - map->m_len = 1; - map->m_flags = b_state & BH_FLAGS; - return 1; - } - - /* Can we merge the block to our big extent? */ - if (lblk == map->m_lblk + map->m_len && - (b_state & BH_FLAGS) == map->m_flags) { - map->m_len++; - return 1; - } - return 0; -} - -static bool add_page_bufs_to_extent(struct mpage_da_data *mpd, - struct buffer_head *head, - struct buffer_head *bh, - ext4_lblk_t lblk) -{ - struct inode *inode = mpd->inode; - ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1) - >> inode->i_blkbits; - - do { - BUG_ON(buffer_locked(bh)); - - if (!buffer_dirty(bh) || !buffer_mapped(bh) || - (!buffer_delay(bh) && !buffer_unwritten(bh)) || - lblk >= blocks) { - /* Found extent to map? */ - if (mpd->map.m_len) - return false; - if (lblk >= blocks) - return true; - continue; - } - if (!mpage_add_bh_to_extent(mpd, lblk, bh->b_state)) - return false; - } while (lblk++, (bh = bh->b_this_page) != head); - return true; -} - -static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) -{ - int len; - loff_t size = i_size_read(mpd->inode); - int err; - - BUG_ON(page->index != mpd->first_page); - if (page->index == size >> PAGE_CACHE_SHIFT) - len = size & ~PAGE_CACHE_MASK; - else - len = PAGE_CACHE_SIZE; - clear_page_dirty_for_io(page); - err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc); - if (!err) - mpd->wbc->nr_to_write--; - mpd->first_page++; - - return err; -} - -/* - * mpage_map_buffers - update buffers corresponding to changed extent and - * submit fully mapped pages for IO - * - * @mpd - description of extent to map, on return next extent to map - * - * Scan buffers corresponding to changed extent (we expect corresponding pages - * to be already locked) and update buffer state according to new extent state. - * We map delalloc buffers to their physical location, clear unwritten bits, - * and mark buffers as uninit when we perform writes to uninitialized extents - * and do extent conversion after IO is finished. If the last page is not fully - * mapped, we update @map to the next extent in the last page that needs - * mapping. Otherwise we submit the page for IO. 
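/*
 * Sketch of the "how much of this page lies inside i_size" rule used
 * by mpage_submit_page() above; the 4096-byte page size is an assumed
 * example value (size & ~PAGE_CACHE_MASK is the in-page remainder).
 */
#include <stdio.h>

#define PAGE_BYTES 4096UL

static unsigned long bytes_to_write(unsigned long page_index,
				    unsigned long i_size)
{
	if (page_index == i_size / PAGE_BYTES)	/* page containing EOF */
		return i_size & (PAGE_BYTES - 1);
	return PAGE_BYTES;			/* page fully below EOF */
}

int main(void)
{
	printf("%lu\n", bytes_to_write(2, 10000));	/* 1808 */
	printf("%lu\n", bytes_to_write(1, 10000));	/* 4096 */
	return 0;
}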
- */ -static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) -{ - struct pagevec pvec; - int nr_pages, i; - struct inode *inode = mpd->inode; - struct buffer_head *head, *bh; - int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits; - ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1) - >> inode->i_blkbits; - pgoff_t start, end; - ext4_lblk_t lblk; - sector_t pblock; - int err; - - start = mpd->map.m_lblk >> bpp_bits; - end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits; - lblk = start << bpp_bits; - pblock = mpd->map.m_pblk; - - pagevec_init(&pvec, 0); - while (start <= end) { - nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start, - PAGEVEC_SIZE); - if (nr_pages == 0) - break; - for (i = 0; i < nr_pages; i++) { - struct page *page = pvec.pages[i]; - - if (page->index > end) - break; - /* Upto 'end' pages must be contiguous */ - BUG_ON(page->index != start); - bh = head = page_buffers(page); - do { - if (lblk < mpd->map.m_lblk) - continue; - if (lblk >= mpd->map.m_lblk + mpd->map.m_len) { - /* - * Buffer after end of mapped extent. - * Find next buffer in the page to map. - */ - mpd->map.m_len = 0; - mpd->map.m_flags = 0; - add_page_bufs_to_extent(mpd, head, bh, - lblk); - pagevec_release(&pvec); - return 0; - } - if (buffer_delay(bh)) { - clear_buffer_delay(bh); - bh->b_blocknr = pblock++; - } - clear_buffer_unwritten(bh); - } while (++lblk < blocks && - (bh = bh->b_this_page) != head); - - /* - * FIXME: This is going to break if dioread_nolock - * supports blocksize < pagesize as we will try to - * convert potentially unmapped parts of inode. - */ - mpd->io_submit.io_end->size += PAGE_CACHE_SIZE; - /* Page fully mapped - let IO run! */ - err = mpage_submit_page(mpd, page); - if (err < 0) { - pagevec_release(&pvec); - return err; - } - start++; - } - pagevec_release(&pvec); - } - /* Extent fully mapped and matches with page boundary. We are done. */ - mpd->map.m_len = 0; - mpd->map.m_flags = 0; - return 0; -} - -static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd) -{ - struct inode *inode = mpd->inode; - struct ext4_map_blocks *map = &mpd->map; - int get_blocks_flags; - int err; - - trace_ext4_da_write_pages_extent(inode, map); - /* - * Call ext4_map_blocks() to allocate any delayed allocation blocks, or - * to convert an uninitialized extent to be initialized (in the case - * where we have written into one or more preallocated blocks). It is - * possible that we're going to need more metadata blocks than - * previously reserved. However we must not fail because we're in - * writeback and there is nothing we can do about it so it might result - * in data loss. So use reserved blocks to allocate metadata if - * possible. - * - * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if the blocks - * in question are delalloc blocks. This affects functions in many - * different parts of the allocation call path. This flag exists - * primarily because we don't want to change *many* call functions, so - * ext4_map_blocks() will set the EXT4_STATE_DELALLOC_RESERVED flag - * once the inode's allocation semaphore is taken. 
- */ - get_blocks_flags = EXT4_GET_BLOCKS_CREATE | - EXT4_GET_BLOCKS_METADATA_NOFAIL; - if (ext4_should_dioread_nolock(inode)) - get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; - if (map->m_flags & (1 << BH_Delay)) - get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; - - err = ext4_map_blocks(handle, inode, map, get_blocks_flags); - if (err < 0) - return err; - if (map->m_flags & EXT4_MAP_UNINIT) { - if (!mpd->io_submit.io_end->handle && - ext4_handle_valid(handle)) { - mpd->io_submit.io_end->handle = handle->h_rsv_handle; - handle->h_rsv_handle = NULL; - } - ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end); - } - - BUG_ON(map->m_len == 0); - if (map->m_flags & EXT4_MAP_NEW) { - struct block_device *bdev = inode->i_sb->s_bdev; - int i; - - for (i = 0; i < map->m_len; i++) - unmap_underlying_metadata(bdev, map->m_pblk + i); - } - return 0; -} - -/* - * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length - * mpd->len and submit pages underlying it for IO - * - * @handle - handle for journal operations - * @mpd - extent to map - * - * The function maps extent starting at mpd->lblk of length mpd->len. If it is - * delayed, blocks are allocated, if it is unwritten, we may need to convert - * them to initialized or split the described range from larger unwritten - * extent. Note that we need not map all the described range since allocation - * can return less blocks or the range is covered by more unwritten extents. We - * cannot map more because we are limited by reserved transaction credits. On - * the other hand we always make sure that the last touched page is fully - * mapped so that it can be written out (and thus forward progress is - * guaranteed). After mapping we submit all mapped pages for IO. - */ -static int mpage_map_and_submit_extent(handle_t *handle, - struct mpage_da_data *mpd, - bool *give_up_on_write) -{ - struct inode *inode = mpd->inode; - struct ext4_map_blocks *map = &mpd->map; - int err; - loff_t disksize; - - mpd->io_submit.io_end->offset = - ((loff_t)map->m_lblk) << inode->i_blkbits; - while (map->m_len) { - err = mpage_map_one_extent(handle, mpd); - if (err < 0) { - struct super_block *sb = inode->i_sb; - - if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) - goto invalidate_dirty_pages; + if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL, + ext4_bh_delay_or_unwritten)) { + redirty_page_for_writepage(wbc, page); + if (current->flags & PF_MEMALLOC) { /* - * Let the uper layers retry transient errors. - * In the case of ENOSPC, if ext4_count_free_blocks() - * is non-zero, a commit should free up blocks. + * For memory cleaning there's no point in writing only + * some buffers. So just bail out. Warn if we came here + * from direct reclaim. */ - if ((err == -ENOMEM) || - (err == -ENOSPC && ext4_count_free_clusters(sb))) - return err; - ext4_msg(sb, KERN_CRIT, - "Delayed block allocation failed for " - "inode %lu at logical offset %llu with" - " max blocks %u with error %d", - inode->i_ino, - (unsigned long long)map->m_lblk, - (unsigned)map->m_len, -err); - ext4_msg(sb, KERN_CRIT, - "This should not happen!! 
Data will " - "be lost\n"); - if (err == -ENOSPC) - ext4_print_free_blocks(inode); - invalidate_dirty_pages: - *give_up_on_write = true; - return err; + WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) + == PF_MEMALLOC); + unlock_page(page); + return 0; } - /* - * Update buffer state, submit mapped pages, and get us new - * extent to map - */ - err = mpage_map_and_submit_buffers(mpd); - if (err < 0) - return err; } - /* Update on-disk size after IO is submitted */ - disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT; - if (disksize > i_size_read(inode)) - disksize = i_size_read(inode); - if (disksize > EXT4_I(inode)->i_disksize) { - int err2; + if (PageChecked(page) && ext4_should_journal_data(inode)) + /* + * It's mmapped pagecache. Add buffers and journal it. There + * doesn't seem much point in redirtying the page here. + */ + return __ext4_journalled_writepage(page, len); - ext4_update_i_disksize(inode, disksize); - err2 = ext4_mark_inode_dirty(handle, inode); - if (err2) - ext4_error(inode->i_sb, - "Failed to mark inode %lu dirty", - inode->i_ino); - if (!err) - err = err2; + ext4_io_submit_init(&io_submit, wbc); + io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS); + if (!io_submit.io_end) { + redirty_page_for_writepage(wbc, page); + return -ENOMEM; } - return err; + ret = ext4_bio_write_page(&io_submit, page, len, wbc); + ext4_io_submit(&io_submit); + /* Drop io_end reference we got from init */ + ext4_put_io_end_defer(io_submit.io_end); + return ret; } /* - * Calculate the total number of credits to reserve for one writepages - * iteration. This is called from ext4_writepages(). We map an extent of - * upto MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping - * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN + - * bpp - 1 blocks in bpp different extents. + * This is called via ext4_da_writepages() to + * calculate the total number of credits to reserve to fit + * a single extent allocation into a single transaction, + * ext4_da_writpeages() will loop calling this before + * the block allocation. */ + static int ext4_da_writepages_trans_blocks(struct inode *inode) { - int bpp = ext4_journal_blocks_per_page(inode); + int max_blocks = EXT4_I(inode)->i_reserved_data_blocks; + + /* + * With non-extent format the journal credit needed to + * insert nrblocks contiguous block is dependent on + * number of contiguous block. So we will limit + * number of contiguous block to a sane value + */ + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) && + (max_blocks > EXT4_MAX_TRANS_DATA)) + max_blocks = EXT4_MAX_TRANS_DATA; - return ext4_meta_trans_blocks(inode, - MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp); + return ext4_chunk_trans_blocks(inode, max_blocks); } /* - * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages - * and underlying extent to map - * - * @mpd - where to look for pages - * - * Walk dirty pages in the mapping. If they are fully mapped, submit them for - * IO immediately. When we find a page which isn't mapped we start accumulating - * extent of buffers underlying these pages that needs mapping (formed by - * either delayed or unwritten buffers). We also lock the pages containing - * these buffers. The extent found is returned in @mpd structure (starting at - * mpd->lblk with length mpd->len blocks). - * - * Note that this function can attach bios to one io_end structure which are - * neither logically nor physically contiguous. 
Although it may seem as an - * unnecessary complication, it is actually inevitable in blocksize < pagesize - * case as we need to track IO to all buffers underlying a page in one io_end. + * write_cache_pages_da - walk the list of dirty pages of the given + * address space and accumulate pages that need writing, and call + * mpage_da_map_and_submit to map a single contiguous memory region + * and then write them. */ -static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) +static int write_cache_pages_da(handle_t *handle, + struct address_space *mapping, + struct writeback_control *wbc, + struct mpage_da_data *mpd, + pgoff_t *done_index) { - struct address_space *mapping = mpd->inode->i_mapping; - struct pagevec pvec; - unsigned int nr_pages; - pgoff_t index = mpd->first_page; - pgoff_t end = mpd->last_page; - int tag; - int i, err = 0; - int blkbits = mpd->inode->i_blkbits; - ext4_lblk_t lblk; - struct buffer_head *head; + struct buffer_head *bh, *head; + struct inode *inode = mapping->host; + struct pagevec pvec; + unsigned int nr_pages; + sector_t logical; + pgoff_t index, end; + long nr_to_write = wbc->nr_to_write; + int i, tag, ret = 0; + + memset(mpd, 0, sizeof(struct mpage_da_data)); + mpd->wbc = wbc; + mpd->inode = inode; + pagevec_init(&pvec, 0); + index = wbc->range_start >> PAGE_CACHE_SHIFT; + end = wbc->range_end >> PAGE_CACHE_SHIFT; - if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages) + if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag = PAGECACHE_TAG_TOWRITE; else tag = PAGECACHE_TAG_DIRTY; - pagevec_init(&pvec, 0); - mpd->map.m_len = 0; - mpd->next_page = index; + *done_index = index; while (index <= end) { nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); if (nr_pages == 0) - goto out; + return 0; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; @@ -2295,21 +2330,31 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) if (page->index > end) goto out; - /* If we can't merge this page, we are done. */ - if (mpd->map.m_len > 0 && mpd->next_page != page->index) - goto out; + *done_index = page->index + 1; + + /* + * If we can't merge this page, and we have + * accumulated an contiguous region, write it + */ + if ((mpd->next_page != page->index) && + (mpd->next_page != mpd->first_page)) { + mpage_da_map_and_submit(mpd); + goto ret_extent_tail; + } lock_page(page); + /* - * If the page is no longer dirty, or its mapping no - * longer corresponds to inode we are writing (which - * means it has been truncated or invalidated), or the - * page is already under writeback and we are not doing - * a data integrity writeback, skip the page + * If the page is no longer dirty, or its + * mapping no longer corresponds to inode we + * are writing (which means it has been + * truncated or invalidated), or the page is + * already under writeback and we are not + * doing a data integrity writeback, skip the page */ if (!PageDirty(page) || (PageWriteback(page) && - (mpd->wbc->sync_mode == WB_SYNC_NONE)) || + (wbc->sync_mode == WB_SYNC_NONE)) || unlikely(page->mapping != mapping)) { unlock_page(page); continue; @@ -2318,70 +2363,106 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) wait_on_page_writeback(page); BUG_ON(PageWriteback(page)); - if (mpd->map.m_len == 0) + /* + * If we have inline data and arrive here, it means that + * we will soon create the block for the 1st page, so + * we'd better clear the inline data here. 
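/*
 * Sketch of the lookup batching in the loop above: each
 * pagevec_lookup_tag() call asks for at most PAGEVEC_SIZE pages and
 * never reaches past 'end'.  PAGEVEC_SIZE = 14 is an assumed example
 * value here.
 */
#include <stdio.h>

#define PAGEVEC_SIZE 14UL

int main(void)
{
	unsigned long index = 0, end = 30;

	while (index <= end) {
		/* min(end - index, PAGEVEC_SIZE - 1) + 1 */
		unsigned long batch = end - index < PAGEVEC_SIZE - 1 ?
				      end - index : PAGEVEC_SIZE - 1;

		batch += 1;
		printf("look up at most %lu pages from index %lu\n",
		       batch, index);
		index += batch;	/* the real loop advances via page->index */
	}
	return 0;
}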
+ */ + if (ext4_has_inline_data(inode)) { + BUG_ON(ext4_test_inode_state(inode, + EXT4_STATE_MAY_INLINE_DATA)); + ext4_destroy_inline_data(handle, inode); + } + + if (mpd->next_page != page->index) mpd->first_page = page->index; mpd->next_page = page->index + 1; + logical = (sector_t) page->index << + (PAGE_CACHE_SHIFT - inode->i_blkbits); + /* Add all dirty buffers to mpd */ - lblk = ((ext4_lblk_t)page->index) << - (PAGE_CACHE_SHIFT - blkbits); head = page_buffers(page); - if (!add_page_bufs_to_extent(mpd, head, head, lblk)) - goto out; - /* So far everything mapped? Submit the page for IO. */ - if (mpd->map.m_len == 0) { - err = mpage_submit_page(mpd, page); - if (err < 0) + bh = head; + do { + BUG_ON(buffer_locked(bh)); + /* + * We need to try to allocate unmapped blocks + * in the same page. Otherwise we won't make + * progress with the page in ext4_writepage + */ + if (ext4_bh_delay_or_unwritten(NULL, bh)) { + mpage_add_bh_to_extent(mpd, logical, + bh->b_state); + if (mpd->io_done) + goto ret_extent_tail; + } else if (buffer_dirty(bh) && + buffer_mapped(bh)) { + /* + * mapped dirty buffer. We need to + * update the b_state because we look + * at b_state in mpage_da_map_blocks. + * We don't update b_size because if we + * find an unmapped buffer_head later + * we need to use the b_state flag of + * that buffer_head. + */ + if (mpd->b_size == 0) + mpd->b_state = + bh->b_state & BH_FLAGS; + } + logical++; + } while ((bh = bh->b_this_page) != head); + + if (nr_to_write > 0) { + nr_to_write--; + if (nr_to_write == 0 && + wbc->sync_mode == WB_SYNC_NONE) + /* + * We stop writing back only if we are + * not doing integrity sync. In case of + * integrity sync we have to keep going + * because someone may be concurrently + * dirtying pages, and we might have + * synced a lot of newly appeared dirty + * pages, but have not synced all of the + * old dirty pages. + */ goto out; } - - /* - * Accumulated enough dirty pages? This doesn't apply - * to WB_SYNC_ALL mode. For integrity sync we have to - * keep going because someone may be concurrently - * dirtying pages, and we might have synced a lot of - * newly appeared dirty pages, but have not synced all - * of the old dirty pages. 
- */ - if (mpd->wbc->sync_mode == WB_SYNC_NONE && - mpd->next_page - mpd->first_page >= - mpd->wbc->nr_to_write) - goto out; } pagevec_release(&pvec); cond_resched(); } return 0; +ret_extent_tail: + ret = MPAGE_DA_EXTENT_TAIL; out: pagevec_release(&pvec); - return err; -} - -static int __writepage(struct page *page, struct writeback_control *wbc, - void *data) -{ - struct address_space *mapping = data; - int ret = ext4_writepage(page, wbc); - mapping_set_error(mapping, ret); + cond_resched(); return ret; } -static int ext4_writepages(struct address_space *mapping, - struct writeback_control *wbc) + +static int ext4_da_writepages(struct address_space *mapping, + struct writeback_control *wbc) { - pgoff_t writeback_index = 0; - long nr_to_write = wbc->nr_to_write; + pgoff_t index; int range_whole = 0; - int cycled = 1; handle_t *handle = NULL; struct mpage_da_data mpd; struct inode *inode = mapping->host; - int needed_blocks, rsv_blocks = 0, ret = 0; + int pages_written = 0; + unsigned int max_pages; + int range_cyclic, cycled = 1, io_done = 0; + int needed_blocks, ret = 0; + long desired_nr_to_write, nr_to_writebump = 0; + loff_t range_start = wbc->range_start; struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); - bool done; + pgoff_t done_index = 0; + pgoff_t end; struct blk_plug plug; - bool give_up_on_write = false; - trace_ext4_writepages(inode, wbc); + trace_ext4_da_writepages(inode, wbc); /* * No pages to write? This is mainly a kludge to avoid starting @@ -2391,165 +2472,164 @@ static int ext4_writepages(struct address_space *mapping, if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) return 0; - if (ext4_should_journal_data(inode)) { - struct blk_plug plug; - int ret; - - blk_start_plug(&plug); - ret = write_cache_pages(mapping, wbc, __writepage, mapping); - blk_finish_plug(&plug); - return ret; - } - /* * If the filesystem has aborted, it is read-only, so return * right away instead of dumping stack traces later on that * will obscure the real source of the problem. We test * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because * the latter could be true if the filesystem is mounted - * read-only, and in that case, ext4_writepages should + * read-only, and in that case, ext4_da_writepages should * *never* be called, so if that ever happens, we would want * the stack trace. */ if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) return -EROFS; - if (ext4_should_dioread_nolock(inode)) { - /* - * We may need to convert upto one extent per block in - * the page and we may dirty the inode. - */ - rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits); - } - - /* - * If we have inline data and arrive here, it means that - * we will soon create the block for the 1st page, so - * we'd better clear the inline data here. - */ - if (ext4_has_inline_data(inode)) { - /* Just inode will be modified... 
*/ - handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - goto out_writepages; - } - BUG_ON(ext4_test_inode_state(inode, - EXT4_STATE_MAY_INLINE_DATA)); - ext4_destroy_inline_data(handle, inode); - ext4_journal_stop(handle); - } - if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; + range_cyclic = wbc->range_cyclic; if (wbc->range_cyclic) { - writeback_index = mapping->writeback_index; - if (writeback_index) + index = mapping->writeback_index; + if (index) cycled = 0; - mpd.first_page = writeback_index; - mpd.last_page = -1; + wbc->range_start = index << PAGE_CACHE_SHIFT; + wbc->range_end = LLONG_MAX; + wbc->range_cyclic = 0; + end = -1; } else { - mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT; - mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT; + index = wbc->range_start >> PAGE_CACHE_SHIFT; + end = wbc->range_end >> PAGE_CACHE_SHIFT; + } + + /* + * This works around two forms of stupidity. The first is in + * the writeback code, which caps the maximum number of pages + * written to be 1024 pages. This is wrong on multiple + * levels; different architectues have a different page size, + * which changes the maximum amount of data which gets + * written. Secondly, 4 megabytes is way too small. XFS + * forces this value to be 16 megabytes by multiplying + * nr_to_write parameter by four, and then relies on its + * allocator to allocate larger extents to make them + * contiguous. Unfortunately this brings us to the second + * stupidity, which is that ext4's mballoc code only allocates + * at most 2048 blocks. So we force contiguous writes up to + * the number of dirty blocks in the inode, or + * sbi->max_writeback_mb_bump whichever is smaller. + */ + max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT); + if (!range_cyclic && range_whole) { + if (wbc->nr_to_write == LONG_MAX) + desired_nr_to_write = wbc->nr_to_write; + else + desired_nr_to_write = wbc->nr_to_write * 8; + } else + desired_nr_to_write = ext4_num_dirty_pages(inode, index, + max_pages); + if (desired_nr_to_write > max_pages) + desired_nr_to_write = max_pages; + + if (wbc->nr_to_write < desired_nr_to_write) { + nr_to_writebump = desired_nr_to_write - wbc->nr_to_write; + wbc->nr_to_write = desired_nr_to_write; } - mpd.inode = inode; - mpd.wbc = wbc; - ext4_io_submit_init(&mpd.io_submit, wbc); retry: if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) - tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page); - done = false; + tag_pages_for_writeback(mapping, index, end); + blk_start_plug(&plug); - while (!done && mpd.first_page <= mpd.last_page) { - /* For each extent of pages we use new io_end */ - mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); - if (!mpd.io_submit.io_end) { - ret = -ENOMEM; - break; - } + while (!ret && wbc->nr_to_write > 0) { /* - * We have two constraints: We find one extent to map and we - * must always write out whole page (makes a difference when - * blocksize < pagesize) so that we don't block on IO when we - * try to write out the rest of the page. Journalled mode is - * not supported by delalloc. + * we insert one extent at a time. So we need + * credit needed for single extent allocation. 
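/*
 * Sketch of the nr_to_write bump arithmetic above: the per-call
 * writeback budget is raised toward a megabyte cap expressed in
 * pages.  The 128 MB cap and 4 KiB page size are assumed example
 * values.
 */
#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;		/* assumed 4 KiB pages */
	unsigned long max_writeback_mb_bump = 128;
	long nr_to_write = 1024, desired = 4096, bump = 0;

	unsigned long max_pages = max_writeback_mb_bump << (20 - page_shift);

	if (desired > (long)max_pages)
		desired = max_pages;
	if (nr_to_write < desired) {
		bump = desired - nr_to_write;	/* undone before returning */
		nr_to_write = desired;
	}
	printf("max_pages=%lu nr_to_write=%ld bump=%ld\n",
	       max_pages, nr_to_write, bump);
	return 0;
}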
+ * journalled mode is currently not supported + * by delalloc */ BUG_ON(ext4_should_journal_data(inode)); needed_blocks = ext4_da_writepages_trans_blocks(inode); - /* start a new transaction */ - handle = ext4_journal_start_with_reserve(inode, - EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks); + /* start a new transaction*/ + handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, + needed_blocks); if (IS_ERR(handle)) { ret = PTR_ERR(handle); ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " "%ld pages, ino %lu; err %d", __func__, wbc->nr_to_write, inode->i_ino, ret); - /* Release allocated io_end */ - ext4_put_io_end(mpd.io_submit.io_end); - break; + blk_finish_plug(&plug); + goto out_writepages; } - trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc); - ret = mpage_prepare_extent_to_map(&mpd); - if (!ret) { - if (mpd.map.m_len) - ret = mpage_map_and_submit_extent(handle, &mpd, - &give_up_on_write); - else { - /* - * We scanned the whole range (or exhausted - * nr_to_write), submitted what was mapped and - * didn't find anything needing mapping. We are - * done. - */ - done = true; - } + /* + * Now call write_cache_pages_da() to find the next + * contiguous region of logical blocks that need + * blocks to be allocated by ext4 and submit them. + */ + ret = write_cache_pages_da(handle, mapping, + wbc, &mpd, &done_index); + /* + * If we have a contiguous extent of pages and we + * haven't done the I/O yet, map the blocks and submit + * them for I/O. + */ + if (!mpd.io_done && mpd.next_page != mpd.first_page) { + mpage_da_map_and_submit(&mpd); + ret = MPAGE_DA_EXTENT_TAIL; } + trace_ext4_da_write_pages(inode, &mpd); + wbc->nr_to_write -= mpd.pages_written; + ext4_journal_stop(handle); - /* Submit prepared bio */ - ext4_io_submit(&mpd.io_submit); - /* Unlock pages we didn't use */ - mpage_release_unused_pages(&mpd, give_up_on_write); - /* Drop our io_end reference we got from init */ - ext4_put_io_end(mpd.io_submit.io_end); - - if (ret == -ENOSPC && sbi->s_journal) { - /* - * Commit the transaction which would + + if ((mpd.retval == -ENOSPC) && sbi->s_journal) { + /* commit the transaction which would * free blocks released in the transaction * and try again */ jbd2_journal_force_commit_nested(sbi->s_journal); ret = 0; - continue; - } - /* Fatal error - ENOMEM, EIO... */ - if (ret) + } else if (ret == MPAGE_DA_EXTENT_TAIL) { + /* + * Got one extent now try with rest of the pages. + * If mpd.retval is set -EIO, journal is aborted. + * So we don't need to write any more. 
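/*
 * Sketch of the retry-on-ENOSPC pattern above: if delayed allocation
 * fails for lack of space, a journal commit is forced (which may free
 * blocks held by the committing transaction) and the loop tries
 * again.  The allocate()/force_commit() stubs are illustrative
 * assumptions only.
 */
#include <errno.h>
#include <stdio.h>

static int space_left;

static int allocate(void)
{
	if (space_left <= 0)
		return -ENOSPC;
	space_left--;
	return 0;
}

static void force_commit(void)
{
	space_left = 1;		/* pretend the commit released one block */
}

int main(void)
{
	int ret = allocate();

	if (ret == -ENOSPC) {
		force_commit();
		ret = allocate();	/* retry after the forced commit */
	}
	printf("result %d\n", ret);
	return 0;
}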
+ */ + pages_written += mpd.pages_written; + ret = mpd.retval; + io_done = 1; + } else if (wbc->nr_to_write) + /* + * There is no more writeout needed + * or we requested for a noblocking writeout + * and we found the device congested + */ break; } blk_finish_plug(&plug); - if (!ret && !cycled) { + if (!io_done && !cycled) { cycled = 1; - mpd.last_page = writeback_index - 1; - mpd.first_page = 0; + index = 0; + wbc->range_start = index << PAGE_CACHE_SHIFT; + wbc->range_end = mapping->writeback_index - 1; goto retry; } /* Update index */ + wbc->range_cyclic = range_cyclic; if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) /* - * Set the writeback_index so that range_cyclic + * set the writeback_index so that range_cyclic * mode will write it back later */ - mapping->writeback_index = mpd.first_page; + mapping->writeback_index = done_index; out_writepages: - trace_ext4_writepages_result(inode, wbc, ret, - nr_to_write - wbc->nr_to_write); + wbc->nr_to_write -= nr_to_writebump; + wbc->range_start = range_start; + trace_ext4_da_writepages_result(inode, wbc, ret, pages_written); return ret; } @@ -2761,8 +2841,7 @@ static int ext4_da_write_end(struct file *file, return ret ? ret : copied; } -static void ext4_da_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +static void ext4_da_invalidatepage(struct page *page, unsigned long offset) { /* * Drop reserved blocks @@ -2771,10 +2850,10 @@ static void ext4_da_invalidatepage(struct page *page, unsigned int offset, if (!page_has_buffers(page)) goto out; - ext4_da_page_release_reservation(page, offset, length); + ext4_da_page_release_reservation(page, offset); out: - ext4_invalidatepage(page, offset, length); + ext4_invalidatepage(page, offset); return; } @@ -2797,7 +2876,7 @@ int ext4_alloc_da_blocks(struct inode *inode) * laptop_mode, not even desirable). However, to do otherwise * would require replicating code paths in: * - * ext4_writepages() -> + * ext4_da_writepages() -> * write_cache_pages() ---> (via passed in callback function) * __mpage_da_writepage() --> * mpage_add_bh_to_extent() @@ -2922,40 +3001,37 @@ ext4_readpages(struct file *file, struct address_space *mapping, return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); } -static void ext4_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +static void ext4_invalidatepage(struct page *page, unsigned long offset) { - trace_ext4_invalidatepage(page, offset, length); + trace_ext4_invalidatepage(page, offset); /* No journalling happens on data buffers when this function is used */ WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page))); - block_invalidatepage(page, offset, length); + block_invalidatepage(page, offset); } static int __ext4_journalled_invalidatepage(struct page *page, - unsigned int offset, - unsigned int length) + unsigned long offset) { journal_t *journal = EXT4_JOURNAL(page->mapping->host); - trace_ext4_journalled_invalidatepage(page, offset, length); + trace_ext4_journalled_invalidatepage(page, offset); /* * If it's a full truncate we just forget about the pending dirtying */ - if (offset == 0 && length == PAGE_CACHE_SIZE) + if (offset == 0) ClearPageChecked(page); - return jbd2_journal_invalidatepage(journal, page, offset, length); + return jbd2_journal_invalidatepage(journal, page, offset); } /* Wrapper for aops... 
*/ static void ext4_journalled_invalidatepage(struct page *page, - unsigned int offset, - unsigned int length) + unsigned long offset) { - WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0); + WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0); } static int ext4_releasepage(struct page *page, gfp_t wait) @@ -3065,18 +3141,11 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, BUG_ON(iocb->private == NULL); - /* - * Make all waiters for direct IO properly wait also for extent - * conversion. This also disallows race between truncate() and - * overwrite DIO as i_dio_count needs to be incremented under i_mutex. - */ - if (rw == WRITE) - atomic_inc(&inode->i_dio_count); - /* If we do a overwrite dio, i_mutex locking can be released */ overwrite = *((int *)iocb->private); if (overwrite) { + atomic_inc(&inode->i_dio_count); down_read(&EXT4_I(inode)->i_data_sem); mutex_unlock(&inode->i_mutex); } @@ -3147,19 +3216,11 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, ext4_inode_aio_set(inode, NULL); ext4_put_io_end(io_end); /* - * When no IO was submitted ext4_end_io_dio() was not + * In case of error or no write ext4_end_io_dio() was not * called so we have to put iocb's reference. */ - if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) { + if (ret <= 0 && ret != -EIOCBQUEUED) { WARN_ON(iocb->private != io_end); - WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN); - WARN_ON(io_end->iocb); - /* - * Generic code already did inode_dio_done() so we - * have to clear EXT4_IO_END_DIRECT to not do it for - * the second time. - */ - io_end->flag = 0; ext4_put_io_end(io_end); iocb->private = NULL; } @@ -3171,7 +3232,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, * for non AIO case, since the IO is already * completed, we could do the conversion right here */ - err = ext4_convert_unwritten_extents(NULL, inode, + err = ext4_convert_unwritten_extents(inode, offset, ret); if (err < 0) ret = err; @@ -3179,10 +3240,9 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, } retake_lock: - if (rw == WRITE) - inode_dio_done(inode); /* take i_mutex locking again if we do a ovewrite dio */ if (overwrite) { + inode_dio_done(inode); up_read(&EXT4_I(inode)->i_data_sem); mutex_lock(&inode->i_mutex); } @@ -3241,7 +3301,6 @@ static const struct address_space_operations ext4_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, .writepage = ext4_writepage, - .writepages = ext4_writepages, .write_begin = ext4_write_begin, .write_end = ext4_write_end, .bmap = ext4_bmap, @@ -3257,7 +3316,6 @@ static const struct address_space_operations ext4_journalled_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, .writepage = ext4_writepage, - .writepages = ext4_writepages, .write_begin = ext4_write_begin, .write_end = ext4_journalled_write_end, .set_page_dirty = ext4_journalled_set_page_dirty, @@ -3273,7 +3331,7 @@ static const struct address_space_operations ext4_da_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, .writepage = ext4_writepage, - .writepages = ext4_writepages, + .writepages = ext4_da_writepages, .write_begin = ext4_da_write_begin, .write_end = ext4_da_write_end, .bmap = ext4_bmap, @@ -3306,56 +3364,89 @@ void ext4_set_aops(struct inode *inode) inode->i_mapping->a_ops = &ext4_aops; } + /* - * ext4_block_truncate_page() zeroes out a mapping from file offset `from' - * up to the end of the block which corresponds to `from'. - * This required during truncate. 
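/*
 * Sketch of the partial-block length derived in the truncate helper
 * that follows: how many bytes remain from 'from' to the end of the
 * block containing it.  The 1 KiB block size is an assumed example
 * value.
 */
#include <stdio.h>

int main(void)
{
	unsigned long blocksize = 1024;		/* assumed */
	unsigned long from = 10000;		/* byte offset in the file */
	unsigned long offset = from & (blocksize - 1);
	unsigned long length = blocksize - offset;

	printf("zero %lu bytes from offset %lu to the end of its block\n",
	       length, from);
	return 0;
}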
We need to physically zero the tail end - * of that block so it doesn't yield old data if the file is later grown. + * ext4_discard_partial_page_buffers() + * Wrapper function for ext4_discard_partial_page_buffers_no_lock. + * This function finds and locks the page containing the offset + * "from" and passes it to ext4_discard_partial_page_buffers_no_lock. + * Calling functions that already have the page locked should call + * ext4_discard_partial_page_buffers_no_lock directly. */ -int ext4_block_truncate_page(handle_t *handle, - struct address_space *mapping, loff_t from) +int ext4_discard_partial_page_buffers(handle_t *handle, + struct address_space *mapping, loff_t from, + loff_t length, int flags) { - unsigned offset = from & (PAGE_CACHE_SIZE-1); - unsigned length; - unsigned blocksize; struct inode *inode = mapping->host; + struct page *page; + int err = 0; - blocksize = inode->i_sb->s_blocksize; - length = blocksize - (offset & (blocksize - 1)); + page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, + mapping_gfp_mask(mapping) & ~__GFP_FS); + if (!page) + return -ENOMEM; + + err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page, + from, length, flags); - return ext4_block_zero_page_range(handle, mapping, from, length); + unlock_page(page); + page_cache_release(page); + return err; } /* - * ext4_block_zero_page_range() zeros out a mapping of length 'length' - * starting from file offset 'from'. The range to be zero'd must - * be contained with in one block. If the specified range exceeds - * the end of the block it will be shortened to end of the block - * that cooresponds to 'from' + * ext4_discard_partial_page_buffers_no_lock() + * Zeros a page range of length 'length' starting from offset 'from'. + * Buffer heads that correspond to the block aligned regions of the + * zeroed range will be unmapped. Unblock aligned regions + * will have the corresponding buffer head mapped if needed so that + * that region of the page can be updated with the partial zero out. + * + * This function assumes that the page has already been locked. The + * The range to be discarded must be contained with in the given page. + * If the specified range exceeds the end of the page it will be shortened + * to the end of the page that corresponds to 'from'. This function is + * appropriate for updating a page and it buffer heads to be unmapped and + * zeroed for blocks that have been either released, or are going to be + * released. + * + * handle: The journal handle + * inode: The files inode + * page: A locked page that contains the offset "from" + * from: The starting byte offset (from the beginning of the file) + * to begin discarding + * len: The length of bytes to discard + * flags: Optional flags that may be used: + * + * EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED + * Only zero the regions of the page whose buffer heads + * have already been unmapped. This flag is appropriate + * for updating the contents of a page whose blocks may + * have already been released, and we only want to zero + * out the regions that correspond to those released blocks. + * + * Returns zero on success or negative on failure. 
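/*
 * Sketch of the per-buffer chunking done by the discard helper that
 * follows: walk a byte range inside one page block by block, never
 * letting a chunk cross a block boundary.  Sizes are assumed example
 * values.
 */
#include <stdio.h>

int main(void)
{
	unsigned long blocksize = 1024;			/* assumed */
	unsigned long offset = 700, length = 2000;	/* range in the page */
	unsigned long pos = offset;

	while (pos < offset + length) {
		unsigned long range = offset + length - pos;
		unsigned long end_of_block =
			blocksize - (pos & (blocksize - 1));

		if (range > end_of_block)
			range = end_of_block;	/* stay inside this block */
		printf("zero/unmap %lu bytes at %lu\n", range, pos);
		pos += range;
	}
	return 0;
}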
*/ -int ext4_block_zero_page_range(handle_t *handle, - struct address_space *mapping, loff_t from, loff_t length) +static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle, + struct inode *inode, struct page *page, loff_t from, + loff_t length, int flags) { ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE-1); - unsigned blocksize, max, pos; + unsigned int offset = from & (PAGE_CACHE_SIZE-1); + unsigned int blocksize, max, pos; ext4_lblk_t iblock; - struct inode *inode = mapping->host; struct buffer_head *bh; - struct page *page; int err = 0; - page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, - mapping_gfp_mask(mapping) & ~__GFP_FS); - if (!page) - return -ENOMEM; - blocksize = inode->i_sb->s_blocksize; - max = blocksize - (offset & (blocksize - 1)); + max = PAGE_CACHE_SIZE - offset; + + if (index != page->index) + return -EINVAL; /* * correct length if it does not fall between - * 'from' and the end of the block + * 'from' and the end of the page */ if (length > max || length < 0) length = max; @@ -3373,91 +3464,106 @@ int ext4_block_zero_page_range(handle_t *handle, iblock++; pos += blocksize; } - if (buffer_freed(bh)) { - BUFFER_TRACE(bh, "freed: skip"); - goto unlock; - } - if (!buffer_mapped(bh)) { - BUFFER_TRACE(bh, "unmapped"); - ext4_get_block(inode, iblock, bh, 0); - /* unmapped? It's a hole - nothing to do */ - if (!buffer_mapped(bh)) { - BUFFER_TRACE(bh, "still unmapped"); - goto unlock; - } - } - - /* Ok, it's mapped. Make sure it's up-to-date */ - if (PageUptodate(page)) - set_buffer_uptodate(bh); - if (!buffer_uptodate(bh)) { - err = -EIO; - ll_rw_block(READ, 1, &bh); - wait_on_buffer(bh); - /* Uhhuh. Read error. Complain and punt. */ - if (!buffer_uptodate(bh)) - goto unlock; - } - if (ext4_should_journal_data(inode)) { - BUFFER_TRACE(bh, "get write access"); - err = ext4_journal_get_write_access(handle, bh); - if (err) - goto unlock; - } - zero_user(page, offset, length); - BUFFER_TRACE(bh, "zeroed end of block"); + pos = offset; + while (pos < offset + length) { + unsigned int end_of_block, range_to_discard; - if (ext4_should_journal_data(inode)) { - err = ext4_handle_dirty_metadata(handle, inode, bh); - } else { err = 0; - mark_buffer_dirty(bh); - if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) - err = ext4_jbd2_file_inode(handle, inode); - } -unlock: - unlock_page(page); - page_cache_release(page); - return err; -} + /* The length of space left to zero and unmap */ + range_to_discard = offset + length - pos; -int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, - loff_t lstart, loff_t length) -{ - struct super_block *sb = inode->i_sb; - struct address_space *mapping = inode->i_mapping; - unsigned partial_start, partial_end; - ext4_fsblk_t start, end; - loff_t byte_end = (lstart + length - 1); - int err = 0; + /* The length of space until the end of the block */ + end_of_block = blocksize - (pos & (blocksize-1)); - partial_start = lstart & (sb->s_blocksize - 1); - partial_end = byte_end & (sb->s_blocksize - 1); + /* + * Do not unmap or zero past end of block + * for this buffer head + */ + if (range_to_discard > end_of_block) + range_to_discard = end_of_block; - start = lstart >> sb->s_blocksize_bits; - end = byte_end >> sb->s_blocksize_bits; - /* Handle partial zero within the single block */ - if (start == end && - (partial_start || (partial_end != sb->s_blocksize - 1))) { - err = ext4_block_zero_page_range(handle, mapping, - lstart, length); - return err; - } - /* Handle 
partial zero out on the start of the range */ - if (partial_start) { - err = ext4_block_zero_page_range(handle, mapping, - lstart, sb->s_blocksize); - if (err) - return err; + /* + * Skip this buffer head if we are only zeroing unampped + * regions of the page + */ + if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED && + buffer_mapped(bh)) + goto next; + + /* If the range is block aligned, unmap */ + if (range_to_discard == blocksize) { + clear_buffer_dirty(bh); + bh->b_bdev = NULL; + clear_buffer_mapped(bh); + clear_buffer_req(bh); + clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); + clear_buffer_uptodate(bh); + zero_user(page, pos, range_to_discard); + BUFFER_TRACE(bh, "Buffer discarded"); + goto next; + } + + /* + * If this block is not completely contained in the range + * to be discarded, then it is not going to be released. Because + * we need to keep this block, we need to make sure this part + * of the page is uptodate before we modify it by writeing + * partial zeros on it. + */ + if (!buffer_mapped(bh)) { + /* + * Buffer head must be mapped before we can read + * from the block + */ + BUFFER_TRACE(bh, "unmapped"); + ext4_get_block(inode, iblock, bh, 0); + /* unmapped? It's a hole - nothing to do */ + if (!buffer_mapped(bh)) { + BUFFER_TRACE(bh, "still unmapped"); + goto next; + } + } + + /* Ok, it's mapped. Make sure it's up-to-date */ + if (PageUptodate(page)) + set_buffer_uptodate(bh); + + if (!buffer_uptodate(bh)) { + err = -EIO; + ll_rw_block(READ, 1, &bh); + wait_on_buffer(bh); + /* Uhhuh. Read error. Complain and punt.*/ + if (!buffer_uptodate(bh)) + goto next; + } + + if (ext4_should_journal_data(inode)) { + BUFFER_TRACE(bh, "get write access"); + err = ext4_journal_get_write_access(handle, bh); + if (err) + goto next; + } + + zero_user(page, pos, range_to_discard); + + err = 0; + if (ext4_should_journal_data(inode)) { + err = ext4_handle_dirty_metadata(handle, inode, bh); + } else + mark_buffer_dirty(bh); + + BUFFER_TRACE(bh, "Partial buffer zeroed"); +next: + bh = bh->b_this_page; + iblock++; + pos += range_to_discard; } - /* Handle partial zero out on the end of the range */ - if (partial_end != sb->s_blocksize - 1) - err = ext4_block_zero_page_range(handle, mapping, - byte_end - partial_end, - partial_end + 1); + return err; } @@ -3483,12 +3589,14 @@ int ext4_can_truncate(struct inode *inode) * Returns: 0 on success or negative on failure */ -int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) +int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) { + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; ext4_lblk_t first_block, stop_block; struct address_space *mapping = inode->i_mapping; - loff_t first_block_offset, last_block_offset; + loff_t first_page, last_page, page_len; + loff_t first_page_offset, last_page_offset; handle_t *handle; unsigned int credits; int ret = 0; @@ -3539,16 +3647,23 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) offset; } - first_block_offset = round_up(offset, sb->s_blocksize); - last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; + first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + last_page = (offset + length) >> PAGE_CACHE_SHIFT; - /* Now release the pages and zero block aligned part of pages*/ - if (last_block_offset > first_block_offset) - truncate_pagecache_range(inode, first_block_offset, - last_block_offset); + first_page_offset = first_page << PAGE_CACHE_SHIFT; + last_page_offset = 
last_page << PAGE_CACHE_SHIFT; + + /* Now release the pages */ + if (last_page_offset > first_page_offset) { + truncate_pagecache_range(inode, first_page_offset, + last_page_offset - 1); + } /* Wait all existing dio workers, newcomers will block on i_mutex */ ext4_inode_block_unlocked_dio(inode); + ret = ext4_flush_unwritten_io(inode); + if (ret) + goto out_dio; inode_dio_wait(inode); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) @@ -3562,10 +3677,66 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) goto out_dio; } - ret = ext4_zero_partial_blocks(handle, inode, offset, - length); - if (ret) - goto out_stop; + /* + * Now we need to zero out the non-page-aligned data in the + * pages at the start and tail of the hole, and unmap the + * buffer heads for the block aligned regions of the page that + * were completely zeroed. + */ + if (first_page > last_page) { + /* + * If the file space being truncated is contained + * within a page just zero out and unmap the middle of + * that page + */ + ret = ext4_discard_partial_page_buffers(handle, + mapping, offset, length, 0); + + if (ret) + goto out_stop; + } else { + /* + * zero out and unmap the partial page that contains + * the start of the hole + */ + page_len = first_page_offset - offset; + if (page_len > 0) { + ret = ext4_discard_partial_page_buffers(handle, mapping, + offset, page_len, 0); + if (ret) + goto out_stop; + } + + /* + * zero out and unmap the partial page that contains + * the end of the hole + */ + page_len = offset + length - last_page_offset; + if (page_len > 0) { + ret = ext4_discard_partial_page_buffers(handle, mapping, + last_page_offset, page_len, 0); + if (ret) + goto out_stop; + } + } + + /* + * If i_size is contained in the last page, we need to + * unmap and zero the partial page after i_size + */ + if (inode->i_size >> PAGE_CACHE_SHIFT == last_page && + inode->i_size % PAGE_CACHE_SIZE != 0) { + page_len = PAGE_CACHE_SIZE - + (inode->i_size & (PAGE_CACHE_SIZE - 1)); + + if (page_len > 0) { + ret = ext4_discard_partial_page_buffers(handle, + mapping, inode->i_size, page_len, 0); + + if (ret) + goto out_stop; + } + } first_block = (offset + sb->s_blocksize - 1) >> EXT4_BLOCK_SIZE_BITS(sb); @@ -3641,6 +3812,7 @@ void ext4_truncate(struct inode *inode) unsigned int credits; handle_t *handle; struct address_space *mapping = inode->i_mapping; + loff_t page_len; /* * There is a possibility that we're either freeing the inode @@ -3667,6 +3839,12 @@ void ext4_truncate(struct inode *inode) return; } + /* + * finish any pending end_io work so we won't run the risk of + * converting any truncated blocks to initialized later + */ + ext4_flush_unwritten_io(inode); + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) credits = ext4_writepage_trans_blocks(inode); else @@ -3678,8 +3856,14 @@ void ext4_truncate(struct inode *inode) return; } - if (inode->i_size & (inode->i_sb->s_blocksize - 1)) - ext4_block_truncate_page(handle, mapping, inode->i_size); + if (inode->i_size % PAGE_CACHE_SIZE != 0) { + page_len = PAGE_CACHE_SIZE - + (inode->i_size & (PAGE_CACHE_SIZE - 1)); + + if (ext4_discard_partial_page_buffers(handle, + mapping, inode->i_size, page_len, 0)) + goto out_stop; + } /* * We add the inode to the orphan list, so that if this @@ -4448,8 +4632,7 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode) inode->i_size >> PAGE_CACHE_SHIFT); if (!page) return; - ret = __ext4_journalled_invalidatepage(page, offset, - PAGE_CACHE_SIZE - offset); + ret = __ext4_journalled_invalidatepage(page, 
offset); unlock_page(page); page_cache_release(page); if (ret != -EBUSY) @@ -4631,7 +4814,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode; - unsigned long long delalloc_blocks; + unsigned long delalloc_blocks; inode = dentry->d_inode; generic_fillattr(inode, stat); @@ -4649,16 +4832,15 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb), EXT4_I(inode)->i_reserved_data_blocks); - stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits-9); + stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; return 0; } -static int ext4_index_trans_blocks(struct inode *inode, int lblocks, - int pextents) +static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) { if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) - return ext4_ind_trans_blocks(inode, lblocks); - return ext4_ext_index_trans_blocks(inode, pextents); + return ext4_ind_trans_blocks(inode, nrblocks, chunk); + return ext4_ext_index_trans_blocks(inode, nrblocks, chunk); } /* @@ -4672,8 +4854,7 @@ static int ext4_index_trans_blocks(struct inode *inode, int lblocks, * * Also account for superblock, inode, quota and xattr blocks */ -static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, - int pextents) +static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) { ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); int gdpblocks; @@ -4681,10 +4862,14 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, int ret = 0; /* - * How many index blocks need to touch to map @lblocks logical blocks - * to @pextents physical extents? + * How many index blocks need to touch to modify nrblocks? + * The "Chunk" flag indicating whether the nrblocks is + * physically contiguous on disk + * + * For Direct IO and fallocate, they calls get_block to allocate + * one single extent at a time, so they could set the "Chunk" flag */ - idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents); + idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk); ret = idxblocks; @@ -4692,7 +4877,12 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, * Now let's see how many group bitmaps and group descriptors need * to account */ - groups = idxblocks + pextents; + groups = idxblocks; + if (chunk) + groups += 1; + else + groups += nrblocks; + gdpblocks = groups; if (groups > ngroups) groups = ngroups; @@ -4723,7 +4913,7 @@ int ext4_writepage_trans_blocks(struct inode *inode) int bpp = ext4_journal_blocks_per_page(inode); int ret; - ret = ext4_meta_trans_blocks(inode, bpp, bpp); + ret = ext4_meta_trans_blocks(inode, bpp, 0); /* Account for data blocks for journalled mode */ if (ext4_should_journal_data(inode)) diff --git a/trunk/fs/ext4/mballoc.c b/trunk/fs/ext4/mballoc.c index a9ff5e5137ca..b1ed9e07434b 100644 --- a/trunk/fs/ext4/mballoc.c +++ b/trunk/fs/ext4/mballoc.c @@ -2105,12 +2105,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac) group = ac->ac_g_ex.fe_group; for (i = 0; i < ngroups; group++, i++) { - cond_resched(); - /* - * Artificially restricted ngroups for non-extent - * files makes group > ngroups possible on first loop. 
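/*
 * Sketch of the stat->blocks adjustment above: delayed-allocated
 * filesystem blocks are converted to 512-byte units before being
 * added to the reported block count.  The 4 KiB block size is an
 * assumed example value.
 */
#include <stdio.h>

int main(void)
{
	unsigned long delalloc_blocks = 3;
	unsigned int blkbits = 12;		/* assumed 4 KiB fs blocks */

	unsigned long sectors = (delalloc_blocks << blkbits) >> 9;

	printf("%lu fs blocks -> %lu 512-byte blocks\n",
	       delalloc_blocks, sectors);
	return 0;
}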
- */ - if (group >= ngroups) + if (group == ngroups) group = 0; /* This now checks without needing the buddy page */ @@ -4406,20 +4401,17 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, repeat: /* allocate space in core */ *errp = ext4_mb_regular_allocator(ac); - if (*errp) - goto discard_and_exit; - - /* as we've just preallocated more space than - * user requested originally, we store allocated - * space in a special descriptor */ - if (ac->ac_status == AC_STATUS_FOUND && - ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) - *errp = ext4_mb_new_preallocation(ac); if (*errp) { - discard_and_exit: ext4_discard_allocated_blocks(ac); goto errout; } + + /* as we've just preallocated more space than + * user requested orinally, we store allocated + * space in a special descriptor */ + if (ac->ac_status == AC_STATUS_FOUND && + ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) + ext4_mb_new_preallocation(ac); } if (likely(ac->ac_status == AC_STATUS_FOUND)) { *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); @@ -4616,11 +4608,10 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, BUG_ON(bh && (count > 1)); for (i = 0; i < count; i++) { - cond_resched(); if (!bh) tbh = sb_find_get_block(inode->i_sb, block + i); - if (!tbh) + if (unlikely(!tbh)) continue; ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, inode, tbh, block + i); diff --git a/trunk/fs/ext4/move_extent.c b/trunk/fs/ext4/move_extent.c index e86dddbd8296..3dcbf364022f 100644 --- a/trunk/fs/ext4/move_extent.c +++ b/trunk/fs/ext4/move_extent.c @@ -912,6 +912,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, struct page *pagep[2] = {NULL, NULL}; handle_t *handle; ext4_lblk_t orig_blk_offset; + long long offs = orig_page_offset << PAGE_CACHE_SHIFT; unsigned long blocksize = orig_inode->i_sb->s_blocksize; unsigned int w_flags = 0; unsigned int tmp_data_size, data_size, replaced_size; @@ -939,6 +940,8 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, orig_blk_offset = orig_page_offset * blocks_per_page + data_offset_in_page; + offs = (long long)orig_blk_offset << orig_inode->i_blkbits; + /* Calculate data_size */ if ((orig_blk_offset + block_len_in_page - 1) == ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) { diff --git a/trunk/fs/ext4/namei.c b/trunk/fs/ext4/namei.c index ab2f6dc44b3a..6653fc35ecb7 100644 --- a/trunk/fs/ext4/namei.c +++ b/trunk/fs/ext4/namei.c @@ -918,8 +918,11 @@ static int htree_dirblock_to_tree(struct file *dir_file, bh->b_data, bh->b_size, (block<i_sb)) + ((char *)de - bh->b_data))) { - /* silently ignore the rest of the block */ - break; + /* On error, skip the f_pos to the next block. */ + dir_file->f_pos = (dir_file->f_pos | + (dir->i_sb->s_blocksize - 1)) + 1; + brelse(bh); + return count; } ext4fs_dirhash(de->name, de->name_len, hinfo); if ((hinfo->hash < start_hash) || diff --git a/trunk/fs/ext4/page-io.c b/trunk/fs/ext4/page-io.c index 48786cdb5e6c..19599bded62a 100644 --- a/trunk/fs/ext4/page-io.c +++ b/trunk/fs/ext4/page-io.c @@ -46,82 +46,29 @@ void ext4_exit_pageio(void) } /* - * Print an buffer I/O error compatible with the fs/buffer.c. This - * provides compatibility with dmesg scrapers that look for a specific - * buffer I/O error message. We really need a unified error reporting - * structure to userspace ala Digital Unix's uerf system, but it's - * probably not going to happen in my lifetime, due to LKML politics... 
+ * This function is called by ext4_evict_inode() to make sure there is + * no more pending I/O completion work left to do. */ -static void buffer_io_error(struct buffer_head *bh) +void ext4_ioend_shutdown(struct inode *inode) { - char b[BDEVNAME_SIZE]; - printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n", - bdevname(bh->b_bdev, b), - (unsigned long long)bh->b_blocknr); -} - -static void ext4_finish_bio(struct bio *bio) -{ - int i; - int error = !test_bit(BIO_UPTODATE, &bio->bi_flags); - - for (i = 0; i < bio->bi_vcnt; i++) { - struct bio_vec *bvec = &bio->bi_io_vec[i]; - struct page *page = bvec->bv_page; - struct buffer_head *bh, *head; - unsigned bio_start = bvec->bv_offset; - unsigned bio_end = bio_start + bvec->bv_len; - unsigned under_io = 0; - unsigned long flags; + wait_queue_head_t *wq = ext4_ioend_wq(inode); - if (!page) - continue; - - if (error) { - SetPageError(page); - set_bit(AS_EIO, &page->mapping->flags); - } - bh = head = page_buffers(page); - /* - * We check all buffers in the page under BH_Uptodate_Lock - * to avoid races with other end io clearing async_write flags - */ - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &head->b_state); - do { - if (bh_offset(bh) < bio_start || - bh_offset(bh) + bh->b_size > bio_end) { - if (buffer_async_write(bh)) - under_io++; - continue; - } - clear_buffer_async_write(bh); - if (error) - buffer_io_error(bh); - } while ((bh = bh->b_this_page) != head); - bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); - local_irq_restore(flags); - if (!under_io) - end_page_writeback(page); - } + wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0)); + /* + * We need to make sure the work structure is finished being + * used before we let the inode get destroyed. + */ + if (work_pending(&EXT4_I(inode)->i_unwritten_work)) + cancel_work_sync(&EXT4_I(inode)->i_unwritten_work); } static void ext4_release_io_end(ext4_io_end_t *io_end) { - struct bio *bio, *next_bio; - BUG_ON(!list_empty(&io_end->list)); BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN); - WARN_ON(io_end->handle); if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count)) wake_up_all(ext4_ioend_wq(io_end->inode)); - - for (bio = io_end->bio; bio; bio = next_bio) { - next_bio = bio->bi_private; - ext4_finish_bio(bio); - bio_put(bio); - } if (io_end->flag & EXT4_IO_END_DIRECT) inode_dio_done(io_end->inode); if (io_end->iocb) @@ -139,28 +86,19 @@ static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end) wake_up_all(ext4_ioend_wq(inode)); } -/* - * Check a range of space and convert unwritten extents to written. Note that - * we are protected from truncate touching same part of extent tree by the - * fact that truncate code waits for all DIO to finish (thus exclusion from - * direct IO is achieved) and also waits for PageWriteback bits. Thus we - * cannot get to ext4_ext_truncate() before all IOs overlapping that range are - * completed (happens from ext4_free_ioend()). - */ +/* check a range of space and convert unwritten extents to written. 
*/ static int ext4_end_io(ext4_io_end_t *io) { struct inode *inode = io->inode; loff_t offset = io->offset; ssize_t size = io->size; - handle_t *handle = io->handle; int ret = 0; ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p," "list->prev 0x%p\n", io, inode->i_ino, io->list.next, io->list.prev); - io->handle = NULL; /* Following call will use up the handle */ - ret = ext4_convert_unwritten_extents(handle, inode, offset, size); + ret = ext4_convert_unwritten_extents(inode, offset, size); if (ret < 0) { ext4_msg(inode->i_sb, KERN_EMERG, "failed to convert unwritten extents to written " @@ -173,17 +111,20 @@ static int ext4_end_io(ext4_io_end_t *io) return ret; } -static void dump_completed_IO(struct inode *inode, struct list_head *head) +static void dump_completed_IO(struct inode *inode) { #ifdef EXT4FS_DEBUG struct list_head *cur, *before, *after; ext4_io_end_t *io, *io0, *io1; - if (list_empty(head)) + if (list_empty(&EXT4_I(inode)->i_completed_io_list)) { + ext4_debug("inode %lu completed_io list is empty\n", + inode->i_ino); return; + } - ext4_debug("Dump inode %lu completed io list\n", inode->i_ino); - list_for_each_entry(io, head, list) { + ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino); + list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) { cur = &io->list; before = cur->prev; io0 = container_of(before, ext4_io_end_t, list); @@ -204,23 +145,16 @@ static void ext4_add_complete_io(ext4_io_end_t *io_end) unsigned long flags; BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN)); + wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq; + spin_lock_irqsave(&ei->i_completed_io_lock, flags); - if (io_end->handle) { - wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq; - if (list_empty(&ei->i_rsv_conversion_list)) - queue_work(wq, &ei->i_rsv_conversion_work); - list_add_tail(&io_end->list, &ei->i_rsv_conversion_list); - } else { - wq = EXT4_SB(io_end->inode->i_sb)->unrsv_conversion_wq; - if (list_empty(&ei->i_unrsv_conversion_list)) - queue_work(wq, &ei->i_unrsv_conversion_work); - list_add_tail(&io_end->list, &ei->i_unrsv_conversion_list); - } + if (list_empty(&ei->i_completed_io_list)) + queue_work(wq, &ei->i_unwritten_work); + list_add_tail(&io_end->list, &ei->i_completed_io_list); spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); } -static int ext4_do_flush_completed_IO(struct inode *inode, - struct list_head *head) +static int ext4_do_flush_completed_IO(struct inode *inode) { ext4_io_end_t *io; struct list_head unwritten; @@ -229,8 +163,8 @@ static int ext4_do_flush_completed_IO(struct inode *inode, int err, ret = 0; spin_lock_irqsave(&ei->i_completed_io_lock, flags); - dump_completed_IO(inode, head); - list_replace_init(head, &unwritten); + dump_completed_IO(inode); + list_replace_init(&ei->i_completed_io_list, &unwritten); spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); while (!list_empty(&unwritten)) { @@ -246,20 +180,23 @@ static int ext4_do_flush_completed_IO(struct inode *inode, } /* - * work on completed IO, to convert unwritten extents to extents + * work on completed aio dio IO, to convert unwritten extents to extents */ -void ext4_end_io_rsv_work(struct work_struct *work) +void ext4_end_io_work(struct work_struct *work) { struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info, - i_rsv_conversion_work); - ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list); + i_unwritten_work); + ext4_do_flush_completed_IO(&ei->vfs_inode); } -void ext4_end_io_unrsv_work(struct work_struct *work) 
+int ext4_flush_unwritten_io(struct inode *inode) { - struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info, - i_unrsv_conversion_work); - ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_unrsv_conversion_list); + int ret; + WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) && + !(inode->i_state & I_FREEING)); + ret = ext4_do_flush_completed_IO(inode); + ext4_unwritten_wait(inode); + return ret; } ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags) @@ -291,10 +228,8 @@ int ext4_put_io_end(ext4_io_end_t *io_end) if (atomic_dec_and_test(&io_end->count)) { if (io_end->flag & EXT4_IO_END_UNWRITTEN) { - err = ext4_convert_unwritten_extents(io_end->handle, - io_end->inode, io_end->offset, - io_end->size); - io_end->handle = NULL; + err = ext4_convert_unwritten_extents(io_end->inode, + io_end->offset, io_end->size); ext4_clear_io_unwritten_flag(io_end); } ext4_release_io_end(io_end); @@ -308,31 +243,79 @@ ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end) return io_end; } +/* + * Print an buffer I/O error compatible with the fs/buffer.c. This + * provides compatibility with dmesg scrapers that look for a specific + * buffer I/O error message. We really need a unified error reporting + * structure to userspace ala Digital Unix's uerf system, but it's + * probably not going to happen in my lifetime, due to LKML politics... + */ +static void buffer_io_error(struct buffer_head *bh) +{ + char b[BDEVNAME_SIZE]; + printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n", + bdevname(bh->b_bdev, b), + (unsigned long long)bh->b_blocknr); +} + static void ext4_end_bio(struct bio *bio, int error) { ext4_io_end_t *io_end = bio->bi_private; + struct inode *inode; + int i; + int blocksize; sector_t bi_sector = bio->bi_sector; BUG_ON(!io_end); + inode = io_end->inode; + blocksize = 1 << inode->i_blkbits; + bio->bi_private = NULL; bio->bi_end_io = NULL; if (test_bit(BIO_UPTODATE, &bio->bi_flags)) error = 0; + for (i = 0; i < bio->bi_vcnt; i++) { + struct bio_vec *bvec = &bio->bi_io_vec[i]; + struct page *page = bvec->bv_page; + struct buffer_head *bh, *head; + unsigned bio_start = bvec->bv_offset; + unsigned bio_end = bio_start + bvec->bv_len; + unsigned under_io = 0; + unsigned long flags; - if (io_end->flag & EXT4_IO_END_UNWRITTEN) { + if (!page) + continue; + + if (error) { + SetPageError(page); + set_bit(AS_EIO, &page->mapping->flags); + } + bh = head = page_buffers(page); /* - * Link bio into list hanging from io_end. We have to do it - * atomically as bio completions can be racing against each - * other. 
+ * We check all buffers in the page under BH_Uptodate_Lock + * to avoid races with other end io clearing async_write flags */ - bio->bi_private = xchg(&io_end->bio, bio); - } else { - ext4_finish_bio(bio); - bio_put(bio); + local_irq_save(flags); + bit_spin_lock(BH_Uptodate_Lock, &head->b_state); + do { + if (bh_offset(bh) < bio_start || + bh_offset(bh) + blocksize > bio_end) { + if (buffer_async_write(bh)) + under_io++; + continue; + } + clear_buffer_async_write(bh); + if (error) + buffer_io_error(bh); + } while ((bh = bh->b_this_page) != head); + bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); + local_irq_restore(flags); + if (!under_io) + end_page_writeback(page); } + bio_put(bio); if (error) { - struct inode *inode = io_end->inode; - + io_end->flag |= EXT4_IO_END_ERROR; ext4_warning(inode->i_sb, "I/O error writing to inode %lu " "(offset %llu size %ld starting block %llu)", inode->i_ino, @@ -341,6 +324,7 @@ static void ext4_end_bio(struct bio *bio, int error) (unsigned long long) bi_sector >> (inode->i_blkbits - 9)); } + ext4_put_io_end_defer(io_end); } @@ -372,12 +356,13 @@ static int io_submit_init_bio(struct ext4_io_submit *io, struct bio *bio; bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES)); - if (!bio) - return -ENOMEM; bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio->bi_bdev = bh->b_bdev; bio->bi_end_io = ext4_end_bio; bio->bi_private = ext4_get_io_end(io->io_end); + if (!io->io_end->size) + io->io_end->offset = (bh->b_page->index << PAGE_CACHE_SHIFT) + + bh_offset(bh); io->io_bio = bio; io->io_next_block = bh->b_blocknr; return 0; @@ -387,6 +372,7 @@ static int io_submit_add_bh(struct ext4_io_submit *io, struct inode *inode, struct buffer_head *bh) { + ext4_io_end_t *io_end; int ret; if (io->io_bio && bh->b_blocknr != io->io_next_block) { @@ -401,6 +387,10 @@ static int io_submit_add_bh(struct ext4_io_submit *io, ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh)); if (ret != bh->b_size) goto submit_and_retry; + io_end = io->io_end; + if (test_clear_buffer_uninit(bh)) + ext4_set_io_unwritten_flag(inode, io_end); + io_end->size += bh->b_size; io->io_next_block++; return 0; } diff --git a/trunk/fs/ext4/resize.c b/trunk/fs/ext4/resize.c index c5adbb318a90..b27c96d01965 100644 --- a/trunk/fs/ext4/resize.c +++ b/trunk/fs/ext4/resize.c @@ -79,20 +79,12 @@ static int verify_group_input(struct super_block *sb, ext4_fsblk_t end = start + input->blocks_count; ext4_group_t group = input->group; ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group; - unsigned overhead; - ext4_fsblk_t metaend; + unsigned overhead = ext4_group_overhead_blocks(sb, group); + ext4_fsblk_t metaend = start + overhead; struct buffer_head *bh = NULL; ext4_grpblk_t free_blocks_count, offset; int err = -EINVAL; - if (group != sbi->s_groups_count) { - ext4_warning(sb, "Cannot add at group %u (only %u groups)", - input->group, sbi->s_groups_count); - return -EINVAL; - } - - overhead = ext4_group_overhead_blocks(sb, group); - metaend = start + overhead; input->free_blocks_count = free_blocks_count = input->blocks_count - 2 - overhead - sbi->s_itb_per_group; @@ -104,7 +96,10 @@ static int verify_group_input(struct super_block *sb, free_blocks_count, input->reserved_blocks); ext4_get_group_no_and_offset(sb, start, NULL, &offset); - if (offset != 0) + if (group != sbi->s_groups_count) + ext4_warning(sb, "Cannot add at group %u (only %u groups)", + input->group, sbi->s_groups_count); + else if (offset != 0) ext4_warning(sb, "Last group not full"); else if (input->reserved_blocks > 
input->blocks_count / 5) ext4_warning(sb, "Reserved blocks too high (%u)", @@ -1556,10 +1551,11 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) int reserved_gdb = ext4_bg_has_super(sb, input->group) ? le16_to_cpu(es->s_reserved_gdt_blocks) : 0; struct inode *inode = NULL; - int gdb_off; + int gdb_off, gdb_num; int err; __u16 bg_flags = 0; + gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb); gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb); if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb, @@ -1660,10 +1656,12 @@ static int ext4_group_extend_no_check(struct super_block *sb, err = err2; if (!err) { + ext4_fsblk_t first_block; + first_block = ext4_group_first_block_no(sb, 0); if (test_opt(sb, DEBUG)) printk(KERN_DEBUG "EXT4-fs: extended group to %llu " "blocks\n", ext4_blocks_count(es)); - update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, + update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr - first_block, (char *)es, sizeof(struct ext4_super_block), 0); } return err; diff --git a/trunk/fs/ext4/super.c b/trunk/fs/ext4/super.c index 85b3dd60169b..94cc84db7c9a 100644 --- a/trunk/fs/ext4/super.c +++ b/trunk/fs/ext4/super.c @@ -69,7 +69,6 @@ static void ext4_mark_recovery_complete(struct super_block *sb, static void ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es); static int ext4_sync_fs(struct super_block *sb, int wait); -static int ext4_sync_fs_nojournal(struct super_block *sb, int wait); static int ext4_remount(struct super_block *sb, int *flags, char *data); static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); static int ext4_unfreeze(struct super_block *sb); @@ -399,11 +398,6 @@ static void ext4_handle_error(struct super_block *sb) } if (test_opt(sb, ERRORS_RO)) { ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); - /* - * Make sure updated value of ->s_mount_flags will be visible - * before ->s_flags update - */ - smp_wmb(); sb->s_flags |= MS_RDONLY; } if (test_opt(sb, ERRORS_PANIC)) @@ -428,9 +422,9 @@ void __ext4_error(struct super_block *sb, const char *function, ext4_handle_error(sb); } -void __ext4_error_inode(struct inode *inode, const char *function, - unsigned int line, ext4_fsblk_t block, - const char *fmt, ...) +void ext4_error_inode(struct inode *inode, const char *function, + unsigned int line, ext4_fsblk_t block, + const char *fmt, ...) { va_list args; struct va_format vaf; @@ -457,9 +451,9 @@ void __ext4_error_inode(struct inode *inode, const char *function, ext4_handle_error(inode->i_sb); } -void __ext4_error_file(struct file *file, const char *function, - unsigned int line, ext4_fsblk_t block, - const char *fmt, ...) +void ext4_error_file(struct file *file, const char *function, + unsigned int line, ext4_fsblk_t block, + const char *fmt, ...) 
{ va_list args; struct va_format vaf; @@ -576,13 +570,8 @@ void __ext4_abort(struct super_block *sb, const char *function, if ((sb->s_flags & MS_RDONLY) == 0) { ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); - EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED; - /* - * Make sure updated value of ->s_mount_flags will be visible - * before ->s_flags update - */ - smp_wmb(); sb->s_flags |= MS_RDONLY; + EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED; if (EXT4_SB(sb)->s_journal) jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO); save_error_info(sb, function, line); @@ -591,8 +580,7 @@ void __ext4_abort(struct super_block *sb, const char *function, panic("EXT4-fs panic from previous error\n"); } -void __ext4_msg(struct super_block *sb, - const char *prefix, const char *fmt, ...) +void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...) { struct va_format vaf; va_list args; @@ -762,10 +750,8 @@ static void ext4_put_super(struct super_block *sb) ext4_unregister_li_request(sb); dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); - flush_workqueue(sbi->unrsv_conversion_wq); - flush_workqueue(sbi->rsv_conversion_wq); - destroy_workqueue(sbi->unrsv_conversion_wq); - destroy_workqueue(sbi->rsv_conversion_wq); + flush_workqueue(sbi->dio_unwritten_wq); + destroy_workqueue(sbi->dio_unwritten_wq); if (sbi->s_journal) { err = jbd2_journal_destroy(sbi->s_journal); @@ -774,7 +760,7 @@ static void ext4_put_super(struct super_block *sb) ext4_abort(sb, "Couldn't clean up the journal"); } - ext4_es_unregister_shrinker(sbi); + ext4_es_unregister_shrinker(sb); del_timer(&sbi->s_err_report); ext4_release_system_zone(sb); ext4_mb_release(sb); @@ -863,7 +849,6 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) rwlock_init(&ei->i_es_lock); INIT_LIST_HEAD(&ei->i_es_lru); ei->i_es_lru_nr = 0; - ei->i_touch_when = 0; ei->i_reserved_data_blocks = 0; ei->i_reserved_meta_blocks = 0; ei->i_allocated_meta_blocks = 0; @@ -874,15 +859,13 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) ei->i_reserved_quota = 0; #endif ei->jinode = NULL; - INIT_LIST_HEAD(&ei->i_rsv_conversion_list); - INIT_LIST_HEAD(&ei->i_unrsv_conversion_list); + INIT_LIST_HEAD(&ei->i_completed_io_list); spin_lock_init(&ei->i_completed_io_lock); ei->i_sync_tid = 0; ei->i_datasync_tid = 0; atomic_set(&ei->i_ioend_count, 0); atomic_set(&ei->i_unwritten, 0); - INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work); - INIT_WORK(&ei->i_unrsv_conversion_work, ext4_end_io_unrsv_work); + INIT_WORK(&ei->i_unwritten_work, ext4_end_io_work); return &ei->vfs_inode; } @@ -1110,7 +1093,6 @@ static const struct super_operations ext4_nojournal_sops = { .dirty_inode = ext4_dirty_inode, .drop_inode = ext4_drop_inode, .evict_inode = ext4_evict_inode, - .sync_fs = ext4_sync_fs_nojournal, .put_super = ext4_put_super, .statfs = ext4_statfs, .remount_fs = ext4_remount, @@ -1926,6 +1908,7 @@ static int ext4_fill_flex_info(struct super_block *sb) struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_group_desc *gdp = NULL; ext4_group_t flex_group; + unsigned int groups_per_flex = 0; int i, err; sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex; @@ -1933,6 +1916,7 @@ static int ext4_fill_flex_info(struct super_block *sb) sbi->s_log_groups_per_flex = 0; return 1; } + groups_per_flex = 1U << sbi->s_log_groups_per_flex; err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count); if (err) @@ -2180,22 +2164,19 @@ static void ext4_orphan_cleanup(struct super_block *sb, 
list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); dquot_initialize(inode); if (inode->i_nlink) { - if (test_opt(sb, DEBUG)) - ext4_msg(sb, KERN_DEBUG, - "%s: truncating inode %lu to %lld bytes", - __func__, inode->i_ino, inode->i_size); + ext4_msg(sb, KERN_DEBUG, + "%s: truncating inode %lu to %lld bytes", + __func__, inode->i_ino, inode->i_size); jbd_debug(2, "truncating inode %lu to %lld bytes\n", inode->i_ino, inode->i_size); mutex_lock(&inode->i_mutex); - truncate_inode_pages(inode->i_mapping, inode->i_size); ext4_truncate(inode); mutex_unlock(&inode->i_mutex); nr_truncates++; } else { - if (test_opt(sb, DEBUG)) - ext4_msg(sb, KERN_DEBUG, - "%s: deleting unreferenced inode %lu", - __func__, inode->i_ino); + ext4_msg(sb, KERN_DEBUG, + "%s: deleting unreferenced inode %lu", + __func__, inode->i_ino); jbd_debug(2, "deleting unreferenced inode %lu\n", inode->i_ino); nr_orphans++; @@ -2396,10 +2377,7 @@ struct ext4_attr { ssize_t (*show)(struct ext4_attr *, struct ext4_sb_info *, char *); ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *, const char *, size_t); - union { - int offset; - int deprecated_val; - } u; + int offset; }; static int parse_strtoull(const char *buf, @@ -2468,7 +2446,7 @@ static ssize_t inode_readahead_blks_store(struct ext4_attr *a, static ssize_t sbi_ui_show(struct ext4_attr *a, struct ext4_sb_info *sbi, char *buf) { - unsigned int *ui = (unsigned int *) (((char *) sbi) + a->u.offset); + unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset); return snprintf(buf, PAGE_SIZE, "%u\n", *ui); } @@ -2477,7 +2455,7 @@ static ssize_t sbi_ui_store(struct ext4_attr *a, struct ext4_sb_info *sbi, const char *buf, size_t count) { - unsigned int *ui = (unsigned int *) (((char *) sbi) + a->u.offset); + unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset); unsigned long t; int ret; @@ -2526,20 +2504,12 @@ static ssize_t trigger_test_error(struct ext4_attr *a, return count; } -static ssize_t sbi_deprecated_show(struct ext4_attr *a, - struct ext4_sb_info *sbi, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%d\n", a->u.deprecated_val); -} - #define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \ static struct ext4_attr ext4_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ .show = _show, \ .store = _store, \ - .u = { \ - .offset = offsetof(struct ext4_sb_info, _elname),\ - }, \ + .offset = offsetof(struct ext4_sb_info, _elname), \ } #define EXT4_ATTR(name, mode, show, store) \ static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store) @@ -2550,14 +2520,6 @@ static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store) #define EXT4_RW_ATTR_SBI_UI(name, elname) \ EXT4_ATTR_OFFSET(name, 0644, sbi_ui_show, sbi_ui_store, elname) #define ATTR_LIST(name) &ext4_attr_##name.attr -#define EXT4_DEPRECATED_ATTR(_name, _val) \ -static struct ext4_attr ext4_attr_##_name = { \ - .attr = {.name = __stringify(_name), .mode = 0444 }, \ - .show = sbi_deprecated_show, \ - .u = { \ - .deprecated_val = _val, \ - }, \ -} EXT4_RO_ATTR(delayed_allocation_blocks); EXT4_RO_ATTR(session_write_kbytes); @@ -2572,7 +2534,7 @@ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan); EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs); EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request); EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc); -EXT4_DEPRECATED_ATTR(max_writeback_mb_bump, 128); +EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump); EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, 
s_extent_max_zeroout_kb); EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error); @@ -3801,7 +3763,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) sbi->s_err_report.data = (unsigned long) sb; /* Register extent status tree shrinker */ - ext4_es_register_shrinker(sbi); + ext4_es_register_shrinker(sb); err = percpu_counter_init(&sbi->s_freeclusters_counter, ext4_count_free_clusters(sb)); @@ -3825,6 +3787,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } sbi->s_stripe = ext4_get_stripe_size(sbi); + sbi->s_max_writeback_mb_bump = 128; sbi->s_extent_max_zeroout_kb = 32; /* @@ -3952,20 +3915,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) * The maximum number of concurrent works can be high and * concurrency isn't really necessary. Limit it to 1. */ - EXT4_SB(sb)->rsv_conversion_wq = - alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); - if (!EXT4_SB(sb)->rsv_conversion_wq) { - printk(KERN_ERR "EXT4-fs: failed to create workqueue\n"); - ret = -ENOMEM; - goto failed_mount4; - } - - EXT4_SB(sb)->unrsv_conversion_wq = - alloc_workqueue("ext4-unrsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); - if (!EXT4_SB(sb)->unrsv_conversion_wq) { - printk(KERN_ERR "EXT4-fs: failed to create workqueue\n"); + EXT4_SB(sb)->dio_unwritten_wq = + alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); + if (!EXT4_SB(sb)->dio_unwritten_wq) { + printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n"); ret = -ENOMEM; - goto failed_mount4; + goto failed_mount_wq; } /* @@ -4119,17 +4074,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) sb->s_root = NULL; failed_mount4: ext4_msg(sb, KERN_ERR, "mount failed"); - if (EXT4_SB(sb)->rsv_conversion_wq) - destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq); - if (EXT4_SB(sb)->unrsv_conversion_wq) - destroy_workqueue(EXT4_SB(sb)->unrsv_conversion_wq); + destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq); failed_mount_wq: if (sbi->s_journal) { jbd2_journal_destroy(sbi->s_journal); sbi->s_journal = NULL; } failed_mount3: - ext4_es_unregister_shrinker(sbi); + ext4_es_unregister_shrinker(sb); del_timer(&sbi->s_err_report); if (sbi->s_flex_groups) ext4_kvfree(sbi->s_flex_groups); @@ -4565,52 +4517,19 @@ static int ext4_sync_fs(struct super_block *sb, int wait) { int ret = 0; tid_t target; - bool needs_barrier = false; struct ext4_sb_info *sbi = EXT4_SB(sb); trace_ext4_sync_fs(sb, wait); - flush_workqueue(sbi->rsv_conversion_wq); - flush_workqueue(sbi->unrsv_conversion_wq); + flush_workqueue(sbi->dio_unwritten_wq); /* * Writeback quota in non-journalled quota case - journalled quota has * no dirty dquots */ dquot_writeback_dquots(sb, -1); - /* - * Data writeback is possible w/o journal transaction, so barrier must - * being sent at the end of the function. But we can skip it if - * transaction_commit will do it for us. 
- */ - target = jbd2_get_latest_transaction(sbi->s_journal); - if (wait && sbi->s_journal->j_flags & JBD2_BARRIER && - !jbd2_trans_will_send_data_barrier(sbi->s_journal, target)) - needs_barrier = true; - if (jbd2_journal_start_commit(sbi->s_journal, &target)) { if (wait) - ret = jbd2_log_wait_commit(sbi->s_journal, target); - } - if (needs_barrier) { - int err; - err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL); - if (!ret) - ret = err; + jbd2_log_wait_commit(sbi->s_journal, target); } - - return ret; -} - -static int ext4_sync_fs_nojournal(struct super_block *sb, int wait) -{ - int ret = 0; - - trace_ext4_sync_fs(sb, wait); - flush_workqueue(EXT4_SB(sb)->rsv_conversion_wq); - flush_workqueue(EXT4_SB(sb)->unrsv_conversion_wq); - dquot_writeback_dquots(sb, -1); - if (wait && test_opt(sb, BARRIER)) - ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL); - return ret; } diff --git a/trunk/fs/f2fs/Kconfig b/trunk/fs/f2fs/Kconfig index e06e0995e00f..fd27e7e6326e 100644 --- a/trunk/fs/f2fs/Kconfig +++ b/trunk/fs/f2fs/Kconfig @@ -51,15 +51,3 @@ config F2FS_FS_POSIX_ACL Linux website . If you don't know what Access Control Lists are, say N - -config F2FS_FS_SECURITY - bool "F2FS Security Labels" - depends on F2FS_FS_XATTR - help - Security labels provide an access control facility to support Linux - Security Models (LSMs) accepted by AppArmor, SELinux, Smack and TOMOYO - Linux. This option enables an extended attribute handler for file - security labels in the f2fs filesystem, so that it requires enabling - the extended attribute support in advance. - - If you are not using a security module, say N. diff --git a/trunk/fs/f2fs/acl.c b/trunk/fs/f2fs/acl.c index b7826ec1b470..44abc2f286e0 100644 --- a/trunk/fs/f2fs/acl.c +++ b/trunk/fs/f2fs/acl.c @@ -250,7 +250,7 @@ static int f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl) } } - error = f2fs_setxattr(inode, name_index, "", value, size, NULL); + error = f2fs_setxattr(inode, name_index, "", value, size); kfree(value); if (!error) diff --git a/trunk/fs/f2fs/checkpoint.c b/trunk/fs/f2fs/checkpoint.c index 66a6b85a51d8..b1de01da1a40 100644 --- a/trunk/fs/f2fs/checkpoint.c +++ b/trunk/fs/f2fs/checkpoint.c @@ -357,8 +357,8 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, unsigned long blk_size = sbi->blocksize; struct f2fs_checkpoint *cp_block; unsigned long long cur_version = 0, pre_version = 0; + unsigned int crc = 0; size_t crc_offset; - __u32 crc = 0; /* Read the 1st cp block in this CP pack */ cp_page_1 = get_meta_page(sbi, cp_addr); @@ -369,7 +369,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, if (crc_offset >= blk_size) goto invalid_cp1; - crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset))); + crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset); if (!f2fs_crc_valid(crc, cp_block, crc_offset)) goto invalid_cp1; @@ -384,7 +384,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, if (crc_offset >= blk_size) goto invalid_cp2; - crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset))); + crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset); if (!f2fs_crc_valid(crc, cp_block, crc_offset)) goto invalid_cp2; @@ -450,29 +450,12 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi) return -EINVAL; } -static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new) -{ - struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); - struct list_head *head = &sbi->dir_inode_list; - struct list_head *this; - - 
list_for_each(this, head) { - struct dir_inode_entry *entry; - entry = list_entry(this, struct dir_inode_entry, list); - if (entry->inode == inode) - return -EEXIST; - } - list_add_tail(&new->list, head); -#ifdef CONFIG_F2FS_STAT_FS - sbi->n_dirty_dirs++; -#endif - return 0; -} - void set_dirty_dir_page(struct inode *inode, struct page *page) { struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct list_head *head = &sbi->dir_inode_list; struct dir_inode_entry *new; + struct list_head *this; if (!S_ISDIR(inode->i_mode)) return; @@ -486,31 +469,23 @@ void set_dirty_dir_page(struct inode *inode, struct page *page) INIT_LIST_HEAD(&new->list); spin_lock(&sbi->dir_inode_lock); - if (__add_dirty_inode(inode, new)) - kmem_cache_free(inode_entry_slab, new); + list_for_each(this, head) { + struct dir_inode_entry *entry; + entry = list_entry(this, struct dir_inode_entry, list); + if (entry->inode == inode) { + kmem_cache_free(inode_entry_slab, new); + goto out; + } + } + list_add_tail(&new->list, head); + sbi->n_dirty_dirs++; + BUG_ON(!S_ISDIR(inode->i_mode)); +out: inc_page_count(sbi, F2FS_DIRTY_DENTS); inode_inc_dirty_dents(inode); SetPagePrivate(page); - spin_unlock(&sbi->dir_inode_lock); -} -void add_dirty_dir_inode(struct inode *inode) -{ - struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); - struct dir_inode_entry *new; -retry: - new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS); - if (!new) { - cond_resched(); - goto retry; - } - new->inode = inode; - INIT_LIST_HEAD(&new->list); - - spin_lock(&sbi->dir_inode_lock); - if (__add_dirty_inode(inode, new)) - kmem_cache_free(inode_entry_slab, new); spin_unlock(&sbi->dir_inode_lock); } @@ -524,10 +499,8 @@ void remove_dirty_dir_inode(struct inode *inode) return; spin_lock(&sbi->dir_inode_lock); - if (atomic_read(&F2FS_I(inode)->dirty_dents)) { - spin_unlock(&sbi->dir_inode_lock); - return; - } + if (atomic_read(&F2FS_I(inode)->dirty_dents)) + goto out; list_for_each(this, head) { struct dir_inode_entry *entry; @@ -535,38 +508,12 @@ void remove_dirty_dir_inode(struct inode *inode) if (entry->inode == inode) { list_del(&entry->list); kmem_cache_free(inode_entry_slab, entry); -#ifdef CONFIG_F2FS_STAT_FS sbi->n_dirty_dirs--; -#endif - break; - } - } - spin_unlock(&sbi->dir_inode_lock); - - /* Only from the recovery routine */ - if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) { - clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT); - iput(inode); - } -} - -struct inode *check_dirty_dir_inode(struct f2fs_sb_info *sbi, nid_t ino) -{ - struct list_head *head = &sbi->dir_inode_list; - struct list_head *this; - struct inode *inode = NULL; - - spin_lock(&sbi->dir_inode_lock); - list_for_each(this, head) { - struct dir_inode_entry *entry; - entry = list_entry(this, struct dir_inode_entry, list); - if (entry->inode->i_ino == ino) { - inode = entry->inode; break; } } +out: spin_unlock(&sbi->dir_inode_lock); - return inode; } void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi) @@ -648,7 +595,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) block_t start_blk; struct page *cp_page; unsigned int data_sum_blocks, orphan_blocks; - __u32 crc32 = 0; + unsigned int crc32 = 0; void *kaddr; int i; @@ -717,8 +664,8 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP)); crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset)); - *((__le32 *)((unsigned char *)ckpt + - le32_to_cpu(ckpt->checksum_offset))) + *(__le32 *)((unsigned char *)ckpt + + le32_to_cpu(ckpt->checksum_offset)) 
= cpu_to_le32(crc32); start_blk = __start_cp_addr(sbi); diff --git a/trunk/fs/f2fs/data.c b/trunk/fs/f2fs/data.c index 035f9a345cdf..91ff93b0b0f4 100644 --- a/trunk/fs/f2fs/data.c +++ b/trunk/fs/f2fs/data.c @@ -68,9 +68,7 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs, struct buffer_head *bh_result) { struct f2fs_inode_info *fi = F2FS_I(inode); -#ifdef CONFIG_F2FS_STAT_FS struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); -#endif pgoff_t start_fofs, end_fofs; block_t start_blkaddr; @@ -80,9 +78,7 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs, return 0; } -#ifdef CONFIG_F2FS_STAT_FS sbi->total_hit_ext++; -#endif start_fofs = fi->ext.fofs; end_fofs = fi->ext.fofs + fi->ext.len - 1; start_blkaddr = fi->ext.blk_addr; @@ -100,9 +96,7 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs, else bh_result->b_size = UINT_MAX; -#ifdef CONFIG_F2FS_STAT_FS sbi->read_hit_ext++; -#endif read_unlock(&fi->ext.ext_lock); return 1; } @@ -205,7 +199,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync) if (dn.data_blkaddr == NEW_ADDR) return ERR_PTR(-EINVAL); - page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); + page = grab_cache_page(mapping, index); if (!page) return ERR_PTR(-ENOMEM); @@ -239,23 +233,18 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index) struct page *page; int err; -repeat: - page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); - if (!page) - return ERR_PTR(-ENOMEM); - set_new_dnode(&dn, inode, NULL, NULL, 0); err = get_dnode_of_data(&dn, index, LOOKUP_NODE); - if (err) { - f2fs_put_page(page, 1); + if (err) return ERR_PTR(err); - } f2fs_put_dnode(&dn); - if (dn.data_blkaddr == NULL_ADDR) { - f2fs_put_page(page, 1); + if (dn.data_blkaddr == NULL_ADDR) return ERR_PTR(-ENOENT); - } +repeat: + page = grab_cache_page(mapping, index); + if (!page) + return ERR_PTR(-ENOMEM); if (PageUptodate(page)) return page; @@ -285,10 +274,9 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index) * * Also, caller should grab and release a mutex by calling mutex_lock_op() and * mutex_unlock_op(). - * Note that, npage is set only by make_empty_dir. */ -struct page *get_new_data_page(struct inode *inode, - struct page *npage, pgoff_t index, bool new_i_size) +struct page *get_new_data_page(struct inode *inode, pgoff_t index, + bool new_i_size) { struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); struct address_space *mapping = inode->i_mapping; @@ -296,20 +284,18 @@ struct page *get_new_data_page(struct inode *inode, struct dnode_of_data dn; int err; - set_new_dnode(&dn, inode, npage, npage, 0); + set_new_dnode(&dn, inode, NULL, NULL, 0); err = get_dnode_of_data(&dn, index, ALLOC_NODE); if (err) return ERR_PTR(err); if (dn.data_blkaddr == NULL_ADDR) { if (reserve_new_block(&dn)) { - if (!npage) - f2fs_put_dnode(&dn); + f2fs_put_dnode(&dn); return ERR_PTR(-ENOSPC); } } - if (!npage) - f2fs_put_dnode(&dn); + f2fs_put_dnode(&dn); repeat: page = grab_cache_page(mapping, index); if (!page) @@ -339,8 +325,6 @@ struct page *get_new_data_page(struct inode *inode, if (new_i_size && i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) { i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT)); - /* Only the directory inode sets new_i_size */ - set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR); mark_inode_dirty_sync(inode); } return page; @@ -497,9 +481,8 @@ int do_write_data_page(struct page *page) * If current allocation needs SSR, * it had better in-place writes for updated data. 
*/ - if (unlikely(old_blk_addr != NEW_ADDR && - !is_cold_data(page) && - need_inplace_update(inode))) { + if (old_blk_addr != NEW_ADDR && !is_cold_data(page) && + need_inplace_update(inode)) { rewrite_data_page(F2FS_SB(inode->i_sb), page, old_blk_addr); } else { @@ -701,27 +684,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping, return err; } -static int f2fs_write_end(struct file *file, - struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata) -{ - struct inode *inode = page->mapping->host; - - SetPageUptodate(page); - set_page_dirty(page); - - if (pos + copied > i_size_read(inode)) { - i_size_write(inode, pos + copied); - mark_inode_dirty(inode); - update_inode_page(inode); - } - - unlock_page(page); - page_cache_release(page); - return copied; -} - static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { @@ -736,8 +698,7 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb, get_data_block_ro); } -static void f2fs_invalidate_data_page(struct page *page, unsigned int offset, - unsigned int length) +static void f2fs_invalidate_data_page(struct page *page, unsigned long offset) { struct inode *inode = page->mapping->host; struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); @@ -779,7 +740,7 @@ const struct address_space_operations f2fs_dblock_aops = { .writepage = f2fs_write_data_page, .writepages = f2fs_write_data_pages, .write_begin = f2fs_write_begin, - .write_end = f2fs_write_end, + .write_end = nobh_write_end, .set_page_dirty = f2fs_set_data_page_dirty, .invalidatepage = f2fs_invalidate_data_page, .releasepage = f2fs_release_data_page, diff --git a/trunk/fs/f2fs/debug.c b/trunk/fs/f2fs/debug.c index 0d6c6aafb235..8d9943786c31 100644 --- a/trunk/fs/f2fs/debug.c +++ b/trunk/fs/f2fs/debug.c @@ -175,12 +175,12 @@ static void update_mem_info(struct f2fs_sb_info *sbi) static int stat_show(struct seq_file *s, void *v) { - struct f2fs_stat_info *si; + struct f2fs_stat_info *si, *next; int i = 0; int j; mutex_lock(&f2fs_stat_mutex); - list_for_each_entry(si, &f2fs_stat_list, stat_list) { + list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) { char devname[BDEVNAME_SIZE]; update_general_status(si->sbi); diff --git a/trunk/fs/f2fs/dir.c b/trunk/fs/f2fs/dir.c index 9d1cd423450d..1ac6b93036b7 100644 --- a/trunk/fs/f2fs/dir.c +++ b/trunk/fs/f2fs/dir.c @@ -13,7 +13,6 @@ #include "f2fs.h" #include "node.h" #include "acl.h" -#include "xattr.h" static unsigned long dir_blocks(struct inode *inode) { @@ -216,9 +215,9 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p) { - struct page *page; - struct f2fs_dir_entry *de; - struct f2fs_dentry_block *dentry_blk; + struct page *page = NULL; + struct f2fs_dir_entry *de = NULL; + struct f2fs_dentry_block *dentry_blk = NULL; page = get_lock_data_page(dir, 0); if (IS_ERR(page)) @@ -265,10 +264,15 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, f2fs_put_page(page, 1); } -static void init_dent_inode(const struct qstr *name, struct page *ipage) +void init_dent_inode(const struct qstr *name, struct page *ipage) { struct f2fs_node *rn; + if (IS_ERR(ipage)) + return; + + wait_on_page_writeback(ipage); + /* copy name info. 
to this inode page */ rn = (struct f2fs_node *)page_address(ipage); rn->i.i_namelen = cpu_to_le32(name->len); @@ -276,15 +280,14 @@ static void init_dent_inode(const struct qstr *name, struct page *ipage) set_page_dirty(ipage); } -static int make_empty_dir(struct inode *inode, - struct inode *parent, struct page *page) +static int make_empty_dir(struct inode *inode, struct inode *parent) { struct page *dentry_page; struct f2fs_dentry_block *dentry_blk; struct f2fs_dir_entry *de; void *kaddr; - dentry_page = get_new_data_page(inode, page, 0, true); + dentry_page = get_new_data_page(inode, 0, true); if (IS_ERR(dentry_page)) return PTR_ERR(dentry_page); @@ -314,76 +317,63 @@ static int make_empty_dir(struct inode *inode, return 0; } -static struct page *init_inode_metadata(struct inode *inode, +static int init_inode_metadata(struct inode *inode, struct inode *dir, const struct qstr *name) { - struct page *page; - int err; - if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) { - page = new_inode_page(inode, name); - if (IS_ERR(page)) - return page; + int err; + err = new_inode_page(inode, name); + if (err) + return err; if (S_ISDIR(inode->i_mode)) { - err = make_empty_dir(inode, dir, page); - if (err) - goto error; + err = make_empty_dir(inode, dir); + if (err) { + remove_inode_page(inode); + return err; + } } err = f2fs_init_acl(inode, dir); - if (err) - goto error; - - err = f2fs_init_security(inode, dir, name, page); - if (err) - goto error; - - wait_on_page_writeback(page); + if (err) { + remove_inode_page(inode); + return err; + } } else { - page = get_node_page(F2FS_SB(dir->i_sb), inode->i_ino); - if (IS_ERR(page)) - return page; - - wait_on_page_writeback(page); - set_cold_node(inode, page); + struct page *ipage; + ipage = get_node_page(F2FS_SB(dir->i_sb), inode->i_ino); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); + set_cold_node(inode, ipage); + init_dent_inode(name, ipage); + f2fs_put_page(ipage, 1); } - - init_dent_inode(name, page); - - /* - * This file should be checkpointed during fsync. - * We lost i_pino from now on. 
- */ if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) { - file_lost_pino(inode); inc_nlink(inode); + update_inode_page(inode); } - return page; - -error: - f2fs_put_page(page, 1); - remove_inode_page(inode); - return ERR_PTR(err); + return 0; } static void update_parent_metadata(struct inode *dir, struct inode *inode, unsigned int current_depth) { + bool need_dir_update = false; + if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) { if (S_ISDIR(inode->i_mode)) { inc_nlink(dir); - set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); + need_dir_update = true; } clear_inode_flag(F2FS_I(inode), FI_NEW_INODE); } dir->i_mtime = dir->i_ctime = CURRENT_TIME; if (F2FS_I(dir)->i_current_depth != current_depth) { F2FS_I(dir)->i_current_depth = current_depth; - set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); + need_dir_update = true; } - if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) + if (need_dir_update) update_inode_page(dir); else mark_inode_dirty(dir); @@ -433,7 +423,6 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *in struct page *dentry_page = NULL; struct f2fs_dentry_block *dentry_blk = NULL; int slots = GET_DENTRY_SLOTS(namelen); - struct page *page; int err = 0; int i; @@ -459,7 +448,7 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *in bidx = dir_block_index(level, (le32_to_cpu(dentry_hash) % nbucket)); for (block = bidx; block <= (bidx + nblock - 1); block++) { - dentry_page = get_new_data_page(dir, NULL, block, true); + dentry_page = get_new_data_page(dir, block, true); if (IS_ERR(dentry_page)) return PTR_ERR(dentry_page); @@ -476,13 +465,12 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *in ++level; goto start; add_dentry: + err = init_inode_metadata(inode, dir, name); + if (err) + goto fail; + wait_on_page_writeback(dentry_page); - page = init_inode_metadata(inode, dir, name); - if (IS_ERR(page)) { - err = PTR_ERR(page); - goto fail; - } de = &dentry_blk->dentry[bit_pos]; de->hash_code = dentry_hash; de->name_len = cpu_to_le16(namelen); @@ -493,14 +481,11 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *in test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap); set_page_dirty(dentry_page); - /* we don't need to mark_inode_dirty now */ - F2FS_I(inode)->i_pino = dir->i_ino; - update_inode(inode, page); - f2fs_put_page(page, 1); - update_parent_metadata(dir, inode, current_depth); + + /* update parent inode number before releasing dentry page */ + F2FS_I(inode)->i_pino = dir->i_ino; fail: - clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); kunmap(dentry_page); f2fs_put_page(dentry_page, 1); return err; @@ -606,19 +591,24 @@ bool f2fs_empty_dir(struct inode *dir) return true; } -static int f2fs_readdir(struct file *file, struct dir_context *ctx) +static int f2fs_readdir(struct file *file, void *dirent, filldir_t filldir) { + unsigned long pos = file->f_pos; struct inode *inode = file_inode(file); unsigned long npages = dir_blocks(inode); + unsigned char *types = NULL; unsigned int bit_pos = 0, start_bit_pos = 0; + int over = 0; struct f2fs_dentry_block *dentry_blk = NULL; struct f2fs_dir_entry *de = NULL; struct page *dentry_page = NULL; - unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK); + unsigned int n = 0; unsigned char d_type = DT_UNKNOWN; int slots; - bit_pos = ((unsigned long)ctx->pos % NR_DENTRY_IN_BLOCK); + types = f2fs_filetype_table; + bit_pos = (pos % NR_DENTRY_IN_BLOCK); + n = (pos / NR_DENTRY_IN_BLOCK); for ( ; n < npages; n++) { 
dentry_page = get_lock_data_page(inode, n); @@ -628,28 +618,31 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) start_bit_pos = bit_pos; dentry_blk = kmap(dentry_page); while (bit_pos < NR_DENTRY_IN_BLOCK) { + d_type = DT_UNKNOWN; bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap, NR_DENTRY_IN_BLOCK, bit_pos); if (bit_pos >= NR_DENTRY_IN_BLOCK) break; - ctx->pos += bit_pos - start_bit_pos; de = &dentry_blk->dentry[bit_pos]; - if (de->file_type < F2FS_FT_MAX) - d_type = f2fs_filetype_table[de->file_type]; - else - d_type = DT_UNKNOWN; - if (!dir_emit(ctx, - dentry_blk->filename[bit_pos], - le16_to_cpu(de->name_len), - le32_to_cpu(de->ino), d_type)) + if (types && de->file_type < F2FS_FT_MAX) + d_type = types[de->file_type]; + + over = filldir(dirent, + dentry_blk->filename[bit_pos], + le16_to_cpu(de->name_len), + (n * NR_DENTRY_IN_BLOCK) + bit_pos, + le32_to_cpu(de->ino), d_type); + if (over) { + file->f_pos += bit_pos - start_bit_pos; goto success; + } slots = GET_DENTRY_SLOTS(le16_to_cpu(de->name_len)); bit_pos += slots; } bit_pos = 0; - ctx->pos = (n + 1) * NR_DENTRY_IN_BLOCK; + file->f_pos = (n + 1) * NR_DENTRY_IN_BLOCK; kunmap(dentry_page); f2fs_put_page(dentry_page, 1); dentry_page = NULL; @@ -666,7 +659,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) const struct file_operations f2fs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = f2fs_readdir, + .readdir = f2fs_readdir, .fsync = f2fs_sync_file, .unlocked_ioctl = f2fs_ioctl, }; diff --git a/trunk/fs/f2fs/f2fs.h b/trunk/fs/f2fs/f2fs.h index 467d42d65c48..20aab02f2a42 100644 --- a/trunk/fs/f2fs/f2fs.h +++ b/trunk/fs/f2fs/f2fs.h @@ -37,35 +37,21 @@ typecheck(unsigned long long, b) && \ ((long long)((a) - (b)) > 0)) -typedef u32 block_t; /* - * should not change u32, since it is the on-disk block - * address format, __le32. - */ +typedef u64 block_t; typedef u32 nid_t; struct f2fs_mount_info { unsigned int opt; }; -#define CRCPOLY_LE 0xedb88320 - -static inline __u32 f2fs_crc32(void *buf, size_t len) +static inline __u32 f2fs_crc32(void *buff, size_t len) { - unsigned char *p = (unsigned char *)buf; - __u32 crc = F2FS_SUPER_MAGIC; - int i; - - while (len--) { - crc ^= *p++; - for (i = 0; i < 8; i++) - crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); - } - return crc; + return crc32_le(F2FS_SUPER_MAGIC, buff, len); } -static inline bool f2fs_crc_valid(__u32 blk_crc, void *buf, size_t buf_size) +static inline bool f2fs_crc_valid(__u32 blk_crc, void *buff, size_t buff_size) { - return f2fs_crc32(buf, buf_size) == blk_crc; + return f2fs_crc32(buff, buff_size) == blk_crc; } /* @@ -162,7 +148,7 @@ struct extent_info { * i_advise uses FADVISE_XXX_BIT. We can add additional hints later. */ #define FADVISE_COLD_BIT 0x01 -#define FADVISE_LOST_PINO_BIT 0x02 +#define FADVISE_CP_BIT 0x02 struct f2fs_inode_info { struct inode vfs_inode; /* serve a vfs inode */ @@ -383,6 +369,7 @@ struct f2fs_sb_info { /* for directory inode management */ struct list_head dir_inode_list; /* dir inode list */ spinlock_t dir_inode_lock; /* for dir inode list lock */ + unsigned int n_dirty_dirs; /* # of dir inodes */ /* basic file system units */ unsigned int log_sectors_per_block; /* log2 sectors per block */ @@ -419,15 +406,12 @@ struct f2fs_sb_info { * for stat information. * one is for the LFS mode, and the other is for the SSR mode. 
*/ -#ifdef CONFIG_F2FS_STAT_FS struct f2fs_stat_info *stat_info; /* FS status information */ unsigned int segment_count[2]; /* # of allocated segments */ unsigned int block_count[2]; /* # of allocated blocks */ + unsigned int last_victim[2]; /* last victim segment # */ int total_hit_ext, read_hit_ext; /* extent cache hit ratio */ int bg_gc; /* background gc calls */ - unsigned int n_dirty_dirs; /* # of dir inodes */ -#endif - unsigned int last_victim[2]; /* last victim segment # */ spinlock_t stat_lock; /* lock for stat operations */ }; @@ -511,17 +495,9 @@ static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) static inline void mutex_lock_all(struct f2fs_sb_info *sbi) { - int i; - - for (i = 0; i < NR_GLOBAL_LOCKS; i++) { - /* - * This is the only time we take multiple fs_lock[] - * instances; the order is immaterial since we - * always hold cp_mutex, which serializes multiple - * such operations. - */ - mutex_lock_nest_lock(&sbi->fs_lock[i], &sbi->cp_mutex); - } + int i = 0; + for (; i < NR_GLOBAL_LOCKS; i++) + mutex_lock(&sbi->fs_lock[i]); } static inline void mutex_unlock_all(struct f2fs_sb_info *sbi) @@ -867,12 +843,9 @@ static inline int f2fs_clear_bit(unsigned int nr, char *addr) /* used for f2fs_inode_info->flags */ enum { FI_NEW_INODE, /* indicate newly allocated inode */ - FI_DIRTY_INODE, /* indicate inode is dirty or not */ FI_INC_LINK, /* need to increment i_nlink */ FI_ACL_MODE, /* indicate acl mode */ FI_NO_ALLOC, /* should not allocate any blocks */ - FI_UPDATE_DIR, /* should update inode block for consistency */ - FI_DELAY_IPUT, /* used for the recovery */ }; static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag) @@ -905,21 +878,14 @@ static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag) return 0; } -static inline int f2fs_readonly(struct super_block *sb) -{ - return sb->s_flags & MS_RDONLY; -} - /* * file.c */ int f2fs_sync_file(struct file *, loff_t, loff_t, int); void truncate_data_blocks(struct dnode_of_data *); void f2fs_truncate(struct inode *); -int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *); int f2fs_setattr(struct dentry *, struct iattr *); int truncate_hole(struct inode *, pgoff_t, pgoff_t); -int truncate_data_blocks_range(struct dnode_of_data *, int); long f2fs_ioctl(struct file *, unsigned int, unsigned long); long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long); @@ -947,6 +913,7 @@ struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **); ino_t f2fs_inode_by_name(struct inode *, struct qstr *); void f2fs_set_link(struct inode *, struct f2fs_dir_entry *, struct page *, struct inode *); +void init_dent_inode(const struct qstr *, struct page *); int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *); void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *); int f2fs_make_empty(struct inode *, struct inode *); @@ -981,8 +948,8 @@ void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *); int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int); int truncate_inode_blocks(struct inode *, pgoff_t); int remove_inode_page(struct inode *); -struct page *new_inode_page(struct inode *, const struct qstr *); -struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *); +int new_inode_page(struct inode *, const struct qstr *); +struct page *new_node_page(struct dnode_of_data *, unsigned int); void ra_node_page(struct f2fs_sb_info *, nid_t); struct page *get_node_page(struct f2fs_sb_info 
*, pgoff_t); struct page *get_node_page_ra(struct page *, int); @@ -1007,6 +974,7 @@ void destroy_node_manager_caches(void); */ void f2fs_balance_fs(struct f2fs_sb_info *); void invalidate_blocks(struct f2fs_sb_info *, block_t); +void locate_dirty_segment(struct f2fs_sb_info *, unsigned int); void clear_prefree_segments(struct f2fs_sb_info *); int npages_for_summary_flush(struct f2fs_sb_info *); void allocate_new_segments(struct f2fs_sb_info *); @@ -1043,9 +1011,7 @@ void remove_orphan_inode(struct f2fs_sb_info *, nid_t); int recover_orphan_inodes(struct f2fs_sb_info *); int get_valid_checkpoint(struct f2fs_sb_info *); void set_dirty_dir_page(struct inode *, struct page *); -void add_dirty_dir_inode(struct inode *); void remove_dirty_dir_inode(struct inode *); -struct inode *check_dirty_dir_inode(struct f2fs_sb_info *, nid_t); void sync_dirty_dir_inodes(struct f2fs_sb_info *); void write_checkpoint(struct f2fs_sb_info *, bool); void init_orphan_info(struct f2fs_sb_info *); @@ -1059,7 +1025,7 @@ int reserve_new_block(struct dnode_of_data *); void update_extent_cache(block_t, struct dnode_of_data *); struct page *find_data_page(struct inode *, pgoff_t, bool); struct page *get_lock_data_page(struct inode *, pgoff_t); -struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool); +struct page *get_new_data_page(struct inode *, pgoff_t, bool); int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int); int do_write_data_page(struct page *); diff --git a/trunk/fs/f2fs/file.c b/trunk/fs/f2fs/file.c index d2d2b7dbdcc1..1cae864f8dfc 100644 --- a/trunk/fs/f2fs/file.c +++ b/trunk/fs/f2fs/file.c @@ -63,10 +63,9 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, f2fs_put_dnode(&dn); mutex_unlock_op(sbi, ilock); - file_update_time(vma->vm_file); lock_page(page); if (page->mapping != inode->i_mapping || - page_offset(page) > i_size_read(inode) || + page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) { unlock_page(page); err = -EFAULT; @@ -77,7 +76,10 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, * check to see if the page is mapped already (no holes) */ if (PageMappedToDisk(page)) - goto mapped; + goto out; + + /* fill the page */ + wait_on_page_writeback(page); /* page is wholly or partially inside EOF */ if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) { @@ -88,9 +90,7 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, set_page_dirty(page); SetPageUptodate(page); -mapped: - /* fill the page */ - wait_on_page_writeback(page); + file_update_time(vma->vm_file); out: sb_end_pagefault(inode->i_sb); return block_page_mkwrite_return(err); @@ -102,24 +102,6 @@ static const struct vm_operations_struct f2fs_file_vm_ops = { .remap_pages = generic_file_remap_pages, }; -static int get_parent_ino(struct inode *inode, nid_t *pino) -{ - struct dentry *dentry; - - inode = igrab(inode); - dentry = d_find_any_alias(inode); - iput(inode); - if (!dentry) - return 0; - - inode = igrab(dentry->d_parent->d_inode); - dput(dentry); - - *pino = inode->i_ino; - iput(inode); - return 1; -} - int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; @@ -132,7 +114,7 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) .for_reclaim = 0, }; - if (f2fs_readonly(inode->i_sb)) + if (inode->i_sb->s_flags & MS_RDONLY) return 0; trace_f2fs_sync_file_enter(inode); @@ -152,7 +134,7 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int 
datasync) if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1) need_cp = true; - else if (file_wrong_pino(inode)) + else if (is_cp_file(inode)) need_cp = true; else if (!space_for_roll_forward(sbi)) need_cp = true; @@ -160,23 +142,11 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) need_cp = true; if (need_cp) { - nid_t pino; - /* all the dirty node pages should be flushed for POR */ ret = f2fs_sync_fs(inode->i_sb, 1); - if (file_wrong_pino(inode) && inode->i_nlink == 1 && - get_parent_ino(inode, &pino)) { - F2FS_I(inode)->i_pino = pino; - file_got_pino(inode); - mark_inode_dirty_sync(inode); - ret = f2fs_write_inode(inode, NULL); - if (ret) - goto out; - } } else { /* if there is no written node page, write its inode page */ while (!sync_node_pages(sbi, inode->i_ino, &wbc)) { - mark_inode_dirty_sync(inode); ret = f2fs_write_inode(inode, NULL); if (ret) goto out; @@ -198,7 +168,7 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) return 0; } -int truncate_data_blocks_range(struct dnode_of_data *dn, int count) +static int truncate_data_blocks_range(struct dnode_of_data *dn, int count) { int nr_free = 0, ofs = dn->ofs_in_node; struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); @@ -215,10 +185,10 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count) update_extent_cache(NULL_ADDR, dn); invalidate_blocks(sbi, blkaddr); + dec_valid_block_count(sbi, dn->inode, 1); nr_free++; } if (nr_free) { - dec_valid_block_count(sbi, dn->inode, nr_free); set_page_dirty(dn->node_page); sync_inode_page(dn); } @@ -321,7 +291,7 @@ void f2fs_truncate(struct inode *inode) } } -int f2fs_getattr(struct vfsmount *mnt, +static int f2fs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = dentry->d_inode; @@ -417,7 +387,7 @@ static void fill_zero(struct inode *inode, pgoff_t index, f2fs_balance_fs(sbi); ilock = mutex_lock_op(sbi); - page = get_new_data_page(inode, NULL, index, false); + page = get_new_data_page(inode, index, false); mutex_unlock_op(sbi, ilock); if (!IS_ERR(page)) { @@ -605,10 +575,10 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) int ret; switch (cmd) { - case F2FS_IOC_GETFLAGS: + case FS_IOC_GETFLAGS: flags = fi->i_flags & FS_FL_USER_VISIBLE; return put_user(flags, (int __user *) arg); - case F2FS_IOC_SETFLAGS: + case FS_IOC_SETFLAGS: { unsigned int oldflags; diff --git a/trunk/fs/f2fs/gc.c b/trunk/fs/f2fs/gc.c index 35f9b1a196aa..14961593e93c 100644 --- a/trunk/fs/f2fs/gc.c +++ b/trunk/fs/f2fs/gc.c @@ -76,9 +76,7 @@ static int gc_thread_func(void *data) else wait_ms = increase_sleep_time(wait_ms); -#ifdef CONFIG_F2FS_STAT_FS sbi->bg_gc++; -#endif /* if return value is not zero, no victim was selected */ if (f2fs_gc(sbi)) @@ -91,28 +89,23 @@ int start_gc_thread(struct f2fs_sb_info *sbi) { struct f2fs_gc_kthread *gc_th; dev_t dev = sbi->sb->s_bdev->bd_dev; - int err = 0; if (!test_opt(sbi, BG_GC)) - goto out; + return 0; gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL); - if (!gc_th) { - err = -ENOMEM; - goto out; - } + if (!gc_th) + return -ENOMEM; sbi->gc_thread = gc_th; init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head); sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi, "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev)); if (IS_ERR(gc_th->f2fs_gc_task)) { - err = PTR_ERR(gc_th->f2fs_gc_task); kfree(gc_th); sbi->gc_thread = NULL; + return -ENOMEM; } - -out: - return err; + return 0; } void stop_gc_thread(struct f2fs_sb_info *sbi) @@ 
-241,14 +234,14 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); struct victim_sel_policy p; - unsigned int secno, max_cost; + unsigned int secno; int nsearched = 0; p.alloc_mode = alloc_mode; select_policy(sbi, gc_type, type, &p); p.min_segno = NULL_SEGNO; - p.min_cost = max_cost = get_max_cost(sbi, &p); + p.min_cost = get_max_cost(sbi, &p); mutex_lock(&dirty_i->seglist_lock); @@ -287,7 +280,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, p.min_cost = cost; } - if (cost == max_cost) + if (cost == get_max_cost(sbi, &p)) continue; if (nsearched++ >= MAX_VICTIM_SEARCH) { @@ -295,8 +288,8 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, break; } } - if (p.min_segno != NULL_SEGNO) { got_it: + if (p.min_segno != NULL_SEGNO) { if (p.alloc_mode == LFS) { secno = GET_SECNO(sbi, p.min_segno); if (gc_type == FG_GC) @@ -321,21 +314,28 @@ static const struct victim_selection default_v_ops = { static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist) { + struct list_head *this; struct inode_entry *ie; - list_for_each_entry(ie, ilist, list) + list_for_each(this, ilist) { + ie = list_entry(this, struct inode_entry, list); if (ie->inode->i_ino == ino) return ie->inode; + } return NULL; } static void add_gc_inode(struct inode *inode, struct list_head *ilist) { - struct inode_entry *new_ie; + struct list_head *this; + struct inode_entry *new_ie, *ie; - if (inode == find_gc_inode(inode->i_ino, ilist)) { - iput(inode); - return; + list_for_each(this, ilist) { + ie = list_entry(this, struct inode_entry, list); + if (ie->inode == inode) { + iput(inode); + return; + } } repeat: new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS); diff --git a/trunk/fs/f2fs/inode.c b/trunk/fs/f2fs/inode.c index 2b2d45d19e3e..91ac7f9d88ee 100644 --- a/trunk/fs/f2fs/inode.c +++ b/trunk/fs/f2fs/inode.c @@ -109,6 +109,12 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino) ret = do_read_inode(inode); if (ret) goto bad_inode; + + if (!sbi->por_doing && inode->i_nlink == 0) { + ret = -ENOENT; + goto bad_inode; + } + make_now: if (ino == F2FS_NODE_INO(sbi)) { inode->i_mapping->a_ops = &f2fs_node_aops; @@ -124,7 +130,8 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino) inode->i_op = &f2fs_dir_inode_operations; inode->i_fop = &f2fs_dir_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; - mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO); + mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER_MOVABLE | + __GFP_ZERO); } else if (S_ISLNK(inode->i_mode)) { inode->i_op = &f2fs_symlink_inode_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; @@ -192,7 +199,6 @@ void update_inode(struct inode *inode, struct page *node_page) set_cold_node(inode, node_page); set_page_dirty(node_page); - clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE); } int update_inode_page(struct inode *inode) @@ -218,9 +224,6 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) inode->i_ino == F2FS_META_INO(sbi)) return 0; - if (!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_INODE)) - return 0; - if (wbc) f2fs_balance_fs(sbi); diff --git a/trunk/fs/f2fs/namei.c b/trunk/fs/f2fs/namei.c index 64c07169df05..47abc9722b17 100644 --- a/trunk/fs/f2fs/namei.c +++ b/trunk/fs/f2fs/namei.c @@ -112,7 +112,7 @@ static inline void set_cold_files(struct f2fs_sb_info *sbi, struct inode *inode, int count = le32_to_cpu(sbi->raw_super->extension_count); for (i = 0; i < count; i++) { if (is_multimedia_file(name, extlist[i])) 
{ - file_set_cold(inode); + set_cold_file(inode); break; } } @@ -149,7 +149,8 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode, alloc_nid_done(sbi, ino); - d_instantiate(dentry, inode); + if (!sbi->por_doing) + d_instantiate(dentry, inode); unlock_new_inode(inode); return 0; out: @@ -172,7 +173,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir, f2fs_balance_fs(sbi); inode->i_ctime = CURRENT_TIME; - ihold(inode); + atomic_inc(&inode->i_count); set_inode_flag(F2FS_I(inode), FI_INC_LINK); ilock = mutex_lock_op(sbi); @@ -181,10 +182,17 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir, if (err) goto out; + /* + * This file should be checkpointed during fsync. + * We lost i_pino from now on. + */ + set_cp_file(inode); + d_instantiate(dentry, inode); return 0; out: clear_inode_flag(F2FS_I(inode), FI_INC_LINK); + make_bad_inode(inode); iput(inode); return err; } @@ -490,7 +498,6 @@ const struct inode_operations f2fs_dir_inode_operations = { .rmdir = f2fs_rmdir, .mknod = f2fs_mknod, .rename = f2fs_rename, - .getattr = f2fs_getattr, .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, #ifdef CONFIG_F2FS_FS_XATTR @@ -505,7 +512,6 @@ const struct inode_operations f2fs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, - .getattr = f2fs_getattr, .setattr = f2fs_setattr, #ifdef CONFIG_F2FS_FS_XATTR .setxattr = generic_setxattr, @@ -516,7 +522,6 @@ const struct inode_operations f2fs_symlink_inode_operations = { }; const struct inode_operations f2fs_special_inode_operations = { - .getattr = f2fs_getattr, .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, #ifdef CONFIG_F2FS_FS_XATTR diff --git a/trunk/fs/f2fs/node.c b/trunk/fs/f2fs/node.c index b418aee09573..3df43b4efd89 100644 --- a/trunk/fs/f2fs/node.c +++ b/trunk/fs/f2fs/node.c @@ -408,13 +408,10 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) level = get_node_path(index, offset, noffset); nids[0] = dn->inode->i_ino; - npage[0] = dn->inode_page; + npage[0] = get_node_page(sbi, nids[0]); + if (IS_ERR(npage[0])) + return PTR_ERR(npage[0]); - if (!npage[0]) { - npage[0] = get_node_page(sbi, nids[0]); - if (IS_ERR(npage[0])) - return PTR_ERR(npage[0]); - } parent = npage[0]; if (level != 0) nids[1] = get_nid(parent, offset[0], true); @@ -433,7 +430,7 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) } dn->nid = nids[i]; - npage[i] = new_node_page(dn, noffset[i], NULL); + npage[i] = new_node_page(dn, noffset[i]); if (IS_ERR(npage[i])) { alloc_nid_failed(sbi, nids[i]); err = PTR_ERR(npage[i]); @@ -806,19 +803,22 @@ int remove_inode_page(struct inode *inode) return 0; } -struct page *new_inode_page(struct inode *inode, const struct qstr *name) +int new_inode_page(struct inode *inode, const struct qstr *name) { + struct page *page; struct dnode_of_data dn; /* allocate inode page for new inode */ set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino); - - /* caller should f2fs_put_page(page, 1); */ - return new_node_page(&dn, 0, NULL); + page = new_node_page(&dn, 0); + init_dent_inode(name, page); + if (IS_ERR(page)) + return PTR_ERR(page); + f2fs_put_page(page, 1); + return 0; } -struct page *new_node_page(struct dnode_of_data *dn, - unsigned int ofs, struct page *ipage) +struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs) { struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); struct address_space *mapping = sbi->node_inode->i_mapping; @@ -851,10 
+851,7 @@ struct page *new_node_page(struct dnode_of_data *dn, set_cold_node(dn->inode, page); dn->node_page = page; - if (ipage) - update_inode(dn->inode, ipage); - else - sync_inode_page(dn); + sync_inode_page(dn); set_page_dirty(page); if (ofs == 0) inc_valid_inode_count(sbi); @@ -1208,8 +1205,7 @@ static int f2fs_set_node_page_dirty(struct page *page) return 0; } -static void f2fs_invalidate_node_page(struct page *page, unsigned int offset, - unsigned int length) +static void f2fs_invalidate_node_page(struct page *page, unsigned long offset) { struct inode *inode = page->mapping->host; struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); @@ -1496,10 +1492,9 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) new_ni = old_ni; new_ni.ino = ino; - if (!inc_valid_node_count(sbi, NULL, 1)) - WARN_ON(1); set_node_addr(sbi, &new_ni, NEW_ADDR); inc_valid_inode_count(sbi); + f2fs_put_page(ipage, 1); return 0; } diff --git a/trunk/fs/f2fs/node.h b/trunk/fs/f2fs/node.h index c65fb4f4230f..0a2d72f0024d 100644 --- a/trunk/fs/f2fs/node.h +++ b/trunk/fs/f2fs/node.h @@ -275,27 +275,25 @@ static inline nid_t get_nid(struct page *p, int off, bool i) * - Mark cold node blocks in their node footer * - Mark cold data pages in page cache */ -static inline int is_file(struct inode *inode, int type) +static inline int is_cold_file(struct inode *inode) { - return F2FS_I(inode)->i_advise & type; + return F2FS_I(inode)->i_advise & FADVISE_COLD_BIT; } -static inline void set_file(struct inode *inode, int type) +static inline void set_cold_file(struct inode *inode) { - F2FS_I(inode)->i_advise |= type; + F2FS_I(inode)->i_advise |= FADVISE_COLD_BIT; } -static inline void clear_file(struct inode *inode, int type) +static inline int is_cp_file(struct inode *inode) { - F2FS_I(inode)->i_advise &= ~type; + return F2FS_I(inode)->i_advise & FADVISE_CP_BIT; } -#define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT) -#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT) -#define file_set_cold(inode) set_file(inode, FADVISE_COLD_BIT) -#define file_lost_pino(inode) set_file(inode, FADVISE_LOST_PINO_BIT) -#define file_clear_cold(inode) clear_file(inode, FADVISE_COLD_BIT) -#define file_got_pino(inode) clear_file(inode, FADVISE_LOST_PINO_BIT) +static inline void set_cp_file(struct inode *inode) +{ + F2FS_I(inode)->i_advise |= FADVISE_CP_BIT; +} static inline int is_cold_data(struct page *page) { @@ -312,16 +310,29 @@ static inline void clear_cold_data(struct page *page) ClearPageChecked(page); } -static inline int is_node(struct page *page, int type) +static inline int is_cold_node(struct page *page) { void *kaddr = page_address(page); struct f2fs_node *rn = (struct f2fs_node *)kaddr; - return le32_to_cpu(rn->footer.flag) & (1 << type); + unsigned int flag = le32_to_cpu(rn->footer.flag); + return flag & (0x1 << COLD_BIT_SHIFT); } -#define is_cold_node(page) is_node(page, COLD_BIT_SHIFT) -#define is_fsync_dnode(page) is_node(page, FSYNC_BIT_SHIFT) -#define is_dent_dnode(page) is_node(page, DENT_BIT_SHIFT) +static inline unsigned char is_fsync_dnode(struct page *page) +{ + void *kaddr = page_address(page); + struct f2fs_node *rn = (struct f2fs_node *)kaddr; + unsigned int flag = le32_to_cpu(rn->footer.flag); + return flag & (0x1 << FSYNC_BIT_SHIFT); +} + +static inline unsigned char is_dent_dnode(struct page *page) +{ + void *kaddr = page_address(page); + struct f2fs_node *rn = (struct f2fs_node *)kaddr; + unsigned int flag = le32_to_cpu(rn->footer.flag); + return flag & (0x1 << DENT_BIT_SHIFT); 
+} static inline void set_cold_node(struct inode *inode, struct page *page) { @@ -335,15 +346,26 @@ static inline void set_cold_node(struct inode *inode, struct page *page) rn->footer.flag = cpu_to_le32(flag); } -static inline void set_mark(struct page *page, int mark, int type) +static inline void set_fsync_mark(struct page *page, int mark) { - struct f2fs_node *rn = (struct f2fs_node *)page_address(page); + void *kaddr = page_address(page); + struct f2fs_node *rn = (struct f2fs_node *)kaddr; + unsigned int flag = le32_to_cpu(rn->footer.flag); + if (mark) + flag |= (0x1 << FSYNC_BIT_SHIFT); + else + flag &= ~(0x1 << FSYNC_BIT_SHIFT); + rn->footer.flag = cpu_to_le32(flag); +} + +static inline void set_dentry_mark(struct page *page, int mark) +{ + void *kaddr = page_address(page); + struct f2fs_node *rn = (struct f2fs_node *)kaddr; unsigned int flag = le32_to_cpu(rn->footer.flag); if (mark) - flag |= (0x1 << type); + flag |= (0x1 << DENT_BIT_SHIFT); else - flag &= ~(0x1 << type); + flag &= ~(0x1 << DENT_BIT_SHIFT); rn->footer.flag = cpu_to_le32(flag); } -#define set_dentry_mark(page, mark) set_mark(page, mark, DENT_BIT_SHIFT) -#define set_fsync_mark(page, mark) set_mark(page, mark, FSYNC_BIT_SHIFT) diff --git a/trunk/fs/f2fs/recovery.c b/trunk/fs/f2fs/recovery.c index d56d951c2253..60c8a5097058 100644 --- a/trunk/fs/f2fs/recovery.c +++ b/trunk/fs/f2fs/recovery.c @@ -40,54 +40,36 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head, static int recover_dentry(struct page *ipage, struct inode *inode) { - void *kaddr = page_address(ipage); - struct f2fs_node *raw_node = (struct f2fs_node *)kaddr; + struct f2fs_node *raw_node = (struct f2fs_node *)kmap(ipage); struct f2fs_inode *raw_inode = &(raw_node->i); - nid_t pino = le32_to_cpu(raw_inode->i_pino); - struct f2fs_dir_entry *de; struct qstr name; + struct f2fs_dir_entry *de; struct page *page; - struct inode *dir, *einode; + struct inode *dir; int err = 0; - dir = check_dirty_dir_inode(F2FS_SB(inode->i_sb), pino); - if (!dir) { - dir = f2fs_iget(inode->i_sb, pino); - if (IS_ERR(dir)) { - err = PTR_ERR(dir); - goto out; - } - set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT); - add_dirty_dir_inode(dir); + if (!is_dent_dnode(ipage)) + goto out; + + dir = f2fs_iget(inode->i_sb, le32_to_cpu(raw_inode->i_pino)); + if (IS_ERR(dir)) { + err = PTR_ERR(dir); + goto out; } name.len = le32_to_cpu(raw_inode->i_namelen); name.name = raw_inode->i_name; -retry: + de = f2fs_find_entry(dir, &name, &page); - if (de && inode->i_ino == le32_to_cpu(de->ino)) { + if (de) { kunmap(page); f2fs_put_page(page, 0); - goto out; - } - if (de) { - einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino)); - if (IS_ERR(einode)) { - WARN_ON(1); - if (PTR_ERR(einode) == -ENOENT) - err = -EEXIST; - goto out; - } - f2fs_delete_entry(de, page, einode); - iput(einode); - goto retry; + } else { + err = __f2fs_add_link(dir, &name, inode); } - err = __f2fs_add_link(dir, &name, inode); + iput(dir); out: - f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode and its dentry: " - "ino = %x, name = %s, dir = %lx, err = %d", - ino_of_node(ipage), raw_inode->i_name, - IS_ERR(dir) ? 
0 : dir->i_ino, err); + kunmap(ipage); return err; } @@ -97,9 +79,6 @@ static int recover_inode(struct inode *inode, struct page *node_page) struct f2fs_node *raw_node = (struct f2fs_node *)kaddr; struct f2fs_inode *raw_inode = &(raw_node->i); - if (!IS_INODE(node_page)) - return 0; - inode->i_mode = le16_to_cpu(raw_inode->i_mode); i_size_write(inode, le64_to_cpu(raw_inode->i_size)); inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime); @@ -109,12 +88,7 @@ static int recover_inode(struct inode *inode, struct page *node_page) inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec); inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); - if (is_dent_dnode(node_page)) - return recover_dentry(node_page, inode); - - f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s", - ino_of_node(node_page), raw_inode->i_name); - return 0; + return recover_dentry(node_page, inode); } static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) @@ -145,13 +119,14 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) lock_page(page); if (cp_ver != cpver_of_node(page)) - break; + goto unlock_out; if (!is_fsync_dnode(page)) goto next; entry = get_fsync_inode(head, ino_of_node(page)); if (entry) { + entry->blkaddr = blkaddr; if (IS_INODE(page) && is_dent_dnode(page)) set_inode_flag(F2FS_I(entry->inode), FI_INC_LINK); @@ -159,40 +134,48 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) if (IS_INODE(page) && is_dent_dnode(page)) { err = recover_inode_page(sbi, page); if (err) - break; + goto unlock_out; } /* add this fsync inode to the list */ entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS); if (!entry) { err = -ENOMEM; - break; + goto unlock_out; } entry->inode = f2fs_iget(sbi->sb, ino_of_node(page)); if (IS_ERR(entry->inode)) { err = PTR_ERR(entry->inode); kmem_cache_free(fsync_entry_slab, entry); - break; + goto unlock_out; } + list_add_tail(&entry->list, head); + entry->blkaddr = blkaddr; + } + if (IS_INODE(page)) { + err = recover_inode(entry->inode, page); + if (err == -ENOENT) { + goto next; + } else if (err) { + err = -EINVAL; + goto unlock_out; + } } - entry->blkaddr = blkaddr; - - err = recover_inode(entry->inode, page); - if (err && err != -ENOENT) - break; next: /* check next segment */ blkaddr = next_blkaddr_of_node(page); } +unlock_out: unlock_page(page); out: __free_pages(page, 0); return err; } -static void destroy_fsync_dnodes(struct list_head *head) +static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi, + struct list_head *head) { struct fsync_inode_entry *entry, *tmp; @@ -203,15 +186,15 @@ static void destroy_fsync_dnodes(struct list_head *head) } } -static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, - block_t blkaddr, struct dnode_of_data *dn) +static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi, + block_t blkaddr) { struct seg_entry *sentry; unsigned int segno = GET_SEGNO(sbi, blkaddr); unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1); struct f2fs_summary sum; - nid_t ino, nid; + nid_t ino; void *kaddr; struct inode *inode; struct page *node_page; @@ -220,7 +203,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, sentry = get_seg_entry(sbi, segno); if (!f2fs_test_bit(blkoff, sentry->cur_valid_map)) - return 0; + return; /* Get the previous summary */ for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) { @@ -239,39 +222,20 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, 
f2fs_put_page(sum_page, 1); } - /* Use the locked dnode page and inode */ - nid = le32_to_cpu(sum.nid); - if (dn->inode->i_ino == nid) { - struct dnode_of_data tdn = *dn; - tdn.nid = nid; - tdn.node_page = dn->inode_page; - tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node); - truncate_data_blocks_range(&tdn, 1); - return 0; - } else if (dn->nid == nid) { - struct dnode_of_data tdn = *dn; - tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node); - truncate_data_blocks_range(&tdn, 1); - return 0; - } - /* Get the node page */ - node_page = get_node_page(sbi, nid); - if (IS_ERR(node_page)) - return PTR_ERR(node_page); + node_page = get_node_page(sbi, le32_to_cpu(sum.nid)); bidx = start_bidx_of_node(ofs_of_node(node_page)) + - le16_to_cpu(sum.ofs_in_node); + le16_to_cpu(sum.ofs_in_node); ino = ino_of_node(node_page); f2fs_put_page(node_page, 1); /* Deallocate previous index in the node page */ inode = f2fs_iget(sbi->sb, ino); if (IS_ERR(inode)) - return PTR_ERR(inode); + return; truncate_hole(inode, bidx, bidx + 1); iput(inode); - return 0; } static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, @@ -281,7 +245,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, struct dnode_of_data dn; struct f2fs_summary sum; struct node_info ni; - int err = 0, recovered = 0; + int err = 0; int ilock; start = start_bidx_of_node(ofs_of_node(page)); @@ -319,16 +283,13 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, } /* Check the previous node page having this index */ - err = check_index_in_prev_nodes(sbi, dest, &dn); - if (err) - goto err; + check_index_in_prev_nodes(sbi, dest); set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version); /* write dummy data page */ recover_data_page(sbi, NULL, &sum, src, dest); update_extent_cache(dest, &dn); - recovered++; } dn.ofs_in_node++; } @@ -344,14 +305,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, set_page_dirty(dn.node_page); recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr); -err: f2fs_put_dnode(&dn); mutex_unlock_op(sbi, ilock); - - f2fs_msg(sbi->sb, KERN_NOTICE, "recover_data: ino = %lx, " - "recovered_data = %d blocks, err = %d", - inode->i_ino, recovered, err); - return err; + return 0; } static int recover_data(struct f2fs_sb_info *sbi, @@ -384,7 +340,7 @@ static int recover_data(struct f2fs_sb_info *sbi, lock_page(page); if (cp_ver != cpver_of_node(page)) - break; + goto unlock_out; entry = get_fsync_inode(head, ino_of_node(page)); if (!entry) @@ -392,7 +348,7 @@ static int recover_data(struct f2fs_sb_info *sbi, err = do_recover_data(sbi, entry->inode, page, blkaddr); if (err) - break; + goto out; if (entry->blkaddr == blkaddr) { iput(entry->inode); @@ -403,6 +359,7 @@ static int recover_data(struct f2fs_sb_info *sbi, /* check next segment */ blkaddr = next_blkaddr_of_node(page); } +unlock_out: unlock_page(page); out: __free_pages(page, 0); @@ -425,7 +382,6 @@ int recover_fsync_data(struct f2fs_sb_info *sbi) INIT_LIST_HEAD(&inode_list); /* step #1: find fsynced inode numbers */ - sbi->por_doing = 1; err = find_fsync_dnodes(sbi, &inode_list); if (err) goto out; @@ -434,13 +390,13 @@ int recover_fsync_data(struct f2fs_sb_info *sbi) goto out; /* step #2: recover data */ + sbi->por_doing = 1; err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE); + sbi->por_doing = 0; BUG_ON(!list_empty(&inode_list)); out: - destroy_fsync_dnodes(&inode_list); + destroy_fsync_dnodes(sbi, &inode_list); kmem_cache_destroy(fsync_entry_slab); - sbi->por_doing = 0; - if (!err) - 
write_checkpoint(sbi, false); + write_checkpoint(sbi, false); return err; } diff --git a/trunk/fs/f2fs/segment.c b/trunk/fs/f2fs/segment.c index a86d125a9885..d8e84e49a5c3 100644 --- a/trunk/fs/f2fs/segment.c +++ b/trunk/fs/f2fs/segment.c @@ -94,7 +94,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, * Adding dirty entry into seglist is not critical operation. * If a given segment is one of current working segments, it won't be added. */ -static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) +void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); unsigned short valid_blocks; @@ -126,16 +126,17 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); - unsigned int segno = -1; + unsigned int segno, offset = 0; unsigned int total_segs = TOTAL_SEGS(sbi); mutex_lock(&dirty_i->seglist_lock); while (1) { segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs, - segno + 1); + offset); if (segno >= total_segs) break; __set_test_and_free(sbi, segno); + offset = segno + 1; } mutex_unlock(&dirty_i->seglist_lock); } @@ -143,16 +144,17 @@ static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) void clear_prefree_segments(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); - unsigned int segno = -1; + unsigned int segno, offset = 0; unsigned int total_segs = TOTAL_SEGS(sbi); mutex_lock(&dirty_i->seglist_lock); while (1) { segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs, - segno + 1); + offset); if (segno >= total_segs) break; + offset = segno + 1; if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE])) dirty_i->nr_dirty[PRE]--; @@ -255,11 +257,11 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) * This function should be resided under the curseg_mutex lock */ static void __add_sum_entry(struct f2fs_sb_info *sbi, int type, - struct f2fs_summary *sum) + struct f2fs_summary *sum, unsigned short offset) { struct curseg_info *curseg = CURSEG_I(sbi, type); void *addr = curseg->sum_blk; - addr += curseg->next_blkoff * sizeof(struct f2fs_summary); + addr += offset * sizeof(struct f2fs_summary); memcpy(addr, sum, sizeof(struct f2fs_summary)); return; } @@ -309,14 +311,64 @@ static void write_sum_page(struct f2fs_sb_info *sbi, f2fs_put_page(page, 1); } +static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi, int type) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE]; + unsigned int segno; + unsigned int ofs = 0; + + /* + * If there is not enough reserved sections, + * we should not reuse prefree segments. + */ + if (has_not_enough_free_secs(sbi, 0)) + return NULL_SEGNO; + + /* + * NODE page should not reuse prefree segment, + * since those information is used for SPOR. 
+ */ + if (IS_NODESEG(type)) + return NULL_SEGNO; +next: + segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs); + ofs += sbi->segs_per_sec; + + if (segno < TOTAL_SEGS(sbi)) { + int i; + + /* skip intermediate segments in a section */ + if (segno % sbi->segs_per_sec) + goto next; + + /* skip if the section is currently used */ + if (sec_usage_check(sbi, GET_SECNO(sbi, segno))) + goto next; + + /* skip if whole section is not prefree */ + for (i = 1; i < sbi->segs_per_sec; i++) + if (!test_bit(segno + i, prefree_segmap)) + goto next; + + /* skip if whole section was not free at the last checkpoint */ + for (i = 0; i < sbi->segs_per_sec; i++) + if (get_seg_entry(sbi, segno + i)->ckpt_valid_blocks) + goto next; + + return segno; + } + return NULL_SEGNO; +} + static int is_next_segment_free(struct f2fs_sb_info *sbi, int type) { struct curseg_info *curseg = CURSEG_I(sbi, type); - unsigned int segno = curseg->segno + 1; + unsigned int segno = curseg->segno; struct free_segmap_info *free_i = FREE_I(sbi); - if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec) - return !test_bit(segno, free_i->free_segmap); + if (segno + 1 < TOTAL_SEGS(sbi) && (segno + 1) % sbi->segs_per_sec) + return !test_bit(segno + 1, free_i->free_segmap); return 0; } @@ -443,7 +495,7 @@ static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec) int dir = ALLOC_LEFT; write_sum_page(sbi, curseg->sum_blk, - GET_SUM_BLOCK(sbi, segno)); + GET_SUM_BLOCK(sbi, curseg->segno)); if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA) dir = ALLOC_RIGHT; @@ -547,7 +599,11 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi, goto out; } - if (type == CURSEG_WARM_NODE) + curseg->next_segno = check_prefree_segments(sbi, type); + + if (curseg->next_segno != NULL_SEGNO) + change_curseg(sbi, type, false); + else if (type == CURSEG_WARM_NODE) new_curseg(sbi, type, false); else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type)) new_curseg(sbi, type, false); @@ -556,10 +612,7 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi, else new_curseg(sbi, type, false); out: -#ifdef CONFIG_F2FS_STAT_FS sbi->segment_count[curseg->alloc_type]++; -#endif - return; } void allocate_new_segments(struct f2fs_sb_info *sbi) @@ -742,7 +795,7 @@ static int __get_segment_type_6(struct page *page, enum page_type p_type) if (S_ISDIR(inode->i_mode)) return CURSEG_HOT_DATA; - else if (is_cold_data(page) || file_is_cold(inode)) + else if (is_cold_data(page) || is_cold_file(inode)) return CURSEG_COLD_DATA; else return CURSEG_WARM_DATA; @@ -791,13 +844,11 @@ static void do_write_page(struct f2fs_sb_info *sbi, struct page *page, * because, this function updates a summary entry in the * current summary block. 
*/ - __add_sum_entry(sbi, type, sum); + __add_sum_entry(sbi, type, sum, curseg->next_blkoff); mutex_lock(&sit_i->sentry_lock); __refresh_next_blkoff(sbi, curseg); -#ifdef CONFIG_F2FS_STAT_FS sbi->block_count[curseg->alloc_type]++; -#endif /* * SIT information should be updated before segment allocation, @@ -892,7 +943,7 @@ void recover_data_page(struct f2fs_sb_info *sbi, curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) & (sbi->blocks_per_seg - 1); - __add_sum_entry(sbi, type, sum); + __add_sum_entry(sbi, type, sum, curseg->next_blkoff); refresh_sit_entry(sbi, old_blkaddr, new_blkaddr); @@ -929,7 +980,7 @@ void rewrite_node_page(struct f2fs_sb_info *sbi, } curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) & (sbi->blocks_per_seg - 1); - __add_sum_entry(sbi, type, sum); + __add_sum_entry(sbi, type, sum, curseg->next_blkoff); /* change the current log to the next block addr in advance */ if (next_segno != segno) { @@ -1528,13 +1579,13 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); struct free_segmap_info *free_i = FREE_I(sbi); - unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi); + unsigned int segno = 0, offset = 0; unsigned short valid_blocks; - while (1) { + while (segno < TOTAL_SEGS(sbi)) { /* find dirty segment based on free segmap */ - segno = find_next_inuse(free_i, total_segs, offset); - if (segno >= total_segs) + segno = find_next_inuse(free_i, TOTAL_SEGS(sbi), offset); + if (segno >= TOTAL_SEGS(sbi)) break; offset = segno + 1; valid_blocks = get_valid_blocks(sbi, segno, 0); diff --git a/trunk/fs/f2fs/super.c b/trunk/fs/f2fs/super.c index 75c7dc363e92..8555f7df82c7 100644 --- a/trunk/fs/f2fs/super.c +++ b/trunk/fs/f2fs/super.c @@ -34,7 +34,7 @@ static struct kmem_cache *f2fs_inode_cachep; enum { - Opt_gc_background, + Opt_gc_background_off, Opt_disable_roll_forward, Opt_discard, Opt_noheap, @@ -46,7 +46,7 @@ enum { }; static match_table_t f2fs_tokens = { - {Opt_gc_background, "background_gc=%s"}, + {Opt_gc_background_off, "background_gc_off"}, {Opt_disable_roll_forward, "disable_roll_forward"}, {Opt_discard, "discard"}, {Opt_noheap, "no_heap"}, @@ -76,91 +76,6 @@ static void init_once(void *foo) inode_init_once(&fi->vfs_inode); } -static int parse_options(struct super_block *sb, char *options) -{ - struct f2fs_sb_info *sbi = F2FS_SB(sb); - substring_t args[MAX_OPT_ARGS]; - char *p, *name; - int arg = 0; - - if (!options) - return 0; - - while ((p = strsep(&options, ",")) != NULL) { - int token; - if (!*p) - continue; - /* - * Initialize args struct so we know whether arg was - * found; some options take optional arguments. 
- */ - args[0].to = args[0].from = NULL; - token = match_token(p, f2fs_tokens, args); - - switch (token) { - case Opt_gc_background: - name = match_strdup(&args[0]); - - if (!name) - return -ENOMEM; - if (!strncmp(name, "on", 2)) - set_opt(sbi, BG_GC); - else if (!strncmp(name, "off", 3)) - clear_opt(sbi, BG_GC); - else { - kfree(name); - return -EINVAL; - } - kfree(name); - break; - case Opt_disable_roll_forward: - set_opt(sbi, DISABLE_ROLL_FORWARD); - break; - case Opt_discard: - set_opt(sbi, DISCARD); - break; - case Opt_noheap: - set_opt(sbi, NOHEAP); - break; -#ifdef CONFIG_F2FS_FS_XATTR - case Opt_nouser_xattr: - clear_opt(sbi, XATTR_USER); - break; -#else - case Opt_nouser_xattr: - f2fs_msg(sb, KERN_INFO, - "nouser_xattr options not supported"); - break; -#endif -#ifdef CONFIG_F2FS_FS_POSIX_ACL - case Opt_noacl: - clear_opt(sbi, POSIX_ACL); - break; -#else - case Opt_noacl: - f2fs_msg(sb, KERN_INFO, "noacl options not supported"); - break; -#endif - case Opt_active_logs: - if (args->from && match_int(args, &arg)) - return -EINVAL; - if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE) - return -EINVAL; - sbi->active_logs = arg; - break; - case Opt_disable_ext_identify: - set_opt(sbi, DISABLE_EXT_IDENTIFY); - break; - default: - f2fs_msg(sb, KERN_ERR, - "Unrecognized mount option \"%s\" or missing value", - p); - return -EINVAL; - } - } - return 0; -} - static struct inode *f2fs_alloc_inode(struct super_block *sb) { struct f2fs_inode_info *fi; @@ -197,17 +112,6 @@ static int f2fs_drop_inode(struct inode *inode) return generic_drop_inode(inode); } -/* - * f2fs_dirty_inode() is called from __mark_inode_dirty() - * - * We should call set_dirty_inode to write the dirty inode through write_inode. - */ -static void f2fs_dirty_inode(struct inode *inode, int flags) -{ - set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE); - return; -} - static void f2fs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); @@ -266,7 +170,7 @@ static int f2fs_freeze(struct super_block *sb) { int err; - if (f2fs_readonly(sb)) + if (sb->s_flags & MS_RDONLY) return 0; err = f2fs_sync_fs(sb, 1); @@ -310,10 +214,10 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) { struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb); - if (!(root->d_sb->s_flags & MS_RDONLY) && test_opt(sbi, BG_GC)) - seq_printf(seq, ",background_gc=%s", "on"); + if (test_opt(sbi, BG_GC)) + seq_puts(seq, ",background_gc_on"); else - seq_printf(seq, ",background_gc=%s", "off"); + seq_puts(seq, ",background_gc_off"); if (test_opt(sbi, DISABLE_ROLL_FORWARD)) seq_puts(seq, ",disable_roll_forward"); if (test_opt(sbi, DISCARD)) @@ -340,64 +244,11 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) return 0; } -static int f2fs_remount(struct super_block *sb, int *flags, char *data) -{ - struct f2fs_sb_info *sbi = F2FS_SB(sb); - struct f2fs_mount_info org_mount_opt; - int err, active_logs; - - /* - * Save the old mount options in case we - * need to restore them. - */ - org_mount_opt = sbi->mount_opt; - active_logs = sbi->active_logs; - - /* parse mount options */ - err = parse_options(sb, data); - if (err) - goto restore_opts; - - /* - * Previous and new state of filesystem is RO, - * so no point in checking GC conditions. - */ - if ((sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) - goto skip; - - /* - * We stop the GC thread if FS is mounted as RO - * or if background_gc = off is passed in mount - * option. Also sync the filesystem. 
- */ - if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) { - if (sbi->gc_thread) { - stop_gc_thread(sbi); - f2fs_sync_fs(sb, 1); - } - } else if (test_opt(sbi, BG_GC) && !sbi->gc_thread) { - err = start_gc_thread(sbi); - if (err) - goto restore_opts; - } -skip: - /* Update the POSIXACL Flag */ - sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | - (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0); - return 0; - -restore_opts: - sbi->mount_opt = org_mount_opt; - sbi->active_logs = active_logs; - return err; -} - static struct super_operations f2fs_sops = { .alloc_inode = f2fs_alloc_inode, .drop_inode = f2fs_drop_inode, .destroy_inode = f2fs_destroy_inode, .write_inode = f2fs_write_inode, - .dirty_inode = f2fs_dirty_inode, .show_options = f2fs_show_options, .evict_inode = f2fs_evict_inode, .put_super = f2fs_put_super, @@ -405,7 +256,6 @@ static struct super_operations f2fs_sops = { .freeze_fs = f2fs_freeze, .unfreeze_fs = f2fs_unfreeze, .statfs = f2fs_statfs, - .remount_fs = f2fs_remount, }; static struct inode *f2fs_nfs_get_inode(struct super_block *sb, @@ -453,6 +303,79 @@ static const struct export_operations f2fs_export_ops = { .get_parent = f2fs_get_parent, }; +static int parse_options(struct super_block *sb, struct f2fs_sb_info *sbi, + char *options) +{ + substring_t args[MAX_OPT_ARGS]; + char *p; + int arg = 0; + + if (!options) + return 0; + + while ((p = strsep(&options, ",")) != NULL) { + int token; + if (!*p) + continue; + /* + * Initialize args struct so we know whether arg was + * found; some options take optional arguments. + */ + args[0].to = args[0].from = NULL; + token = match_token(p, f2fs_tokens, args); + + switch (token) { + case Opt_gc_background_off: + clear_opt(sbi, BG_GC); + break; + case Opt_disable_roll_forward: + set_opt(sbi, DISABLE_ROLL_FORWARD); + break; + case Opt_discard: + set_opt(sbi, DISCARD); + break; + case Opt_noheap: + set_opt(sbi, NOHEAP); + break; +#ifdef CONFIG_F2FS_FS_XATTR + case Opt_nouser_xattr: + clear_opt(sbi, XATTR_USER); + break; +#else + case Opt_nouser_xattr: + f2fs_msg(sb, KERN_INFO, + "nouser_xattr options not supported"); + break; +#endif +#ifdef CONFIG_F2FS_FS_POSIX_ACL + case Opt_noacl: + clear_opt(sbi, POSIX_ACL); + break; +#else + case Opt_noacl: + f2fs_msg(sb, KERN_INFO, "noacl options not supported"); + break; +#endif + case Opt_active_logs: + if (args->from && match_int(args, &arg)) + return -EINVAL; + if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE) + return -EINVAL; + sbi->active_logs = arg; + break; + case Opt_disable_ext_identify: + set_opt(sbi, DISABLE_EXT_IDENTIFY); + break; + default: + f2fs_msg(sb, KERN_ERR, + "Unrecognized mount option \"%s\" or missing value", + p); + return -EINVAL; + } + } + return 0; +} + static loff_t max_file_size(unsigned bits) { loff_t result = ADDRS_PER_INODE; @@ -618,7 +541,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) if (err) goto free_sb_buf; } - sb->s_fs_info = sbi; /* init some FS parameters */ sbi->active_logs = NR_CURSEG_TYPE; @@ -631,7 +553,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) set_opt(sbi, POSIX_ACL); #endif /* parse mount options */ - err = parse_options(sb, (char *)data); + err = parse_options(sb, sbi, (char *)data); if (err) goto free_sb_buf; @@ -643,6 +565,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) sb->s_xattr = f2fs_xattr_handlers; sb->s_export_op = &f2fs_export_ops; sb->s_magic = F2FS_SUPER_MAGIC; + sb->s_fs_info = sbi; sb->s_time_gran = 1; sb->s_flags = (sb->s_flags & 
~MS_POSIXACL) | (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0); @@ -751,16 +674,10 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) "Cannot recover all fsync data errno=%ld", err); } - /* - * If filesystem is not mounted as read-only then - * do start the gc_thread. - */ - if (!(sb->s_flags & MS_RDONLY)) { - /* After POR, we can run background GC thread.*/ - err = start_gc_thread(sbi); - if (err) - goto fail; - } + /* After POR, we can run background GC thread */ + err = start_gc_thread(sbi); + if (err) + goto fail; err = f2fs_build_stats(sbi); if (err) diff --git a/trunk/fs/f2fs/xattr.c b/trunk/fs/f2fs/xattr.c index 3ab07ecd86ca..0b02dce31356 100644 --- a/trunk/fs/f2fs/xattr.c +++ b/trunk/fs/f2fs/xattr.c @@ -20,7 +20,6 @@ */ #include #include -#include #include "f2fs.h" #include "xattr.h" @@ -44,10 +43,6 @@ static size_t f2fs_xattr_generic_list(struct dentry *dentry, char *list, prefix = XATTR_TRUSTED_PREFIX; prefix_len = XATTR_TRUSTED_PREFIX_LEN; break; - case F2FS_XATTR_INDEX_SECURITY: - prefix = XATTR_SECURITY_PREFIX; - prefix_len = XATTR_SECURITY_PREFIX_LEN; - break; default: return -EINVAL; } @@ -55,7 +50,7 @@ static size_t f2fs_xattr_generic_list(struct dentry *dentry, char *list, total_len = prefix_len + name_len + 1; if (list && total_len <= list_size) { memcpy(list, prefix, prefix_len); - memcpy(list + prefix_len, name, name_len); + memcpy(list+prefix_len, name, name_len); list[prefix_len + name_len] = '\0'; } return total_len; @@ -75,14 +70,13 @@ static int f2fs_xattr_generic_get(struct dentry *dentry, const char *name, if (!capable(CAP_SYS_ADMIN)) return -EPERM; break; - case F2FS_XATTR_INDEX_SECURITY: - break; default: return -EINVAL; } if (strcmp(name, "") == 0) return -EINVAL; - return f2fs_getxattr(dentry->d_inode, type, name, buffer, size); + return f2fs_getxattr(dentry->d_inode, type, name, + buffer, size); } static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name, @@ -99,15 +93,13 @@ static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name, if (!capable(CAP_SYS_ADMIN)) return -EPERM; break; - case F2FS_XATTR_INDEX_SECURITY: - break; default: return -EINVAL; } if (strcmp(name, "") == 0) return -EINVAL; - return f2fs_setxattr(dentry->d_inode, type, name, value, size, NULL); + return f2fs_setxattr(dentry->d_inode, type, name, value, size); } static size_t f2fs_xattr_advise_list(struct dentry *dentry, char *list, @@ -153,31 +145,6 @@ static int f2fs_xattr_advise_set(struct dentry *dentry, const char *name, return 0; } -#ifdef CONFIG_F2FS_FS_SECURITY -static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array, - void *page) -{ - const struct xattr *xattr; - int err = 0; - - for (xattr = xattr_array; xattr->name != NULL; xattr++) { - err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY, - xattr->name, xattr->value, - xattr->value_len, (struct page *)page); - if (err < 0) - break; - } - return err; -} - -int f2fs_init_security(struct inode *inode, struct inode *dir, - const struct qstr *qstr, struct page *ipage) -{ - return security_inode_init_security(inode, dir, qstr, - &f2fs_initxattrs, ipage); -} -#endif - const struct xattr_handler f2fs_xattr_user_handler = { .prefix = XATTR_USER_PREFIX, .flags = F2FS_XATTR_INDEX_USER, @@ -202,14 +169,6 @@ const struct xattr_handler f2fs_xattr_advise_handler = { .set = f2fs_xattr_advise_set, }; -const struct xattr_handler f2fs_xattr_security_handler = { - .prefix = XATTR_SECURITY_PREFIX, - .flags = F2FS_XATTR_INDEX_SECURITY, - .list = 
f2fs_xattr_generic_list, - .get = f2fs_xattr_generic_get, - .set = f2fs_xattr_generic_set, -}; - static const struct xattr_handler *f2fs_xattr_handler_map[] = { [F2FS_XATTR_INDEX_USER] = &f2fs_xattr_user_handler, #ifdef CONFIG_F2FS_FS_POSIX_ACL @@ -217,9 +176,6 @@ static const struct xattr_handler *f2fs_xattr_handler_map[] = { [F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT] = &f2fs_xattr_acl_default_handler, #endif [F2FS_XATTR_INDEX_TRUSTED] = &f2fs_xattr_trusted_handler, -#ifdef CONFIG_F2FS_FS_SECURITY - [F2FS_XATTR_INDEX_SECURITY] = &f2fs_xattr_security_handler, -#endif [F2FS_XATTR_INDEX_ADVISE] = &f2fs_xattr_advise_handler, }; @@ -230,9 +186,6 @@ const struct xattr_handler *f2fs_xattr_handlers[] = { &f2fs_xattr_acl_default_handler, #endif &f2fs_xattr_trusted_handler, -#ifdef CONFIG_F2FS_FS_SECURITY - &f2fs_xattr_security_handler, -#endif &f2fs_xattr_advise_handler, NULL, }; @@ -265,8 +218,6 @@ int f2fs_getxattr(struct inode *inode, int name_index, const char *name, return -ENODATA; page = get_node_page(sbi, fi->i_xattr_nid); - if (IS_ERR(page)) - return PTR_ERR(page); base_addr = page_address(page); list_for_each_xattr(entry, base_addr) { @@ -317,8 +268,6 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) return 0; page = get_node_page(sbi, fi->i_xattr_nid); - if (IS_ERR(page)) - return PTR_ERR(page); base_addr = page_address(page); list_for_each_xattr(entry, base_addr) { @@ -347,7 +296,7 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) } int f2fs_setxattr(struct inode *inode, int name_index, const char *name, - const void *value, size_t value_len, struct page *ipage) + const void *value, size_t value_len) { struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); struct f2fs_inode_info *fi = F2FS_I(inode); @@ -386,7 +335,7 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name, set_new_dnode(&dn, inode, NULL, NULL, fi->i_xattr_nid); mark_inode_dirty(inode); - page = new_node_page(&dn, XATTR_NODE_OFFSET, ipage); + page = new_node_page(&dn, XATTR_NODE_OFFSET); if (IS_ERR(page)) { alloc_nid_failed(sbi, fi->i_xattr_nid); fi->i_xattr_nid = 0; @@ -486,10 +435,7 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name, inode->i_ctime = CURRENT_TIME; clear_inode_flag(fi, FI_ACL_MODE); } - if (ipage) - update_inode(inode, ipage); - else - update_inode_page(inode); + update_inode_page(inode); mutex_unlock_op(sbi, ilock); return 0; diff --git a/trunk/fs/f2fs/xattr.h b/trunk/fs/f2fs/xattr.h index 3c0817bef25d..49c9558305e3 100644 --- a/trunk/fs/f2fs/xattr.h +++ b/trunk/fs/f2fs/xattr.h @@ -112,19 +112,21 @@ extern const struct xattr_handler f2fs_xattr_trusted_handler; extern const struct xattr_handler f2fs_xattr_acl_access_handler; extern const struct xattr_handler f2fs_xattr_acl_default_handler; extern const struct xattr_handler f2fs_xattr_advise_handler; -extern const struct xattr_handler f2fs_xattr_security_handler; extern const struct xattr_handler *f2fs_xattr_handlers[]; -extern int f2fs_setxattr(struct inode *, int, const char *, - const void *, size_t, struct page *); -extern int f2fs_getxattr(struct inode *, int, const char *, void *, size_t); -extern ssize_t f2fs_listxattr(struct dentry *, char *, size_t); +extern int f2fs_setxattr(struct inode *inode, int name_index, const char *name, + const void *value, size_t value_len); +extern int f2fs_getxattr(struct inode *inode, int name_index, const char *name, + void *buffer, size_t buffer_size); +extern ssize_t f2fs_listxattr(struct dentry *dentry, char 
*buffer, + size_t buffer_size); + #else #define f2fs_xattr_handlers NULL static inline int f2fs_setxattr(struct inode *inode, int name_index, - const char *name, const void *value, size_t value_len) + const char *name, const void *value, size_t value_len) { return -EOPNOTSUPP; } @@ -140,14 +142,4 @@ static inline ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, } #endif -#ifdef CONFIG_F2FS_FS_SECURITY -extern int f2fs_init_security(struct inode *, struct inode *, - const struct qstr *, struct page *); -#else -static inline int f2fs_init_security(struct inode *inode, struct inode *dir, - const struct qstr *qstr, struct page *ipage) -{ - return 0; -} -#endif #endif /* __F2FS_XATTR_H__ */ diff --git a/trunk/fs/fat/dir.c b/trunk/fs/fat/dir.c index 3963ede84eb0..7a6f02caf286 100644 --- a/trunk/fs/fat/dir.c +++ b/trunk/fs/fat/dir.c @@ -543,7 +543,6 @@ int fat_search_long(struct inode *inode, const unsigned char *name, EXPORT_SYMBOL_GPL(fat_search_long); struct fat_ioctl_filldir_callback { - struct dir_context ctx; void __user *dirent; int result; /* for dir ioctl */ @@ -553,9 +552,8 @@ struct fat_ioctl_filldir_callback { int short_len; }; -static int __fat_readdir(struct inode *inode, struct file *file, - struct dir_context *ctx, int short_only, - struct fat_ioctl_filldir_callback *both) +static int __fat_readdir(struct inode *inode, struct file *filp, void *dirent, + filldir_t filldir, int short_only, int both) { struct super_block *sb = inode->i_sb; struct msdos_sb_info *sbi = MSDOS_SB(sb); @@ -566,20 +564,27 @@ static int __fat_readdir(struct inode *inode, struct file *file, unsigned char bufname[FAT_MAX_SHORT_SIZE]; int isvfat = sbi->options.isvfat; const char *fill_name = NULL; - int fake_offset = 0; + unsigned long inum; + unsigned long lpos, dummy, *furrfu = &lpos; loff_t cpos; int short_len = 0, fill_len = 0; int ret = 0; mutex_lock(&sbi->s_lock); - cpos = ctx->pos; + cpos = filp->f_pos; /* Fake . and .. for the root directory. */ if (inode->i_ino == MSDOS_ROOT_INO) { - if (!dir_emit_dots(file, ctx)) - goto out; - if (ctx->pos == 2) { - fake_offset = 1; + while (cpos < 2) { + if (filldir(dirent, "..", cpos+1, cpos, + MSDOS_ROOT_INO, DT_DIR) < 0) + goto out; + cpos++; + filp->f_pos++; + } + if (cpos == 2) { + dummy = 2; + furrfu = &dummy; cpos = 0; } } @@ -614,7 +619,7 @@ static int __fat_readdir(struct inode *inode, struct file *file, int status = fat_parse_long(inode, &cpos, &bh, &de, &unicode, &nr_slots); if (status < 0) { - ctx->pos = cpos; + filp->f_pos = cpos; ret = status; goto out; } else if (status == PARSE_INVALID) @@ -634,19 +639,6 @@ static int __fat_readdir(struct inode *inode, struct file *file, /* !both && !short_only, so we don't need shortname. 
*/ if (!both) goto start_filldir; - - short_len = fat_parse_short(sb, de, bufname, - sbi->options.dotsOK); - if (short_len == 0) - goto record_end; - /* hack for fat_ioctl_filldir() */ - both->longname = fill_name; - both->long_len = fill_len; - both->shortname = bufname; - both->short_len = short_len; - fill_name = NULL; - fill_len = 0; - goto start_filldir; } } @@ -654,21 +646,28 @@ static int __fat_readdir(struct inode *inode, struct file *file, if (short_len == 0) goto record_end; - fill_name = bufname; - fill_len = short_len; + if (nr_slots) { + /* hack for fat_ioctl_filldir() */ + struct fat_ioctl_filldir_callback *p = dirent; + + p->longname = fill_name; + p->long_len = fill_len; + p->shortname = bufname; + p->short_len = short_len; + fill_name = NULL; + fill_len = 0; + } else { + fill_name = bufname; + fill_len = short_len; + } start_filldir: - if (!fake_offset) - ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry); - - if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) { - if (!dir_emit_dot(file, ctx)) - goto fill_failed; - } else if (!memcmp(de->name, MSDOS_DOTDOT, MSDOS_NAME)) { - if (!dir_emit_dotdot(file, ctx)) - goto fill_failed; + lpos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry); + if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) + inum = inode->i_ino; + else if (!memcmp(de->name, MSDOS_DOTDOT, MSDOS_NAME)) { + inum = parent_ino(filp->f_path.dentry); } else { - unsigned long inum; loff_t i_pos = fat_make_i_pos(sb, bh, de); struct inode *tmp = fat_iget(sb, i_pos); if (tmp) { @@ -676,17 +675,18 @@ static int __fat_readdir(struct inode *inode, struct file *file, iput(tmp); } else inum = iunique(sb, MSDOS_ROOT_INO); - if (!dir_emit(ctx, fill_name, fill_len, inum, - (de->attr & ATTR_DIR) ? DT_DIR : DT_REG)) - goto fill_failed; } + if (filldir(dirent, fill_name, fill_len, *furrfu, inum, + (de->attr & ATTR_DIR) ? DT_DIR : DT_REG) < 0) + goto fill_failed; + record_end: - fake_offset = 0; - ctx->pos = cpos; + furrfu = &lpos; + filp->f_pos = cpos; goto get_new; end_of_dir: - ctx->pos = cpos; + filp->f_pos = cpos; fill_failed: brelse(bh); if (unicode) @@ -696,9 +696,10 @@ static int __fat_readdir(struct inode *inode, struct file *file, return ret; } -static int fat_readdir(struct file *file, struct dir_context *ctx) +static int fat_readdir(struct file *filp, void *dirent, filldir_t filldir) { - return __fat_readdir(file_inode(file), file, ctx, 0, NULL); + struct inode *inode = file_inode(filp); + return __fat_readdir(inode, filp, dirent, filldir, 0, 0); } #define FAT_IOCTL_FILLDIR_FUNC(func, dirent_type) \ @@ -754,25 +755,20 @@ efault: \ FAT_IOCTL_FILLDIR_FUNC(fat_ioctl_filldir, __fat_dirent) -static int fat_ioctl_readdir(struct inode *inode, struct file *file, +static int fat_ioctl_readdir(struct inode *inode, struct file *filp, void __user *dirent, filldir_t filldir, int short_only, int both) { - struct fat_ioctl_filldir_callback buf = { - .ctx.actor = filldir, - .dirent = dirent - }; + struct fat_ioctl_filldir_callback buf; int ret; buf.dirent = dirent; buf.result = 0; mutex_lock(&inode->i_mutex); - buf.ctx.pos = file->f_pos; ret = -ENOENT; if (!IS_DEADDIR(inode)) { - ret = __fat_readdir(inode, file, &buf.ctx, - short_only, both ? 
&buf : NULL); - file->f_pos = buf.ctx.pos; + ret = __fat_readdir(inode, filp, &buf, filldir, + short_only, both); } mutex_unlock(&inode->i_mutex); if (ret >= 0) @@ -858,7 +854,7 @@ static long fat_compat_dir_ioctl(struct file *filp, unsigned cmd, const struct file_operations fat_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = fat_readdir, + .readdir = fat_readdir, .unlocked_ioctl = fat_dir_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = fat_compat_dir_ioctl, diff --git a/trunk/fs/fat/inode.c b/trunk/fs/fat/inode.c index 5d4513cb1b3c..dfce656ddb33 100644 --- a/trunk/fs/fat/inode.c +++ b/trunk/fs/fat/inode.c @@ -1229,19 +1229,6 @@ static int fat_read_root(struct inode *inode) return 0; } -static unsigned long calc_fat_clusters(struct super_block *sb) -{ - struct msdos_sb_info *sbi = MSDOS_SB(sb); - - /* Divide first to avoid overflow */ - if (sbi->fat_bits != 12) { - unsigned long ent_per_sec = sb->s_blocksize * 8 / sbi->fat_bits; - return ent_per_sec * sbi->fat_length; - } - - return sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits; -} - /* * Read the super block of an MS-DOS FS. */ @@ -1447,7 +1434,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, sbi->dirty = b->fat16.state & FAT_STATE_DIRTY; /* check that FAT table does not overflow */ - fat_clusters = calc_fat_clusters(sb); + fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits; total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT); if (total_clusters > MAX_FAT(sb)) { if (!silent) diff --git a/trunk/fs/file_table.c b/trunk/fs/file_table.c index 485dc0eddd67..cd4d87a82951 100644 --- a/trunk/fs/file_table.c +++ b/trunk/fs/file_table.c @@ -306,18 +306,17 @@ void fput(struct file *file) { if (atomic_long_dec_and_test(&file->f_count)) { struct task_struct *task = current; - unsigned long flags; - file_sb_list_del(file); - if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) { - init_task_work(&file->f_u.fu_rcuhead, ____fput); - if (!task_work_add(task, &file->f_u.fu_rcuhead, true)) - return; + if (unlikely(in_interrupt() || task->flags & PF_KTHREAD)) { + unsigned long flags; + spin_lock_irqsave(&delayed_fput_lock, flags); + list_add(&file->f_u.fu_list, &delayed_fput_list); + schedule_work(&delayed_fput_work); + spin_unlock_irqrestore(&delayed_fput_lock, flags); + return; } - spin_lock_irqsave(&delayed_fput_lock, flags); - list_add(&file->f_u.fu_list, &delayed_fput_list); - schedule_work(&delayed_fput_work); - spin_unlock_irqrestore(&delayed_fput_lock, flags); + init_task_work(&file->f_u.fu_rcuhead, ____fput); + task_work_add(task, &file->f_u.fu_rcuhead, true); } } diff --git a/trunk/fs/freevxfs/vxfs_lookup.c b/trunk/fs/freevxfs/vxfs_lookup.c index 25d4099a4aea..664b07a53870 100644 --- a/trunk/fs/freevxfs/vxfs_lookup.c +++ b/trunk/fs/freevxfs/vxfs_lookup.c @@ -49,7 +49,7 @@ static struct dentry * vxfs_lookup(struct inode *, struct dentry *, unsigned int); -static int vxfs_readdir(struct file *, struct dir_context *); +static int vxfs_readdir(struct file *, void *, filldir_t); const struct inode_operations vxfs_dir_inode_ops = { .lookup = vxfs_lookup, @@ -58,7 +58,7 @@ const struct inode_operations vxfs_dir_inode_ops = { const struct file_operations vxfs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = vxfs_readdir, + .readdir = vxfs_readdir, }; @@ -235,7 +235,7 @@ vxfs_lookup(struct inode *dip, struct dentry *dp, unsigned int flags) * Zero. 
*/ static int -vxfs_readdir(struct file *fp, struct dir_context *ctx) +vxfs_readdir(struct file *fp, void *retp, filldir_t filler) { struct inode *ip = file_inode(fp); struct super_block *sbp = ip->i_sb; @@ -243,17 +243,20 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx) u_long page, npages, block, pblocks, nblocks, offset; loff_t pos; - if (ctx->pos == 0) { - if (!dir_emit_dot(fp, ctx)) - return 0; - ctx->pos = 1; + switch ((long)fp->f_pos) { + case 0: + if (filler(retp, ".", 1, fp->f_pos, ip->i_ino, DT_DIR) < 0) + goto out; + fp->f_pos++; + /* fallthrough */ + case 1: + if (filler(retp, "..", 2, fp->f_pos, VXFS_INO(ip)->vii_dotdot, DT_DIR) < 0) + goto out; + fp->f_pos++; + /* fallthrough */ } - if (ctx->pos == 1) { - if (!dir_emit(ctx, "..", 2, VXFS_INO(ip)->vii_dotdot, DT_DIR)) - return 0; - ctx->pos = 2; - } - pos = ctx->pos - 2; + + pos = fp->f_pos - 2; if (pos > VXFS_DIRROUND(ip->i_size)) return 0; @@ -267,16 +270,16 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx) block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks; for (; page < npages; page++, block = 0) { - char *kaddr; + caddr_t kaddr; struct page *pp; pp = vxfs_get_page(ip->i_mapping, page); if (IS_ERR(pp)) continue; - kaddr = (char *)page_address(pp); + kaddr = (caddr_t)page_address(pp); for (; block <= nblocks && block <= pblocks; block++) { - char *baddr, *limit; + caddr_t baddr, limit; struct vxfs_dirblk *dbp; struct vxfs_direct *de; @@ -289,18 +292,21 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx) (kaddr + offset) : (baddr + VXFS_DIRBLKOV(dbp))); - for (; (char *)de <= limit; de = vxfs_next_entry(de)) { + for (; (caddr_t)de <= limit; de = vxfs_next_entry(de)) { + int over; + if (!de->d_reclen) break; if (!de->d_ino) continue; - offset = (char *)de - kaddr; - ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2; - if (!dir_emit(ctx, de->d_name, de->d_namelen, - de->d_ino, DT_UNKNOWN)) { + offset = (caddr_t)de - kaddr; + over = filler(retp, de->d_name, de->d_namelen, + ((page << PAGE_CACHE_SHIFT) | offset) + 2, + de->d_ino, DT_UNKNOWN); + if (over) { vxfs_put_page(pp); - return 0; + goto done; } } offset = 0; @@ -308,6 +314,9 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx) vxfs_put_page(pp); offset = 0; } - ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2; + +done: + fp->f_pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2; +out: return 0; } diff --git a/trunk/fs/fs-writeback.c b/trunk/fs/fs-writeback.c index a85ac4e33436..3be57189efd5 100644 --- a/trunk/fs/fs-writeback.c +++ b/trunk/fs/fs-writeback.c @@ -45,7 +45,6 @@ struct wb_writeback_work { unsigned int for_kupdate:1; unsigned int range_cyclic:1; unsigned int for_background:1; - unsigned int for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ enum wb_reason reason; /* why was writeback initiated? */ struct list_head list; /* pending work list */ @@ -444,11 +443,9 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) /* * Make sure to wait on the data before writing out the metadata. * This is important for filesystems that modify metadata on data - * I/O completion. We don't do it for sync(2) writeback because it has a - * separate, external IO completion path and ->sync_fs for guaranteeing - * inode metadata is written back correctly. + * I/O completion. 
*/ - if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) { + if (wbc->sync_mode == WB_SYNC_ALL) { int err = filemap_fdatawait(mapping); if (ret == 0) ret = err; @@ -581,7 +578,6 @@ static long writeback_sb_inodes(struct super_block *sb, .tagged_writepages = work->tagged_writepages, .for_kupdate = work->for_kupdate, .for_background = work->for_background, - .for_sync = work->for_sync, .range_cyclic = work->range_cyclic, .range_start = 0, .range_end = LLONG_MAX, @@ -1366,7 +1362,6 @@ void sync_inodes_sb(struct super_block *sb) .range_cyclic = 0, .done = &done, .reason = WB_REASON_SYNC, - .for_sync = 1, }; /* Nothing to do? */ diff --git a/trunk/fs/fscache/cache.c b/trunk/fs/fscache/cache.c index f7cff367db7f..b52aed1dca97 100644 --- a/trunk/fs/fscache/cache.c +++ b/trunk/fs/fscache/cache.c @@ -115,7 +115,7 @@ struct fscache_cache *fscache_select_cache_for_object( struct fscache_object, cookie_link); cache = object->cache; - if (fscache_object_is_dying(object) || + if (object->state >= FSCACHE_OBJECT_DYING || test_bit(FSCACHE_IOERROR, &cache->flags)) cache = NULL; @@ -224,10 +224,8 @@ int fscache_add_cache(struct fscache_cache *cache, BUG_ON(!ifsdef); cache->flags = 0; - ifsdef->event_mask = - ((1 << NR_FSCACHE_OBJECT_EVENTS) - 1) & - ~(1 << FSCACHE_OBJECT_EV_CLEARED); - __set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &ifsdef->flags); + ifsdef->event_mask = ULONG_MAX & ~(1 << FSCACHE_OBJECT_EV_CLEARED); + ifsdef->state = FSCACHE_OBJECT_ACTIVE; if (!tagname) tagname = cache->identifier; @@ -332,25 +330,25 @@ static void fscache_withdraw_all_objects(struct fscache_cache *cache, { struct fscache_object *object; - while (!list_empty(&cache->object_list)) { - spin_lock(&cache->object_list_lock); - - if (!list_empty(&cache->object_list)) { - object = list_entry(cache->object_list.next, - struct fscache_object, cache_link); - list_move_tail(&object->cache_link, dying_objects); + spin_lock(&cache->object_list_lock); - _debug("withdraw %p", object->cookie); + while (!list_empty(&cache->object_list)) { + object = list_entry(cache->object_list.next, + struct fscache_object, cache_link); + list_move_tail(&object->cache_link, dying_objects); - /* This must be done under object_list_lock to prevent - * a race with fscache_drop_object(). - */ - fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL); - } + _debug("withdraw %p", object->cookie); + spin_lock(&object->lock); spin_unlock(&cache->object_list_lock); + fscache_raise_event(object, FSCACHE_OBJECT_EV_WITHDRAW); + spin_unlock(&object->lock); + cond_resched(); + spin_lock(&cache->object_list_lock); } + + spin_unlock(&cache->object_list_lock); } /** diff --git a/trunk/fs/fscache/cookie.c b/trunk/fs/fscache/cookie.c index 0e91a3c9fdb2..e2cba1f60c21 100644 --- a/trunk/fs/fscache/cookie.c +++ b/trunk/fs/fscache/cookie.c @@ -95,11 +95,6 @@ struct fscache_cookie *__fscache_acquire_cookie( atomic_set(&cookie->usage, 1); atomic_set(&cookie->n_children, 0); - /* We keep the active count elevated until relinquishment to prevent an - * attempt to wake up every time the object operations queue quiesces. 
- */ - atomic_set(&cookie->n_active, 1); - atomic_inc(&parent->usage); atomic_inc(&parent->n_children); @@ -182,6 +177,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie) cookie->flags = (1 << FSCACHE_COOKIE_LOOKING_UP) | + (1 << FSCACHE_COOKIE_CREATING) | (1 << FSCACHE_COOKIE_NO_DATA_YET); /* ask the cache to allocate objects for this cookie and its parent @@ -209,7 +205,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie) /* initiate the process of looking up all the objects in the chain * (done by fscache_initialise_object()) */ - fscache_raise_event(object, FSCACHE_OBJECT_EV_NEW_CHILD); + fscache_enqueue_object(object); spin_unlock(&cookie->lock); @@ -289,7 +285,7 @@ static int fscache_alloc_object(struct fscache_cache *cache, object_already_extant: ret = -ENOBUFS; - if (fscache_object_is_dead(object)) { + if (object->state >= FSCACHE_OBJECT_DYING) { spin_unlock(&cookie->lock); goto error; } @@ -325,7 +321,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie, ret = -EEXIST; hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) { if (p->cache == object->cache) { - if (fscache_object_is_dying(p)) + if (p->state >= FSCACHE_OBJECT_DYING) ret = -ENOBUFS; goto cant_attach_object; } @@ -336,7 +332,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie, hlist_for_each_entry(p, &cookie->parent->backing_objects, cookie_link) { if (p->cache == object->cache) { - if (fscache_object_is_dying(p)) { + if (p->state >= FSCACHE_OBJECT_DYING) { ret = -ENOBUFS; spin_unlock(&cookie->parent->lock); goto cant_attach_object; @@ -404,7 +400,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie) object = hlist_entry(cookie->backing_objects.first, struct fscache_object, cookie_link); - if (fscache_object_is_live(object)) + if (object->state < FSCACHE_OBJECT_DYING) fscache_raise_event( object, FSCACHE_OBJECT_EV_INVALIDATE); } @@ -471,7 +467,9 @@ EXPORT_SYMBOL(__fscache_update_cookie); */ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) { + struct fscache_cache *cache; struct fscache_object *object; + unsigned long event; fscache_stat(&fscache_n_relinquishes); if (retire) @@ -483,11 +481,8 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) return; } - _enter("%p{%s,%p,%d},%d", - cookie, cookie->def->name, cookie->netfs_data, - atomic_read(&cookie->n_active), retire); - - ASSERTCMP(atomic_read(&cookie->n_active), >, 0); + _enter("%p{%s,%p},%d", + cookie, cookie->def->name, cookie->netfs_data, retire); if (atomic_read(&cookie->n_children) != 0) { printk(KERN_ERR "FS-Cache: Cookie '%s' still has children\n", @@ -495,28 +490,62 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) BUG(); } - /* No further netfs-accessing operations on this cookie permitted */ - set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags); - if (retire) - set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags); + /* wait for the cookie to finish being instantiated (or to fail) */ + if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) { + fscache_stat(&fscache_n_relinquishes_waitcrt); + wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING, + fscache_wait_bit, TASK_UNINTERRUPTIBLE); + } + event = retire ? 
FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE; + +try_again: spin_lock(&cookie->lock); - hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) { - fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL); - } - spin_unlock(&cookie->lock); - /* Wait for cessation of activity requiring access to the netfs (when - * n_active reaches 0). - */ - if (!atomic_dec_and_test(&cookie->n_active)) - wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t, - TASK_UNINTERRUPTIBLE); + /* break links with all the active objects */ + while (!hlist_empty(&cookie->backing_objects)) { + int n_reads; + object = hlist_entry(cookie->backing_objects.first, + struct fscache_object, + cookie_link); + + _debug("RELEASE OBJ%x", object->debug_id); + + set_bit(FSCACHE_COOKIE_WAITING_ON_READS, &cookie->flags); + n_reads = atomic_read(&object->n_reads); + if (n_reads) { + int n_ops = object->n_ops; + int n_in_progress = object->n_in_progress; + spin_unlock(&cookie->lock); + printk(KERN_ERR "FS-Cache:" + " Cookie '%s' still has %d outstanding reads (%d,%d)\n", + cookie->def->name, + n_reads, n_ops, n_in_progress); + wait_on_bit(&cookie->flags, FSCACHE_COOKIE_WAITING_ON_READS, + fscache_wait_bit, TASK_UNINTERRUPTIBLE); + printk("Wait finished\n"); + goto try_again; + } + + /* detach each cache object from the object cookie */ + spin_lock(&object->lock); + hlist_del_init(&object->cookie_link); + + cache = object->cache; + object->cookie = NULL; + fscache_raise_event(object, event); + spin_unlock(&object->lock); - /* Clear pointers back to the netfs */ + if (atomic_dec_and_test(&cookie->usage)) + /* the cookie refcount shouldn't be reduced to 0 yet */ + BUG(); + } + + /* detach pointers back to the netfs */ cookie->netfs_data = NULL; cookie->def = NULL; - BUG_ON(cookie->stores.rnode); + + spin_unlock(&cookie->lock); if (cookie->parent) { ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0); @@ -524,7 +553,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) atomic_dec(&cookie->parent->n_children); } - /* Dispose of the netfs's link to the cookie */ + /* finally dispose of the cookie */ ASSERTCMP(atomic_read(&cookie->usage), >, 0); fscache_cookie_put(cookie); diff --git a/trunk/fs/fscache/fsdef.c b/trunk/fs/fscache/fsdef.c index 10a2ade0bdf8..f5b4baee7352 100644 --- a/trunk/fs/fscache/fsdef.c +++ b/trunk/fs/fscache/fsdef.c @@ -55,7 +55,6 @@ static struct fscache_cookie_def fscache_fsdef_index_def = { struct fscache_cookie fscache_fsdef_index = { .usage = ATOMIC_INIT(1), - .n_active = ATOMIC_INIT(1), .lock = __SPIN_LOCK_UNLOCKED(fscache_fsdef_index.lock), .backing_objects = HLIST_HEAD_INIT, .def = &fscache_fsdef_index_def, diff --git a/trunk/fs/fscache/internal.h b/trunk/fs/fscache/internal.h index 12d505bedb5c..ee38fef4be51 100644 --- a/trunk/fs/fscache/internal.h +++ b/trunk/fs/fscache/internal.h @@ -93,11 +93,14 @@ static inline bool fscache_object_congested(void) extern int fscache_wait_bit(void *); extern int fscache_wait_bit_interruptible(void *); -extern int fscache_wait_atomic_t(atomic_t *); /* * object.c */ +extern const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5]; + +extern void fscache_withdrawing_object(struct fscache_cache *, + struct fscache_object *); extern void fscache_enqueue_object(struct fscache_object *); /* @@ -107,10 +110,8 @@ extern void fscache_enqueue_object(struct fscache_object *); extern const struct file_operations fscache_objlist_fops; extern void fscache_objlist_add(struct fscache_object *); -extern void fscache_objlist_remove(struct 
fscache_object *); #else #define fscache_objlist_add(object) do {} while(0) -#define fscache_objlist_remove(object) do {} while(0) #endif /* @@ -290,10 +291,6 @@ static inline void fscache_raise_event(struct fscache_object *object, unsigned event) { BUG_ON(event >= NR_FSCACHE_OBJECT_EVENTS); -#if 0 - printk("*** fscache_raise_event(OBJ%d{%lx},%x)\n", - object->debug_id, object->event_mask, (1 << event)); -#endif if (!test_and_set_bit(event, &object->events) && test_bit(event, &object->event_mask)) fscache_enqueue_object(object); diff --git a/trunk/fs/fscache/main.c b/trunk/fs/fscache/main.c index 7c27907e650c..f9d856773f79 100644 --- a/trunk/fs/fscache/main.c +++ b/trunk/fs/fscache/main.c @@ -205,6 +205,7 @@ int fscache_wait_bit(void *flags) schedule(); return 0; } +EXPORT_SYMBOL(fscache_wait_bit); /* * wait_on_bit() sleep function for interruptible waiting @@ -214,12 +215,4 @@ int fscache_wait_bit_interruptible(void *flags) schedule(); return signal_pending(current); } - -/* - * wait_on_atomic_t() sleep function for uninterruptible waiting - */ -int fscache_wait_atomic_t(atomic_t *p) -{ - schedule(); - return 0; -} +EXPORT_SYMBOL(fscache_wait_bit_interruptible); diff --git a/trunk/fs/fscache/netfs.c b/trunk/fs/fscache/netfs.c index b1bb6117473a..e028b8eb1c40 100644 --- a/trunk/fs/fscache/netfs.c +++ b/trunk/fs/fscache/netfs.c @@ -40,7 +40,6 @@ int __fscache_register_netfs(struct fscache_netfs *netfs) /* initialise the primary index cookie */ atomic_set(&netfs->primary_index->usage, 1); atomic_set(&netfs->primary_index->n_children, 0); - atomic_set(&netfs->primary_index->n_active, 1); netfs->primary_index->def = &fscache_fsdef_netfs_def; netfs->primary_index->parent = &fscache_fsdef_index; diff --git a/trunk/fs/fscache/object-list.c b/trunk/fs/fscache/object-list.c index e1959efad64f..f27c89d17885 100644 --- a/trunk/fs/fscache/object-list.c +++ b/trunk/fs/fscache/object-list.c @@ -70,10 +70,13 @@ void fscache_objlist_add(struct fscache_object *obj) write_unlock(&fscache_object_list_lock); } -/* - * Remove an object from the object list. +/** + * fscache_object_destroy - Note that a cache object is about to be destroyed + * @object: The object to be destroyed + * + * Note the imminent destruction and deallocation of a cache object record. 
*/ -void fscache_objlist_remove(struct fscache_object *obj) +void fscache_object_destroy(struct fscache_object *obj) { write_lock(&fscache_object_list_lock); @@ -82,6 +85,7 @@ void fscache_objlist_remove(struct fscache_object *obj) write_unlock(&fscache_object_list_lock); } +EXPORT_SYMBOL(fscache_object_destroy); /* * find the object in the tree on or after the specified index @@ -162,14 +166,15 @@ static int fscache_objlist_show(struct seq_file *m, void *v) { struct fscache_objlist_data *data = m->private; struct fscache_object *obj = v; - struct fscache_cookie *cookie; unsigned long config = data->config; + uint16_t keylen, auxlen; char _type[3], *type; + bool no_cookie; u8 *buf = data->buf, *p; if ((unsigned long) v == 1) { seq_puts(m, "OBJECT PARENT STAT CHLDN OPS OOP IPR EX READS" - " EM EV FL S" + " EM EV F S" " | NETFS_COOKIE_DEF TY FL NETFS_DATA"); if (config & (FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX)) @@ -188,7 +193,7 @@ static int fscache_objlist_show(struct seq_file *m, void *v) if ((unsigned long) v == 2) { seq_puts(m, "======== ======== ==== ===== === === === == =====" - " == == == =" + " == == = =" " | ================ == == ================"); if (config & (FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX)) @@ -211,11 +216,10 @@ static int fscache_objlist_show(struct seq_file *m, void *v) } \ } while(0) - cookie = obj->cookie; if (~config) { - FILTER(cookie->def, + FILTER(obj->cookie, COOKIE, NOCOOKIE); - FILTER(fscache_object_is_active(obj) || + FILTER(obj->state != FSCACHE_OBJECT_ACTIVE || obj->n_ops != 0 || obj->n_obj_ops != 0 || obj->flags || @@ -231,10 +235,10 @@ static int fscache_objlist_show(struct seq_file *m, void *v) } seq_printf(m, - "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %2lx %1x | ", + "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %1lx %1x | ", obj->debug_id, obj->parent ? 
obj->parent->debug_id : -1, - obj->state->short_name, + fscache_object_states_short[obj->state], obj->n_children, obj->n_ops, obj->n_obj_ops, @@ -246,40 +250,48 @@ static int fscache_objlist_show(struct seq_file *m, void *v) obj->flags, work_busy(&obj->work)); - if (fscache_use_cookie(obj)) { - uint16_t keylen = 0, auxlen = 0; + no_cookie = true; + keylen = auxlen = 0; + if (obj->cookie) { + spin_lock(&obj->lock); + if (obj->cookie) { + switch (obj->cookie->def->type) { + case 0: + type = "IX"; + break; + case 1: + type = "DT"; + break; + default: + sprintf(_type, "%02u", + obj->cookie->def->type); + type = _type; + break; + } - switch (cookie->def->type) { - case 0: - type = "IX"; - break; - case 1: - type = "DT"; - break; - default: - sprintf(_type, "%02u", cookie->def->type); - type = _type; - break; + seq_printf(m, "%-16s %s %2lx %16p", + obj->cookie->def->name, + type, + obj->cookie->flags, + obj->cookie->netfs_data); + + if (obj->cookie->def->get_key && + config & FSCACHE_OBJLIST_CONFIG_KEY) + keylen = obj->cookie->def->get_key( + obj->cookie->netfs_data, + buf, 400); + + if (obj->cookie->def->get_aux && + config & FSCACHE_OBJLIST_CONFIG_AUX) + auxlen = obj->cookie->def->get_aux( + obj->cookie->netfs_data, + buf + keylen, 512 - keylen); + + no_cookie = false; } + spin_unlock(&obj->lock); - seq_printf(m, "%-16s %s %2lx %16p", - cookie->def->name, - type, - cookie->flags, - cookie->netfs_data); - - if (cookie->def->get_key && - config & FSCACHE_OBJLIST_CONFIG_KEY) - keylen = cookie->def->get_key(cookie->netfs_data, - buf, 400); - - if (cookie->def->get_aux && - config & FSCACHE_OBJLIST_CONFIG_AUX) - auxlen = cookie->def->get_aux(cookie->netfs_data, - buf + keylen, 512 - keylen); - fscache_unuse_cookie(obj); - - if (keylen > 0 || auxlen > 0) { + if (!no_cookie && (keylen > 0 || auxlen > 0)) { seq_printf(m, " "); for (p = buf; keylen > 0; keylen--) seq_printf(m, "%02x", *p++); @@ -290,11 +302,12 @@ static int fscache_objlist_show(struct seq_file *m, void *v) seq_printf(m, "%02x", *p++); } } + } + if (no_cookie) + seq_printf(m, "\n"); + else seq_printf(m, "\n"); - } else { - seq_printf(m, "\n"); - } return 0; } diff --git a/trunk/fs/fscache/object.c b/trunk/fs/fscache/object.c index 86d75a60b20c..50d41c180211 100644 --- a/trunk/fs/fscache/object.c +++ b/trunk/fs/fscache/object.c @@ -15,131 +15,52 @@ #define FSCACHE_DEBUG_LEVEL COOKIE #include #include -#include #include "internal.h" -static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int); -static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int); -static const struct fscache_state *fscache_drop_object(struct fscache_object *, int); -static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int); -static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int); -static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int); -static const struct fscache_state *fscache_kill_object(struct fscache_object *, int); -static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int); -static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int); -static const struct fscache_state *fscache_object_available(struct fscache_object *, int); -static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int); -static const struct fscache_state *fscache_update_object(struct fscache_object *, int); - -#define __STATE_NAME(n) 
fscache_osm_##n -#define STATE(n) (&__STATE_NAME(n)) - -/* - * Define a work state. Work states are execution states. No event processing - * is performed by them. The function attached to a work state returns a - * pointer indicating the next state to which the state machine should - * transition. Returning NO_TRANSIT repeats the current state, but goes back - * to the scheduler first. - */ -#define WORK_STATE(n, sn, f) \ - const struct fscache_state __STATE_NAME(n) = { \ - .name = #n, \ - .short_name = sn, \ - .work = f \ - } - -/* - * Returns from work states. - */ -#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); }) - -#define NO_TRANSIT ((struct fscache_state *)NULL) - -/* - * Define a wait state. Wait states are event processing states. No execution - * is performed by them. Wait states are just tables of "if event X occurs, - * clear it and transition to state Y". The dispatcher returns to the - * scheduler if none of the events in which the wait state has an interest are - * currently pending. - */ -#define WAIT_STATE(n, sn, ...) \ - const struct fscache_state __STATE_NAME(n) = { \ - .name = #n, \ - .short_name = sn, \ - .work = NULL, \ - .transitions = { __VA_ARGS__, { 0, NULL } } \ - } - -#define TRANSIT_TO(state, emask) \ - { .events = (emask), .transit_to = STATE(state) } - -/* - * The object state machine. - */ -static WORK_STATE(INIT_OBJECT, "INIT", fscache_initialise_object); -static WORK_STATE(PARENT_READY, "PRDY", fscache_parent_ready); -static WORK_STATE(ABORT_INIT, "ABRT", fscache_abort_initialisation); -static WORK_STATE(LOOK_UP_OBJECT, "LOOK", fscache_look_up_object); -static WORK_STATE(CREATE_OBJECT, "CRTO", fscache_look_up_object); -static WORK_STATE(OBJECT_AVAILABLE, "AVBL", fscache_object_available); -static WORK_STATE(JUMPSTART_DEPS, "JUMP", fscache_jumpstart_dependents); - -static WORK_STATE(INVALIDATE_OBJECT, "INVL", fscache_invalidate_object); -static WORK_STATE(UPDATE_OBJECT, "UPDT", fscache_update_object); - -static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure); -static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object); -static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents); -static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object); -static WORK_STATE(OBJECT_DEAD, "DEAD", (void*)2UL); - -static WAIT_STATE(WAIT_FOR_INIT, "?INI", - TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD)); - -static WAIT_STATE(WAIT_FOR_PARENT, "?PRN", - TRANSIT_TO(PARENT_READY, 1 << FSCACHE_OBJECT_EV_PARENT_READY)); - -static WAIT_STATE(WAIT_FOR_CMD, "?CMD", - TRANSIT_TO(INVALIDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_INVALIDATE), - TRANSIT_TO(UPDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_UPDATE), - TRANSIT_TO(JUMPSTART_DEPS, 1 << FSCACHE_OBJECT_EV_NEW_CHILD)); - -static WAIT_STATE(WAIT_FOR_CLEARANCE, "?CLR", - TRANSIT_TO(KILL_OBJECT, 1 << FSCACHE_OBJECT_EV_CLEARED)); - -/* - * Out-of-band event transition tables. These are for handling unexpected - * events, such as an I/O error. If an OOB event occurs, the state machine - * clears and disables the event and forces a transition to the nominated work - * state (acurrently executing work states will complete first). - * - * In such a situation, object->state remembers the state the machine should - * have been in/gone to and returning NO_TRANSIT returns to that. 
- */ -static const struct fscache_transition fscache_osm_init_oob[] = { - TRANSIT_TO(ABORT_INIT, - (1 << FSCACHE_OBJECT_EV_ERROR) | - (1 << FSCACHE_OBJECT_EV_KILL)), - { 0, NULL } -}; - -static const struct fscache_transition fscache_osm_lookup_oob[] = { - TRANSIT_TO(LOOKUP_FAILURE, - (1 << FSCACHE_OBJECT_EV_ERROR) | - (1 << FSCACHE_OBJECT_EV_KILL)), - { 0, NULL } +const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = { + [FSCACHE_OBJECT_INIT] = "OBJECT_INIT", + [FSCACHE_OBJECT_LOOKING_UP] = "OBJECT_LOOKING_UP", + [FSCACHE_OBJECT_CREATING] = "OBJECT_CREATING", + [FSCACHE_OBJECT_AVAILABLE] = "OBJECT_AVAILABLE", + [FSCACHE_OBJECT_ACTIVE] = "OBJECT_ACTIVE", + [FSCACHE_OBJECT_INVALIDATING] = "OBJECT_INVALIDATING", + [FSCACHE_OBJECT_UPDATING] = "OBJECT_UPDATING", + [FSCACHE_OBJECT_DYING] = "OBJECT_DYING", + [FSCACHE_OBJECT_LC_DYING] = "OBJECT_LC_DYING", + [FSCACHE_OBJECT_ABORT_INIT] = "OBJECT_ABORT_INIT", + [FSCACHE_OBJECT_RELEASING] = "OBJECT_RELEASING", + [FSCACHE_OBJECT_RECYCLING] = "OBJECT_RECYCLING", + [FSCACHE_OBJECT_WITHDRAWING] = "OBJECT_WITHDRAWING", + [FSCACHE_OBJECT_DEAD] = "OBJECT_DEAD", }; - -static const struct fscache_transition fscache_osm_run_oob[] = { - TRANSIT_TO(KILL_OBJECT, - (1 << FSCACHE_OBJECT_EV_ERROR) | - (1 << FSCACHE_OBJECT_EV_KILL)), - { 0, NULL } +EXPORT_SYMBOL(fscache_object_states); + +const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = { + [FSCACHE_OBJECT_INIT] = "INIT", + [FSCACHE_OBJECT_LOOKING_UP] = "LOOK", + [FSCACHE_OBJECT_CREATING] = "CRTN", + [FSCACHE_OBJECT_AVAILABLE] = "AVBL", + [FSCACHE_OBJECT_ACTIVE] = "ACTV", + [FSCACHE_OBJECT_INVALIDATING] = "INVL", + [FSCACHE_OBJECT_UPDATING] = "UPDT", + [FSCACHE_OBJECT_DYING] = "DYNG", + [FSCACHE_OBJECT_LC_DYING] = "LCDY", + [FSCACHE_OBJECT_ABORT_INIT] = "ABTI", + [FSCACHE_OBJECT_RELEASING] = "RELS", + [FSCACHE_OBJECT_RECYCLING] = "RCYC", + [FSCACHE_OBJECT_WITHDRAWING] = "WTHD", + [FSCACHE_OBJECT_DEAD] = "DEAD", }; static int fscache_get_object(struct fscache_object *); static void fscache_put_object(struct fscache_object *); -static bool fscache_enqueue_dependents(struct fscache_object *, int); +static void fscache_initialise_object(struct fscache_object *); +static void fscache_lookup_object(struct fscache_object *); +static void fscache_object_available(struct fscache_object *); +static void fscache_invalidate_object(struct fscache_object *); +static void fscache_release_object(struct fscache_object *); +static void fscache_withdraw_object(struct fscache_object *); +static void fscache_enqueue_dependents(struct fscache_object *); static void fscache_dequeue_object(struct fscache_object *); /* @@ -154,116 +75,295 @@ static inline void fscache_done_parent_op(struct fscache_object *object) object->debug_id, parent->debug_id, parent->n_ops); spin_lock_nested(&parent->lock, 1); - parent->n_obj_ops--; parent->n_ops--; + parent->n_obj_ops--; if (parent->n_ops == 0) fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED); spin_unlock(&parent->lock); } /* - * Object state machine dispatcher. + * Notify netfs of invalidation completion. 
*/ -static void fscache_object_sm_dispatcher(struct fscache_object *object) +static inline void fscache_invalidation_complete(struct fscache_cookie *cookie) { - const struct fscache_transition *t; - const struct fscache_state *state, *new_state; - unsigned long events, event_mask; - int event = -1; + if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) + wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING); +} + +/* + * process events that have been sent to an object's state machine + * - initiates parent lookup + * - does object lookup + * - does object creation + * - does object recycling and retirement + * - does object withdrawal + */ +static void fscache_object_state_machine(struct fscache_object *object) +{ + enum fscache_object_state new_state; + struct fscache_cookie *cookie; + int event; ASSERT(object != NULL); _enter("{OBJ%x,%s,%lx}", - object->debug_id, object->state->name, object->events); - - event_mask = object->event_mask; -restart: - object->event_mask = 0; /* Mask normal event handling */ - state = object->state; -restart_masked: - events = object->events; - - /* Handle any out-of-band events (typically an error) */ - if (events & object->oob_event_mask) { - _debug("{OBJ%x} oob %lx", - object->debug_id, events & object->oob_event_mask); - for (t = object->oob_table; t->events; t++) { - if (events & t->events) { - state = t->transit_to; - ASSERT(state->work != NULL); - event = fls(events & t->events) - 1; - __clear_bit(event, &object->oob_event_mask); - clear_bit(event, &object->events); - goto execute_work_state; - } + object->debug_id, fscache_object_states[object->state], + object->events); + + switch (object->state) { + /* wait for the parent object to become ready */ + case FSCACHE_OBJECT_INIT: + object->event_mask = + FSCACHE_OBJECT_EVENTS_MASK & + ~(1 << FSCACHE_OBJECT_EV_CLEARED); + fscache_initialise_object(object); + goto done; + + /* look up the object metadata on disk */ + case FSCACHE_OBJECT_LOOKING_UP: + fscache_lookup_object(object); + goto lookup_transit; + + /* create the object metadata on disk */ + case FSCACHE_OBJECT_CREATING: + fscache_lookup_object(object); + goto lookup_transit; + + /* handle an object becoming available; start pending + * operations and queue dependent operations for processing */ + case FSCACHE_OBJECT_AVAILABLE: + fscache_object_available(object); + goto active_transit; + + /* normal running state */ + case FSCACHE_OBJECT_ACTIVE: + goto active_transit; + + /* Invalidate an object on disk */ + case FSCACHE_OBJECT_INVALIDATING: + clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events); + fscache_stat(&fscache_n_invalidates_run); + fscache_stat(&fscache_n_cop_invalidate_object); + fscache_invalidate_object(object); + fscache_stat_d(&fscache_n_cop_invalidate_object); + fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE); + goto active_transit; + + /* update the object metadata on disk */ + case FSCACHE_OBJECT_UPDATING: + clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events); + fscache_stat(&fscache_n_updates_run); + fscache_stat(&fscache_n_cop_update_object); + object->cache->ops->update_object(object); + fscache_stat_d(&fscache_n_cop_update_object); + goto active_transit; + + /* handle an object dying during lookup or creation */ + case FSCACHE_OBJECT_LC_DYING: + object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE); + fscache_stat(&fscache_n_cop_lookup_complete); + object->cache->ops->lookup_complete(object); + fscache_stat_d(&fscache_n_cop_lookup_complete); + + spin_lock(&object->lock); + object->state = 
FSCACHE_OBJECT_DYING; + cookie = object->cookie; + if (cookie) { + if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, + &cookie->flags)) + wake_up_bit(&cookie->flags, + FSCACHE_COOKIE_LOOKING_UP); + if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, + &cookie->flags)) + wake_up_bit(&cookie->flags, + FSCACHE_COOKIE_CREATING); } - } + spin_unlock(&object->lock); - /* Wait states are just transition tables */ - if (!state->work) { - if (events & event_mask) { - for (t = state->transitions; t->events; t++) { - if (events & t->events) { - new_state = t->transit_to; - event = fls(events & t->events) - 1; - clear_bit(event, &object->events); - _debug("{OBJ%x} ev %d: %s -> %s", - object->debug_id, event, - state->name, new_state->name); - object->state = state = new_state; - goto execute_work_state; - } - } + fscache_done_parent_op(object); - /* The event mask didn't include all the tabled bits */ - BUG(); + /* wait for completion of all active operations on this object + * and the death of all child objects of this object */ + case FSCACHE_OBJECT_DYING: + dying: + clear_bit(FSCACHE_OBJECT_EV_CLEARED, &object->events); + spin_lock(&object->lock); + _debug("dying OBJ%x {%d,%d}", + object->debug_id, object->n_ops, object->n_children); + if (object->n_ops == 0 && object->n_children == 0) { + object->event_mask &= + ~(1 << FSCACHE_OBJECT_EV_CLEARED); + object->event_mask |= + (1 << FSCACHE_OBJECT_EV_WITHDRAW) | + (1 << FSCACHE_OBJECT_EV_RETIRE) | + (1 << FSCACHE_OBJECT_EV_RELEASE) | + (1 << FSCACHE_OBJECT_EV_ERROR); + } else { + object->event_mask &= + ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) | + (1 << FSCACHE_OBJECT_EV_RETIRE) | + (1 << FSCACHE_OBJECT_EV_RELEASE) | + (1 << FSCACHE_OBJECT_EV_ERROR)); + object->event_mask |= + 1 << FSCACHE_OBJECT_EV_CLEARED; } - /* Randomly woke up */ - goto unmask_events; - } + spin_unlock(&object->lock); + fscache_enqueue_dependents(object); + fscache_start_operations(object); + goto terminal_transit; -execute_work_state: - _debug("{OBJ%x} exec %s", object->debug_id, state->name); + /* handle an abort during initialisation */ + case FSCACHE_OBJECT_ABORT_INIT: + _debug("handle abort init %lx", object->events); + object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE); - new_state = state->work(object, event); - event = -1; - if (new_state == NO_TRANSIT) { - _debug("{OBJ%x} %s notrans", object->debug_id, state->name); - fscache_enqueue_object(object); - event_mask = object->oob_event_mask; - goto unmask_events; + spin_lock(&object->lock); + fscache_dequeue_object(object); + + object->state = FSCACHE_OBJECT_DYING; + if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, + &object->cookie->flags)) + wake_up_bit(&object->cookie->flags, + FSCACHE_COOKIE_CREATING); + spin_unlock(&object->lock); + goto dying; + + /* handle the netfs releasing an object and possibly marking it + * obsolete too */ + case FSCACHE_OBJECT_RELEASING: + case FSCACHE_OBJECT_RECYCLING: + object->event_mask &= + ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) | + (1 << FSCACHE_OBJECT_EV_RETIRE) | + (1 << FSCACHE_OBJECT_EV_RELEASE) | + (1 << FSCACHE_OBJECT_EV_ERROR)); + fscache_release_object(object); + spin_lock(&object->lock); + object->state = FSCACHE_OBJECT_DEAD; + spin_unlock(&object->lock); + fscache_stat(&fscache_n_object_dead); + goto terminal_transit; + + /* handle the parent cache of this object being withdrawn from + * active service */ + case FSCACHE_OBJECT_WITHDRAWING: + object->event_mask &= + ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) | + (1 << FSCACHE_OBJECT_EV_RETIRE) | + (1 << FSCACHE_OBJECT_EV_RELEASE) | + (1 << 
FSCACHE_OBJECT_EV_ERROR)); + fscache_withdraw_object(object); + spin_lock(&object->lock); + object->state = FSCACHE_OBJECT_DEAD; + spin_unlock(&object->lock); + fscache_stat(&fscache_n_object_dead); + goto terminal_transit; + + /* complain about the object being woken up once it is + * deceased */ + case FSCACHE_OBJECT_DEAD: + printk(KERN_ERR "FS-Cache:" + " Unexpected event in dead state %lx\n", + object->events & object->event_mask); + BUG(); + + default: + printk(KERN_ERR "FS-Cache: Unknown object state %u\n", + object->state); + BUG(); } - _debug("{OBJ%x} %s -> %s", - object->debug_id, state->name, new_state->name); - object->state = state = new_state; + /* determine the transition from a lookup state */ +lookup_transit: + event = fls(object->events & object->event_mask) - 1; + switch (event) { + case FSCACHE_OBJECT_EV_WITHDRAW: + case FSCACHE_OBJECT_EV_RETIRE: + case FSCACHE_OBJECT_EV_RELEASE: + case FSCACHE_OBJECT_EV_ERROR: + new_state = FSCACHE_OBJECT_LC_DYING; + goto change_state; + case FSCACHE_OBJECT_EV_INVALIDATE: + new_state = FSCACHE_OBJECT_INVALIDATING; + goto change_state; + case FSCACHE_OBJECT_EV_REQUEUE: + goto done; + case -1: + goto done; /* sleep until event */ + default: + goto unsupported_event; + } - if (state->work) { - if (unlikely(state->work == ((void *)2UL))) { - _leave(" [dead]"); - return; - } - goto restart_masked; + /* determine the transition from an active state */ +active_transit: + event = fls(object->events & object->event_mask) - 1; + switch (event) { + case FSCACHE_OBJECT_EV_WITHDRAW: + case FSCACHE_OBJECT_EV_RETIRE: + case FSCACHE_OBJECT_EV_RELEASE: + case FSCACHE_OBJECT_EV_ERROR: + new_state = FSCACHE_OBJECT_DYING; + goto change_state; + case FSCACHE_OBJECT_EV_INVALIDATE: + new_state = FSCACHE_OBJECT_INVALIDATING; + goto change_state; + case FSCACHE_OBJECT_EV_UPDATE: + new_state = FSCACHE_OBJECT_UPDATING; + goto change_state; + case -1: + new_state = FSCACHE_OBJECT_ACTIVE; + goto change_state; /* sleep until event */ + default: + goto unsupported_event; + } + + /* determine the transition from a terminal state */ +terminal_transit: + event = fls(object->events & object->event_mask) - 1; + switch (event) { + case FSCACHE_OBJECT_EV_WITHDRAW: + new_state = FSCACHE_OBJECT_WITHDRAWING; + goto change_state; + case FSCACHE_OBJECT_EV_RETIRE: + new_state = FSCACHE_OBJECT_RECYCLING; + goto change_state; + case FSCACHE_OBJECT_EV_RELEASE: + new_state = FSCACHE_OBJECT_RELEASING; + goto change_state; + case FSCACHE_OBJECT_EV_ERROR: + new_state = FSCACHE_OBJECT_WITHDRAWING; + goto change_state; + case FSCACHE_OBJECT_EV_CLEARED: + new_state = FSCACHE_OBJECT_DYING; + goto change_state; + case -1: + goto done; /* sleep until event */ + default: + goto unsupported_event; } - /* Transited to wait state */ - event_mask = object->oob_event_mask; - for (t = state->transitions; t->events; t++) - event_mask |= t->events; - -unmask_events: - object->event_mask = event_mask; - smp_mb(); - events = object->events; - if (events & event_mask) - goto restart; - _leave(" [msk %lx]", event_mask); +change_state: + spin_lock(&object->lock); + object->state = new_state; + spin_unlock(&object->lock); + +done: + _leave(" [->%s]", fscache_object_states[object->state]); + return; + +unsupported_event: + printk(KERN_ERR "FS-Cache:" + " Unsupported event %d [%lx/%lx] in state %s\n", + event, object->events, object->event_mask, + fscache_object_states[object->state]); + BUG(); } /* * execute an object */ -static void fscache_object_work_func(struct work_struct *work) +void 
fscache_object_work_func(struct work_struct *work) { struct fscache_object *object = container_of(work, struct fscache_object, work); @@ -272,70 +372,14 @@ static void fscache_object_work_func(struct work_struct *work) _enter("{OBJ%x}", object->debug_id); start = jiffies; - fscache_object_sm_dispatcher(object); + fscache_object_state_machine(object); fscache_hist(fscache_objs_histogram, start); + if (object->events & object->event_mask) + fscache_enqueue_object(object); + clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); fscache_put_object(object); } - -/** - * fscache_object_init - Initialise a cache object description - * @object: Object description - * @cookie: Cookie object will be attached to - * @cache: Cache in which backing object will be found - * - * Initialise a cache object description to its basic values. - * - * See Documentation/filesystems/caching/backend-api.txt for a complete - * description. - */ -void fscache_object_init(struct fscache_object *object, - struct fscache_cookie *cookie, - struct fscache_cache *cache) -{ - const struct fscache_transition *t; - - atomic_inc(&cache->object_count); - - object->state = STATE(WAIT_FOR_INIT); - object->oob_table = fscache_osm_init_oob; - object->flags = 1 << FSCACHE_OBJECT_IS_LIVE; - spin_lock_init(&object->lock); - INIT_LIST_HEAD(&object->cache_link); - INIT_HLIST_NODE(&object->cookie_link); - INIT_WORK(&object->work, fscache_object_work_func); - INIT_LIST_HEAD(&object->dependents); - INIT_LIST_HEAD(&object->dep_link); - INIT_LIST_HEAD(&object->pending_ops); - object->n_children = 0; - object->n_ops = object->n_in_progress = object->n_exclusive = 0; - object->events = 0; - object->store_limit = 0; - object->store_limit_l = 0; - object->cache = cache; - object->cookie = cookie; - object->parent = NULL; - - object->oob_event_mask = 0; - for (t = object->oob_table; t->events; t++) - object->oob_event_mask |= t->events; - object->event_mask = object->oob_event_mask; - for (t = object->state->transitions; t->events; t++) - object->event_mask |= t->events; -} -EXPORT_SYMBOL(fscache_object_init); - -/* - * Abort object initialisation before we start it. 
- */ -static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object, - int event) -{ - _enter("{OBJ%x},%d", object->debug_id, event); - - object->oob_event_mask = 0; - fscache_dequeue_object(object); - return transit_to(KILL_OBJECT); -} +EXPORT_SYMBOL(fscache_object_work_func); /* * initialise an object @@ -343,136 +387,130 @@ static const struct fscache_state *fscache_abort_initialisation(struct fscache_o * immediately to do a creation * - we may need to start the process of creating a parent and we need to wait * for the parent's lookup and creation to complete if it's not there yet + * - an object's cookie is pinned until we clear FSCACHE_COOKIE_CREATING on the + * leaf-most cookies of the object and all its children */ -static const struct fscache_state *fscache_initialise_object(struct fscache_object *object, - int event) +static void fscache_initialise_object(struct fscache_object *object) { struct fscache_object *parent; - bool success; - - _enter("{OBJ%x},%d", object->debug_id, event); - - ASSERT(list_empty(&object->dep_link)); - - parent = object->parent; - if (!parent) { - _leave(" [no parent]"); - return transit_to(DROP_OBJECT); - } - _debug("parent: %s of:%lx", parent->state->name, parent->flags); + _enter(""); + ASSERT(object->cookie != NULL); + ASSERT(object->cookie->parent != NULL); - if (fscache_object_is_dying(parent)) { - _leave(" [bad parent]"); - return transit_to(DROP_OBJECT); + if (object->events & ((1 << FSCACHE_OBJECT_EV_ERROR) | + (1 << FSCACHE_OBJECT_EV_RELEASE) | + (1 << FSCACHE_OBJECT_EV_RETIRE) | + (1 << FSCACHE_OBJECT_EV_WITHDRAW))) { + _debug("abort init %lx", object->events); + spin_lock(&object->lock); + object->state = FSCACHE_OBJECT_ABORT_INIT; + spin_unlock(&object->lock); + return; } - if (fscache_object_is_available(parent)) { - _leave(" [ready]"); - return transit_to(PARENT_READY); - } + spin_lock(&object->cookie->lock); + spin_lock_nested(&object->cookie->parent->lock, 1); - _debug("wait"); + parent = object->parent; + if (!parent) { + _debug("no parent"); + set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events); + } else { + spin_lock(&object->lock); + spin_lock_nested(&parent->lock, 1); + _debug("parent %s", fscache_object_states[parent->state]); + + if (parent->state >= FSCACHE_OBJECT_DYING) { + _debug("bad parent"); + set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events); + } else if (parent->state < FSCACHE_OBJECT_AVAILABLE) { + _debug("wait"); + + /* we may get woken up in this state by child objects + * binding on to us, so we need to make sure we don't + * add ourself to the list multiple times */ + if (list_empty(&object->dep_link)) { + fscache_stat(&fscache_n_cop_grab_object); + object->cache->ops->grab_object(object); + fscache_stat_d(&fscache_n_cop_grab_object); + list_add(&object->dep_link, + &parent->dependents); + + /* fscache_acquire_non_index_cookie() uses this + * to wake the chain up */ + if (parent->state == FSCACHE_OBJECT_INIT) + fscache_enqueue_object(parent); + } + } else { + _debug("go"); + parent->n_ops++; + parent->n_obj_ops++; + object->lookup_jif = jiffies; + object->state = FSCACHE_OBJECT_LOOKING_UP; + set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); + } - spin_lock(&parent->lock); - fscache_stat(&fscache_n_cop_grab_object); - success = false; - if (fscache_object_is_live(parent) && - object->cache->ops->grab_object(object)) { - list_add(&object->dep_link, &parent->dependents); - success = true; - } - fscache_stat_d(&fscache_n_cop_grab_object); - spin_unlock(&parent->lock); - if (!success) 
{ - _leave(" [grab failed]"); - return transit_to(DROP_OBJECT); + spin_unlock(&parent->lock); + spin_unlock(&object->lock); } - /* fscache_acquire_non_index_cookie() uses this - * to wake the chain up */ - fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD); - _leave(" [wait]"); - return transit_to(WAIT_FOR_PARENT); -} - -/* - * Once the parent object is ready, we should kick off our lookup op. - */ -static const struct fscache_state *fscache_parent_ready(struct fscache_object *object, - int event) -{ - struct fscache_object *parent = object->parent; - - _enter("{OBJ%x},%d", object->debug_id, event); - - ASSERT(parent != NULL); - - spin_lock(&parent->lock); - parent->n_ops++; - parent->n_obj_ops++; - object->lookup_jif = jiffies; - spin_unlock(&parent->lock); - + spin_unlock(&object->cookie->parent->lock); + spin_unlock(&object->cookie->lock); _leave(""); - return transit_to(LOOK_UP_OBJECT); } /* * look an object up in the cache from which it was allocated * - we hold an "access lock" on the parent object, so the parent object cannot * be withdrawn by either party till we've finished + * - an object's cookie is pinned until we clear FSCACHE_COOKIE_CREATING on the + * leaf-most cookies of the object and all its children */ -static const struct fscache_state *fscache_look_up_object(struct fscache_object *object, - int event) +static void fscache_lookup_object(struct fscache_object *object) { struct fscache_cookie *cookie = object->cookie; - struct fscache_object *parent = object->parent; + struct fscache_object *parent; int ret; - _enter("{OBJ%x},%d", object->debug_id, event); - - object->oob_table = fscache_osm_lookup_oob; + _enter(""); + parent = object->parent; ASSERT(parent != NULL); ASSERTCMP(parent->n_ops, >, 0); ASSERTCMP(parent->n_obj_ops, >, 0); /* make sure the parent is still available */ - ASSERT(fscache_object_is_available(parent)); - - if (fscache_object_is_dying(parent) || - test_bit(FSCACHE_IOERROR, &object->cache->flags) || - !fscache_use_cookie(object)) { - _leave(" [unavailable]"); - return transit_to(LOOKUP_FAILURE); + ASSERTCMP(parent->state, >=, FSCACHE_OBJECT_AVAILABLE); + + if (parent->state >= FSCACHE_OBJECT_DYING || + test_bit(FSCACHE_IOERROR, &object->cache->flags)) { + _debug("unavailable"); + set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events); + _leave(""); + return; } - _debug("LOOKUP \"%s\" in \"%s\"", - cookie->def->name, object->cache->tag->name); + _debug("LOOKUP \"%s/%s\" in \"%s\"", + parent->cookie->def->name, cookie->def->name, + object->cache->tag->name); fscache_stat(&fscache_n_object_lookups); fscache_stat(&fscache_n_cop_lookup_object); ret = object->cache->ops->lookup_object(object); fscache_stat_d(&fscache_n_cop_lookup_object); - fscache_unuse_cookie(object); + if (test_bit(FSCACHE_OBJECT_EV_ERROR, &object->events)) + set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags); if (ret == -ETIMEDOUT) { /* probably stuck behind another object, so move this one to * the back of the queue */ fscache_stat(&fscache_n_object_lookups_timed_out); - _leave(" [timeout]"); - return NO_TRANSIT; - } - - if (ret < 0) { - _leave(" [error]"); - return transit_to(LOOKUP_FAILURE); + set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); } - _leave(" [ok]"); - return transit_to(OBJECT_AVAILABLE); + _leave(""); } /** @@ -486,20 +524,32 @@ void fscache_object_lookup_negative(struct fscache_object *object) { struct fscache_cookie *cookie = object->cookie; - _enter("{OBJ%x,%s}", object->debug_id, object->state->name); + _enter("{OBJ%x,%s}", + object->debug_id, 
fscache_object_states[object->state]); - if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) { + spin_lock(&object->lock); + if (object->state == FSCACHE_OBJECT_LOOKING_UP) { fscache_stat(&fscache_n_object_lookups_negative); - /* Allow write requests to begin stacking up and read requests to begin - * returning ENODATA. - */ + /* transit here to allow write requests to begin stacking up + * and read requests to begin returning ENODATA */ + object->state = FSCACHE_OBJECT_CREATING; + spin_unlock(&object->lock); + + set_bit(FSCACHE_COOKIE_PENDING_FILL, &cookie->flags); set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); _debug("wake up lookup %p", &cookie->flags); - clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags); + smp_mb__before_clear_bit(); + clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags); + smp_mb__after_clear_bit(); wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP); + set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); + } else { + ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING); + spin_unlock(&object->lock); } + _leave(""); } EXPORT_SYMBOL(fscache_object_lookup_negative); @@ -518,26 +568,38 @@ void fscache_obtained_object(struct fscache_object *object) { struct fscache_cookie *cookie = object->cookie; - _enter("{OBJ%x,%s}", object->debug_id, object->state->name); + _enter("{OBJ%x,%s}", + object->debug_id, fscache_object_states[object->state]); /* if we were still looking up, then we must have a positive lookup * result, in which case there may be data available */ - if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) { + spin_lock(&object->lock); + if (object->state == FSCACHE_OBJECT_LOOKING_UP) { fscache_stat(&fscache_n_object_lookups_positive); - /* We do (presumably) have data */ - clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); + clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); - /* Allow write requests to begin stacking up and read requests - * to begin shovelling data. 
- */ - clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags); + object->state = FSCACHE_OBJECT_AVAILABLE; + spin_unlock(&object->lock); + + smp_mb__before_clear_bit(); + clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags); + smp_mb__after_clear_bit(); wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP); + set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); } else { + ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING); fscache_stat(&fscache_n_object_created); + + object->state = FSCACHE_OBJECT_AVAILABLE; + spin_unlock(&object->lock); + set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); + smp_wmb(); } - set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags); + if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) + wake_up_bit(&cookie->flags, FSCACHE_COOKIE_CREATING); + _leave(""); } EXPORT_SYMBOL(fscache_obtained_object); @@ -545,15 +607,16 @@ EXPORT_SYMBOL(fscache_obtained_object); /* * handle an object that has just become available */ -static const struct fscache_state *fscache_object_available(struct fscache_object *object, - int event) +static void fscache_object_available(struct fscache_object *object) { - _enter("{OBJ%x},%d", object->debug_id, event); - - object->oob_table = fscache_osm_run_oob; + _enter("{OBJ%x}", object->debug_id); spin_lock(&object->lock); + if (object->cookie && + test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags)) + wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING); + fscache_done_parent_op(object); if (object->n_in_progress == 0) { if (object->n_ops > 0) { @@ -568,158 +631,130 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec fscache_stat(&fscache_n_cop_lookup_complete); object->cache->ops->lookup_complete(object); fscache_stat_d(&fscache_n_cop_lookup_complete); + fscache_enqueue_dependents(object); fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif); fscache_stat(&fscache_n_object_avail); _leave(""); - return transit_to(JUMPSTART_DEPS); } /* - * Wake up this object's dependent objects now that we've become available. + * drop an object's attachments */ -static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object, - int event) +static void fscache_drop_object(struct fscache_object *object) { - _enter("{OBJ%x},%d", object->debug_id, event); - - if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY)) - return NO_TRANSIT; /* Not finished; requeue */ - return transit_to(WAIT_FOR_CMD); -} - -/* - * Handle lookup or creation failute. 
- */ -static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object, - int event) -{ - struct fscache_cookie *cookie; - - _enter("{OBJ%x},%d", object->debug_id, event); - - object->oob_event_mask = 0; - - fscache_stat(&fscache_n_cop_lookup_complete); - object->cache->ops->lookup_complete(object); - fscache_stat_d(&fscache_n_cop_lookup_complete); + struct fscache_object *parent = object->parent; + struct fscache_cache *cache = object->cache; - cookie = object->cookie; - set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags); - if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) - wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP); + _enter("{OBJ%x,%d}", object->debug_id, object->n_children); - fscache_done_parent_op(object); - return transit_to(KILL_OBJECT); -} + ASSERTCMP(object->cookie, ==, NULL); + ASSERT(hlist_unhashed(&object->cookie_link)); -/* - * Wait for completion of all active operations on this object and the death of - * all child objects of this object. - */ -static const struct fscache_state *fscache_kill_object(struct fscache_object *object, - int event) -{ - _enter("{OBJ%x,%d,%d},%d", - object->debug_id, object->n_ops, object->n_children, event); + spin_lock(&cache->object_list_lock); + list_del_init(&object->cache_link); + spin_unlock(&cache->object_list_lock); - clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); - object->oob_event_mask = 0; + fscache_stat(&fscache_n_cop_drop_object); + cache->ops->drop_object(object); + fscache_stat_d(&fscache_n_cop_drop_object); - if (list_empty(&object->dependents) && - object->n_ops == 0 && - object->n_children == 0) - return transit_to(DROP_OBJECT); + if (parent) { + _debug("release parent OBJ%x {%d}", + parent->debug_id, parent->n_children); - if (object->n_in_progress == 0) { - spin_lock(&object->lock); - if (object->n_ops > 0 && object->n_in_progress == 0) - fscache_start_operations(object); - spin_unlock(&object->lock); + spin_lock(&parent->lock); + parent->n_children--; + if (parent->n_children == 0) + fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED); + spin_unlock(&parent->lock); + object->parent = NULL; } - if (!list_empty(&object->dependents)) - return transit_to(KILL_DEPENDENTS); + /* this just shifts the object release to the work processor */ + fscache_put_object(object); - return transit_to(WAIT_FOR_CLEARANCE); + _leave(""); } /* - * Kill dependent objects. 
+ * release or recycle an object that the netfs has discarded */ -static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object, - int event) +static void fscache_release_object(struct fscache_object *object) { - _enter("{OBJ%x},%d", object->debug_id, event); + _enter(""); - if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL)) - return NO_TRANSIT; /* Not finished */ - return transit_to(WAIT_FOR_CLEARANCE); + fscache_drop_object(object); } /* - * Drop an object's attachments + * withdraw an object from active service */ -static const struct fscache_state *fscache_drop_object(struct fscache_object *object, - int event) +static void fscache_withdraw_object(struct fscache_object *object) { - struct fscache_object *parent = object->parent; - struct fscache_cookie *cookie = object->cookie; - struct fscache_cache *cache = object->cache; - bool awaken = false; + struct fscache_cookie *cookie; + bool detached; - _enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event); + _enter(""); - ASSERT(cookie != NULL); - ASSERT(!hlist_unhashed(&object->cookie_link)); + spin_lock(&object->lock); + cookie = object->cookie; + if (cookie) { + /* need to get the cookie lock before the object lock, starting + * from the object pointer */ + atomic_inc(&cookie->usage); + spin_unlock(&object->lock); - /* Make sure the cookie no longer points here and that the netfs isn't - * waiting for us. - */ - spin_lock(&cookie->lock); - hlist_del_init(&object->cookie_link); - if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) - awaken = true; - spin_unlock(&cookie->lock); + detached = false; + spin_lock(&cookie->lock); + spin_lock(&object->lock); - if (awaken) - wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING); + if (object->cookie == cookie) { + hlist_del_init(&object->cookie_link); + object->cookie = NULL; + fscache_invalidation_complete(cookie); + detached = true; + } + spin_unlock(&cookie->lock); + fscache_cookie_put(cookie); + if (detached) + fscache_cookie_put(cookie); + } - /* Prevent a race with our last child, which has to signal EV_CLEARED - * before dropping our spinlock. 
- */ - spin_lock(&object->lock); spin_unlock(&object->lock); - /* Discard from the cache's collection of objects */ - spin_lock(&cache->object_list_lock); - list_del_init(&object->cache_link); - spin_unlock(&cache->object_list_lock); + fscache_drop_object(object); +} - fscache_stat(&fscache_n_cop_drop_object); - cache->ops->drop_object(object); - fscache_stat_d(&fscache_n_cop_drop_object); +/* + * withdraw an object from active service at the behest of the cache + * - need break the links to a cached object cookie + * - called under two situations: + * (1) recycler decides to reclaim an in-use object + * (2) a cache is unmounted + * - have to take care as the cookie can be being relinquished by the netfs + * simultaneously + * - the object is pinned by the caller holding a refcount on it + */ +void fscache_withdrawing_object(struct fscache_cache *cache, + struct fscache_object *object) +{ + bool enqueue = false; - /* The parent object wants to know when all it dependents have gone */ - if (parent) { - _debug("release parent OBJ%x {%d}", - parent->debug_id, parent->n_children); + _enter(",OBJ%x", object->debug_id); - spin_lock(&parent->lock); - parent->n_children--; - if (parent->n_children == 0) - fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED); - spin_unlock(&parent->lock); - object->parent = NULL; + spin_lock(&object->lock); + if (object->state < FSCACHE_OBJECT_WITHDRAWING) { + object->state = FSCACHE_OBJECT_WITHDRAWING; + enqueue = true; } + spin_unlock(&object->lock); - /* this just shifts the object release to the work processor */ - fscache_put_object(object); - fscache_stat(&fscache_n_object_dead); + if (enqueue) + fscache_enqueue_object(object); _leave(""); - return transit_to(OBJECT_DEAD); } /* @@ -736,7 +771,7 @@ static int fscache_get_object(struct fscache_object *object) } /* - * Discard a ref on an object + * discard a ref on a work item */ static void fscache_put_object(struct fscache_object *object) { @@ -745,22 +780,6 @@ static void fscache_put_object(struct fscache_object *object) fscache_stat_d(&fscache_n_cop_put_object); } -/** - * fscache_object_destroy - Note that a cache object is about to be destroyed - * @object: The object to be destroyed - * - * Note the imminent destruction and deallocation of a cache object record. - */ -void fscache_object_destroy(struct fscache_object *object) -{ - fscache_objlist_remove(object); - - /* We can get rid of the cookie now */ - fscache_cookie_put(object->cookie); - object->cookie = NULL; -} -EXPORT_SYMBOL(fscache_object_destroy); - /* * enqueue an object for metadata-type processing */ @@ -784,7 +803,7 @@ void fscache_enqueue_object(struct fscache_object *object) /** * fscache_object_sleep_till_congested - Sleep until object wq is congested - * @timeoutp: Scheduler sleep timeout + * @timoutp: Scheduler sleep timeout * * Allow an object handler to sleep until the object workqueue is congested. * @@ -812,21 +831,18 @@ bool fscache_object_sleep_till_congested(signed long *timeoutp) EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested); /* - * Enqueue the dependents of an object for metadata-type processing. - * - * If we don't manage to finish the list before the scheduler wants to run - * again then return false immediately. We return true if the list was - * cleared. 
+ * enqueue the dependents of an object for metadata-type processing + * - the caller must hold the object's lock + * - this may cause an already locked object to wind up being processed again */ -static bool fscache_enqueue_dependents(struct fscache_object *object, int event) +static void fscache_enqueue_dependents(struct fscache_object *object) { struct fscache_object *dep; - bool ret = true; _enter("{OBJ%x}", object->debug_id); if (list_empty(&object->dependents)) - return true; + return; spin_lock(&object->lock); @@ -835,23 +851,23 @@ static bool fscache_enqueue_dependents(struct fscache_object *object, int event) struct fscache_object, dep_link); list_del_init(&dep->dep_link); - fscache_raise_event(dep, event); + + /* sort onto appropriate lists */ + fscache_enqueue_object(dep); fscache_put_object(dep); - if (!list_empty(&object->dependents) && need_resched()) { - ret = false; - break; - } + if (!list_empty(&object->dependents)) + cond_resched_lock(&object->lock); } spin_unlock(&object->lock); - return ret; } /* * remove an object from whatever queue it's waiting on + * - the caller must hold object->lock */ -static void fscache_dequeue_object(struct fscache_object *object) +void fscache_dequeue_object(struct fscache_object *object) { _enter("{OBJ%x}", object->debug_id); @@ -870,10 +886,7 @@ static void fscache_dequeue_object(struct fscache_object *object) * @data: The auxiliary data for the object * @datalen: The size of the auxiliary data * - * This function consults the netfs about the coherency state of an object. - * The caller must be holding a ref on cookie->n_active (held by - * fscache_look_up_object() on behalf of the cache backend during object lookup - * and creation). + * This function consults the netfs about the coherency state of an object */ enum fscache_checkaux fscache_check_aux(struct fscache_object *object, const void *data, uint16_t datalen) @@ -914,23 +927,12 @@ EXPORT_SYMBOL(fscache_check_aux); /* * Asynchronously invalidate an object. */ -static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object, - int event) +static void fscache_invalidate_object(struct fscache_object *object) { struct fscache_operation *op; struct fscache_cookie *cookie = object->cookie; - _enter("{OBJ%x},%d", object->debug_id, event); - - /* We're going to need the cookie. If the cookie is not available then - * retire the object instead. - */ - if (!fscache_use_cookie(object)) { - ASSERT(object->cookie->stores.rnode == NULL); - set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags); - _leave(" [no cookie]"); - return transit_to(KILL_OBJECT); - } + _enter("{OBJ%x}", object->debug_id); /* Reject any new read/write ops and abort any that are pending. 
*/ fscache_invalidate_writes(cookie); @@ -939,13 +941,14 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj /* Now we have to wait for in-progress reads and writes */ op = kzalloc(sizeof(*op), GFP_KERNEL); - if (!op) - goto nomem; + if (!op) { + fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR); + _leave(" [ENOMEM]"); + return; + } fscache_operation_init(op, object->cache->ops->invalidate_object, NULL); - op->flags = FSCACHE_OP_ASYNC | - (1 << FSCACHE_OP_EXCLUSIVE) | - (1 << FSCACHE_OP_UNUSE_COOKIE); + op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE); spin_lock(&cookie->lock); if (fscache_submit_exclusive_op(object, op) < 0) @@ -962,50 +965,13 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj /* We can allow read and write requests to come in once again. They'll * queue up behind our exclusive invalidation operation. */ - if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) - wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING); - _leave(" [ok]"); - return transit_to(UPDATE_OBJECT); - -nomem: - clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); - fscache_unuse_cookie(object); - _leave(" [ENOMEM]"); - return transit_to(KILL_OBJECT); + fscache_invalidation_complete(cookie); + _leave(""); + return; submit_op_failed: - clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); spin_unlock(&cookie->lock); kfree(op); + fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR); _leave(" [EIO]"); - return transit_to(KILL_OBJECT); -} - -static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object, - int event) -{ - const struct fscache_state *s; - - fscache_stat(&fscache_n_invalidates_run); - fscache_stat(&fscache_n_cop_invalidate_object); - s = _fscache_invalidate_object(object, event); - fscache_stat_d(&fscache_n_cop_invalidate_object); - return s; -} - -/* - * Asynchronously update an object. 
- */ -static const struct fscache_state *fscache_update_object(struct fscache_object *object, - int event) -{ - _enter("{OBJ%x},%d", object->debug_id, event); - - fscache_stat(&fscache_n_updates_run); - fscache_stat(&fscache_n_cop_update_object); - object->cache->ops->update_object(object); - fscache_stat_d(&fscache_n_cop_update_object); - - _leave(""); - return transit_to(WAIT_FOR_CMD); } diff --git a/trunk/fs/fscache/operation.c b/trunk/fs/fscache/operation.c index 318071aca217..762a9ec4ffa4 100644 --- a/trunk/fs/fscache/operation.c +++ b/trunk/fs/fscache/operation.c @@ -35,7 +35,7 @@ void fscache_enqueue_operation(struct fscache_operation *op) ASSERT(list_empty(&op->pend_link)); ASSERT(op->processor != NULL); - ASSERT(fscache_object_is_available(op->object)); + ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE); ASSERTCMP(atomic_read(&op->usage), >, 0); ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS); @@ -119,7 +119,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object, /* need to issue a new write op after this */ clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); ret = 0; - } else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) { + } else if (object->state == FSCACHE_OBJECT_CREATING) { op->object = object; object->n_ops++; object->n_exclusive++; /* reads and writes must wait */ @@ -144,7 +144,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object, */ static void fscache_report_unexpected_submission(struct fscache_object *object, struct fscache_operation *op, - const struct fscache_state *ostate) + unsigned long ostate) { static bool once_only; struct fscache_operation *p; @@ -155,8 +155,11 @@ static void fscache_report_unexpected_submission(struct fscache_object *object, once_only = true; kdebug("unexpected submission OP%x [OBJ%x %s]", - op->debug_id, object->debug_id, object->state->name); - kdebug("objstate=%s [%s]", object->state->name, ostate->name); + op->debug_id, object->debug_id, + fscache_object_states[object->state]); + kdebug("objstate=%s [%s]", + fscache_object_states[object->state], + fscache_object_states[ostate]); kdebug("objflags=%lx", object->flags); kdebug("objevent=%lx [%lx]", object->events, object->event_mask); kdebug("ops=%u inp=%u exc=%u", @@ -187,7 +190,7 @@ static void fscache_report_unexpected_submission(struct fscache_object *object, int fscache_submit_op(struct fscache_object *object, struct fscache_operation *op) { - const struct fscache_state *ostate; + unsigned long ostate; int ret; _enter("{OBJ%x OP%x},{%u}", @@ -223,14 +226,16 @@ int fscache_submit_op(struct fscache_object *object, fscache_run_op(object, op); } ret = 0; - } else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) { + } else if (object->state == FSCACHE_OBJECT_CREATING) { op->object = object; object->n_ops++; atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); fscache_stat(&fscache_n_op_pend); ret = 0; - } else if (fscache_object_is_dying(object)) { + } else if (object->state == FSCACHE_OBJECT_DYING || + object->state == FSCACHE_OBJECT_LC_DYING || + object->state == FSCACHE_OBJECT_WITHDRAWING) { fscache_stat(&fscache_n_op_rejected); op->state = FSCACHE_OP_ST_CANCELLED; ret = -ENOBUFS; @@ -260,8 +265,8 @@ void fscache_abort_object(struct fscache_object *object) } /* - * Jump start the operation processing on an object. The caller must hold - * object->lock. 
+ * jump start the operation processing on an object + * - caller must hold object->lock */ void fscache_start_operations(struct fscache_object *object) { @@ -423,10 +428,14 @@ void fscache_put_operation(struct fscache_operation *op) object = op->object; - if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) - atomic_dec(&object->n_reads); - if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags)) - fscache_unuse_cookie(object); + if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) { + if (atomic_dec_and_test(&object->n_reads)) { + clear_bit(FSCACHE_COOKIE_WAITING_ON_READS, + &object->cookie->flags); + wake_up_bit(&object->cookie->flags, + FSCACHE_COOKIE_WAITING_ON_READS); + } + } /* now... we may get called with the object spinlock held, so we * complete the cleanup here only if we can immediately acquire the diff --git a/trunk/fs/fscache/page.c b/trunk/fs/fscache/page.c index d479ab3c63e4..ff000e52072d 100644 --- a/trunk/fs/fscache/page.c +++ b/trunk/fs/fscache/page.c @@ -109,7 +109,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie, * allocator as the work threads writing to the cache may all end up * sleeping on memory allocation, so we may need to impose a timeout * too. */ - if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) { + if (!(gfp & __GFP_WAIT)) { fscache_stat(&fscache_n_store_vmscan_busy); return false; } @@ -163,12 +163,10 @@ static void fscache_attr_changed_op(struct fscache_operation *op) fscache_stat(&fscache_n_attr_changed_calls); - if (fscache_object_is_active(object) && - fscache_use_cookie(object)) { + if (fscache_object_is_active(object)) { fscache_stat(&fscache_n_cop_attr_changed); ret = object->cache->ops->attr_changed(object); fscache_stat_d(&fscache_n_cop_attr_changed); - fscache_unuse_cookie(object); if (ret < 0) fscache_abort_object(object); } @@ -235,7 +233,7 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op) _enter("{OP%x}", op->op.debug_id); - ASSERTCMP(atomic_read(&op->n_pages), ==, 0); + ASSERTCMP(op->n_pages, ==, 0); fscache_hist(fscache_retrieval_histogram, op->start_time); if (op->context) @@ -248,7 +246,6 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op) * allocate a retrieval op */ static struct fscache_retrieval *fscache_alloc_retrieval( - struct fscache_cookie *cookie, struct address_space *mapping, fscache_rw_complete_t end_io_func, void *context) @@ -263,10 +260,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval( } fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op); - atomic_inc(&cookie->n_active); - op->op.flags = FSCACHE_OP_MYTHREAD | - (1UL << FSCACHE_OP_WAITING) | - (1UL << FSCACHE_OP_UNUSE_COOKIE); + op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING); op->mapping = mapping; op->end_io_func = end_io_func; op->context = context; @@ -316,7 +310,7 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op) struct fscache_retrieval *op = container_of(_op, struct fscache_retrieval, op); - atomic_set(&op->n_pages, 0); + op->n_pages = 0; } /* @@ -400,13 +394,12 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, if (fscache_wait_for_deferred_lookup(cookie) < 0) return -ERESTARTSYS; - op = fscache_alloc_retrieval(cookie, page->mapping, - end_io_func,context); + op = fscache_alloc_retrieval(page->mapping, end_io_func, context); if (!op) { _leave(" = -ENOMEM"); return -ENOMEM; } - atomic_set(&op->n_pages, 1); + op->n_pages = 1; spin_lock(&cookie->lock); @@ -415,7 +408,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie 
*cookie, object = hlist_entry(cookie->backing_objects.first, struct fscache_object, cookie_link); - ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)); + ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP); atomic_inc(&object->n_reads); __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); @@ -472,7 +465,6 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, atomic_dec(&object->n_reads); nobufs_unlock: spin_unlock(&cookie->lock); - atomic_dec(&cookie->n_active); kfree(op); nobufs: fscache_stat(&fscache_n_retrievals_nobufs); @@ -530,10 +522,10 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, if (fscache_wait_for_deferred_lookup(cookie) < 0) return -ERESTARTSYS; - op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context); + op = fscache_alloc_retrieval(mapping, end_io_func, context); if (!op) return -ENOMEM; - atomic_set(&op->n_pages, *nr_pages); + op->n_pages = *nr_pages; spin_lock(&cookie->lock); @@ -597,7 +589,6 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, atomic_dec(&object->n_reads); nobufs_unlock: spin_unlock(&cookie->lock); - atomic_dec(&cookie->n_active); kfree(op); nobufs: fscache_stat(&fscache_n_retrievals_nobufs); @@ -640,10 +631,10 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, if (fscache_wait_for_deferred_lookup(cookie) < 0) return -ERESTARTSYS; - op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL); + op = fscache_alloc_retrieval(page->mapping, NULL, NULL); if (!op) return -ENOMEM; - atomic_set(&op->n_pages, 1); + op->n_pages = 1; spin_lock(&cookie->lock); @@ -684,7 +675,6 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, nobufs_unlock: spin_unlock(&cookie->lock); - atomic_dec(&cookie->n_active); kfree(op); nobufs: fscache_stat(&fscache_n_allocs_nobufs); @@ -739,9 +729,8 @@ static void fscache_write_op(struct fscache_operation *_op) */ spin_unlock(&object->lock); fscache_op_complete(&op->op, false); - _leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}", - _op->flags, _op->state, object->state->short_name, - object->flags); + _leave(" [cancel] op{f=%lx s=%u} obj{s=%u f=%lx}", + _op->flags, _op->state, object->state, object->flags); return; } @@ -807,16 +796,11 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie) _enter(""); - for (;;) { - spin_lock(&cookie->stores_lock); - n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, - ARRAY_SIZE(results), - FSCACHE_COOKIE_PENDING_TAG); - if (n == 0) { - spin_unlock(&cookie->stores_lock); - break; - } - + while (spin_lock(&cookie->stores_lock), + n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, + ARRAY_SIZE(results), + FSCACHE_COOKIE_PENDING_TAG), + n > 0) { for (i = n - 1; i >= 0; i--) { page = results[i]; radix_tree_delete(&cookie->stores, page->index); @@ -828,6 +812,7 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie) page_cache_release(results[i]); } + spin_unlock(&cookie->stores_lock); _leave(""); } @@ -844,12 +829,14 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie) * (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is * set) * - * (a) no writes yet + * (a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred + * fill op) * * (b) writes deferred till post-creation (mark page for writing and * return immediately) * * (2) negative lookup, object created, initial fill being made from netfs + * (FSCACHE_COOKIE_INITIAL_FILL is set) * * (a) fill point not yet reached this page (mark page for writing and * return) @@ -886,9 +873,7 @@ int 
__fscache_write_page(struct fscache_cookie *cookie, fscache_operation_init(&op->op, fscache_write_op, fscache_release_write_op); - op->op.flags = FSCACHE_OP_ASYNC | - (1 << FSCACHE_OP_WAITING) | - (1 << FSCACHE_OP_UNUSE_COOKIE); + op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING); ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM); if (ret < 0) @@ -934,7 +919,6 @@ int __fscache_write_page(struct fscache_cookie *cookie, op->op.debug_id = atomic_inc_return(&fscache_op_debug_id); op->store_limit = object->store_limit; - atomic_inc(&cookie->n_active); if (fscache_submit_op(object, &op->op) < 0) goto submit_failed; @@ -961,7 +945,6 @@ int __fscache_write_page(struct fscache_cookie *cookie, return 0; submit_failed: - atomic_dec(&cookie->n_active); spin_lock(&cookie->stores_lock); radix_tree_delete(&cookie->stores, page->index); spin_unlock(&cookie->stores_lock); diff --git a/trunk/fs/fuse/dir.c b/trunk/fs/fuse/dir.c index 0eda52738ec4..254df56b847b 100644 --- a/trunk/fs/fuse/dir.c +++ b/trunk/fs/fuse/dir.c @@ -14,7 +14,7 @@ #include #include -static bool fuse_use_readdirplus(struct inode *dir, struct dir_context *ctx) +static bool fuse_use_readdirplus(struct inode *dir, struct file *filp) { struct fuse_conn *fc = get_fuse_conn(dir); struct fuse_inode *fi = get_fuse_inode(dir); @@ -25,7 +25,7 @@ static bool fuse_use_readdirplus(struct inode *dir, struct dir_context *ctx) return true; if (test_and_clear_bit(FUSE_I_ADVISE_RDPLUS, &fi->state)) return true; - if (ctx->pos == 0) + if (filp->f_pos == 0) return true; return false; } @@ -180,8 +180,6 @@ u64 fuse_get_attr_version(struct fuse_conn *fc) static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) { struct inode *inode; - struct dentry *parent; - struct fuse_conn *fc; inode = ACCESS_ONCE(entry->d_inode); if (inode && is_bad_inode(inode)) @@ -189,8 +187,10 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) else if (fuse_dentry_time(entry) < get_jiffies_64()) { int err; struct fuse_entry_out outarg; + struct fuse_conn *fc; struct fuse_req *req; struct fuse_forget_link *forget; + struct dentry *parent; u64 attr_version; /* For negative dentries, always do a fresh lookup */ @@ -241,14 +241,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) entry_attr_timeout(&outarg), attr_version); fuse_change_entry_timeout(entry, &outarg); - } else if (inode) { - fc = get_fuse_conn(inode); - if (fc->readdirplus_auto) { - parent = dget_parent(entry); - fuse_advise_use_readdirplus(parent->d_inode); - dput(parent); - } } + fuse_advise_use_readdirplus(inode); return 1; } @@ -1165,23 +1159,25 @@ static int fuse_permission(struct inode *inode, int mask) } static int parse_dirfile(char *buf, size_t nbytes, struct file *file, - struct dir_context *ctx) + void *dstbuf, filldir_t filldir) { while (nbytes >= FUSE_NAME_OFFSET) { struct fuse_dirent *dirent = (struct fuse_dirent *) buf; size_t reclen = FUSE_DIRENT_SIZE(dirent); + int over; if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX) return -EIO; if (reclen > nbytes) break; - if (!dir_emit(ctx, dirent->name, dirent->namelen, - dirent->ino, dirent->type)) + over = filldir(dstbuf, dirent->name, dirent->namelen, + file->f_pos, dirent->ino, dirent->type); + if (over) break; buf += reclen; nbytes -= reclen; - ctx->pos = dirent->off; + file->f_pos = dirent->off; } return 0; @@ -1282,7 +1278,7 @@ static int fuse_direntplus_link(struct file *file, } static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file, - struct dir_context 
*ctx, u64 attr_version) + void *dstbuf, filldir_t filldir, u64 attr_version) { struct fuse_direntplus *direntplus; struct fuse_dirent *dirent; @@ -1307,9 +1303,10 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file, we need to send a FORGET for each of those which we did not link. */ - over = !dir_emit(ctx, dirent->name, dirent->namelen, - dirent->ino, dirent->type); - ctx->pos = dirent->off; + over = filldir(dstbuf, dirent->name, dirent->namelen, + file->f_pos, dirent->ino, + dirent->type); + file->f_pos = dirent->off; } buf += reclen; @@ -1323,7 +1320,7 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file, return 0; } -static int fuse_readdir(struct file *file, struct dir_context *ctx) +static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir) { int plus, err; size_t nbytes; @@ -1346,17 +1343,17 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx) return -ENOMEM; } - plus = fuse_use_readdirplus(inode, ctx); + plus = fuse_use_readdirplus(inode, file); req->out.argpages = 1; req->num_pages = 1; req->pages[0] = page; req->page_descs[0].length = PAGE_SIZE; if (plus) { attr_version = fuse_get_attr_version(fc); - fuse_read_fill(req, file, ctx->pos, PAGE_SIZE, + fuse_read_fill(req, file, file->f_pos, PAGE_SIZE, FUSE_READDIRPLUS); } else { - fuse_read_fill(req, file, ctx->pos, PAGE_SIZE, + fuse_read_fill(req, file, file->f_pos, PAGE_SIZE, FUSE_READDIR); } fuse_request_send(fc, req); @@ -1366,11 +1363,11 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx) if (!err) { if (plus) { err = parse_dirplusfile(page_address(page), nbytes, - file, ctx, + file, dstbuf, filldir, attr_version); } else { err = parse_dirfile(page_address(page), nbytes, file, - ctx); + dstbuf, filldir); } } @@ -1883,7 +1880,7 @@ static const struct inode_operations fuse_dir_inode_operations = { static const struct file_operations fuse_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = fuse_readdir, + .readdir = fuse_readdir, .open = fuse_dir_open, .release = fuse_dir_release, .fsync = fuse_dir_fsync, diff --git a/trunk/fs/fuse/file.c b/trunk/fs/fuse/file.c index 35f281033142..d1c9b85b3f58 100644 --- a/trunk/fs/fuse/file.c +++ b/trunk/fs/fuse/file.c @@ -16,7 +16,6 @@ #include #include #include -#include static const struct file_operations fuse_direct_io_file_operations; @@ -1279,10 +1278,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov, iov_iter_init(&ii, iov, nr_segs, count, 0); - if (io->async) - req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii)); - else - req = fuse_get_req(fc, fuse_iter_npages(&ii)); + req = fuse_get_req(fc, fuse_iter_npages(&ii)); if (IS_ERR(req)) return PTR_ERR(req); @@ -1318,11 +1314,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov, break; if (count) { fuse_put_request(fc, req); - if (io->async) - req = fuse_get_req_for_background(fc, - fuse_iter_npages(&ii)); - else - req = fuse_get_req(fc, fuse_iter_npages(&ii)); + req = fuse_get_req(fc, fuse_iter_npages(&ii)); if (IS_ERR(req)) break; } @@ -2373,11 +2365,6 @@ static void fuse_do_truncate(struct file *file) fuse_do_setattr(inode, &attr, file); } -static inline loff_t fuse_round_up(loff_t off) -{ - return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT); -} - static ssize_t fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) @@ -2385,7 +2372,6 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const 
struct iovec *iov, ssize_t ret = 0; struct file *file = iocb->ki_filp; struct fuse_file *ff = file->private_data; - bool async_dio = ff->fc->async_dio; loff_t pos = 0; struct inode *inode; loff_t i_size; @@ -2397,10 +2383,10 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, i_size = i_size_read(inode); /* optimization for short read */ - if (async_dio && rw != WRITE && offset + count > i_size) { + if (rw != WRITE && offset + count > i_size) { if (offset >= i_size) return 0; - count = min_t(loff_t, count, fuse_round_up(i_size - offset)); + count = i_size - offset; } io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); @@ -2418,7 +2404,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, * By default, we want to optimize all I/Os with async request * submission to the client filesystem if supported. */ - io->async = async_dio; + io->async = ff->fc->async_dio; io->iocb = iocb; /* @@ -2426,7 +2412,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, * to wait on real async I/O requests, so we must submit this request * synchronously. */ - if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE) + if (!is_sync_kiocb(iocb) && (offset + count > i_size)) io->async = false; if (rw == WRITE) @@ -2438,7 +2424,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, fuse_aio_complete(io, ret < 0 ? ret : 0, -1); /* we have a non-extending, async request, so return */ - if (!is_sync_kiocb(iocb)) + if (ret > 0 && !is_sync_kiocb(iocb)) return -EIOCBQUEUED; ret = wait_on_sync_kiocb(iocb); @@ -2460,7 +2446,6 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, loff_t length) { struct fuse_file *ff = file->private_data; - struct inode *inode = file->f_inode; struct fuse_conn *fc = ff->fc; struct fuse_req *req; struct fuse_fallocate_in inarg = { @@ -2470,23 +2455,13 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, .mode = mode }; int err; - bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) || - (mode & FALLOC_FL_PUNCH_HOLE); if (fc->no_fallocate) return -EOPNOTSUPP; - if (lock_inode) { - mutex_lock(&inode->i_mutex); - if (mode & FALLOC_FL_PUNCH_HOLE) - fuse_set_nowrite(inode); - } - req = fuse_get_req_nopages(fc); - if (IS_ERR(req)) { - err = PTR_ERR(req); - goto out; - } + if (IS_ERR(req)) + return PTR_ERR(req); req->in.h.opcode = FUSE_FALLOCATE; req->in.h.nodeid = ff->nodeid; @@ -2501,25 +2476,6 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, } fuse_put_request(fc, req); - if (err) - goto out; - - /* we could have extended the file */ - if (!(mode & FALLOC_FL_KEEP_SIZE)) - fuse_write_update_size(inode, offset + length); - - if (mode & FALLOC_FL_PUNCH_HOLE) - truncate_pagecache_range(inode, offset, offset + length - 1); - - fuse_invalidate_attr(inode); - -out: - if (lock_inode) { - if (mode & FALLOC_FL_PUNCH_HOLE) - fuse_release_nowrite(inode); - mutex_unlock(&inode->i_mutex); - } - return err; } diff --git a/trunk/fs/fuse/inode.c b/trunk/fs/fuse/inode.c index 9a0cdde14a08..6201f81e4d3a 100644 --- a/trunk/fs/fuse/inode.c +++ b/trunk/fs/fuse/inode.c @@ -867,11 +867,10 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) fc->dont_mask = 1; if (arg->flags & FUSE_AUTO_INVAL_DATA) fc->auto_inval_data = 1; - if (arg->flags & FUSE_DO_READDIRPLUS) { + if (arg->flags & FUSE_DO_READDIRPLUS) fc->do_readdirplus = 1; - if (arg->flags & FUSE_READDIRPLUS_AUTO) - fc->readdirplus_auto = 1; - } + if (arg->flags & 
FUSE_READDIRPLUS_AUTO) + fc->readdirplus_auto = 1; if (arg->flags & FUSE_ASYNC_DIO) fc->async_dio = 1; } else { diff --git a/trunk/fs/gfs2/Kconfig b/trunk/fs/gfs2/Kconfig index 5a376ab81feb..eb08c9e43c2a 100644 --- a/trunk/fs/gfs2/Kconfig +++ b/trunk/fs/gfs2/Kconfig @@ -26,7 +26,7 @@ config GFS2_FS config GFS2_FS_LOCKING_DLM bool "GFS2 DLM locking" depends on (GFS2_FS!=n) && NET && INET && (IPV6 || IPV6=n) && \ - HOTPLUG && CONFIGFS_FS && SYSFS && (DLM=y || DLM=GFS2_FS) + HOTPLUG && DLM && CONFIGFS_FS && SYSFS help Multiple node locking module for GFS2 diff --git a/trunk/fs/gfs2/aops.c b/trunk/fs/gfs2/aops.c index ee48ad37d9c0..0bad69ed6336 100644 --- a/trunk/fs/gfs2/aops.c +++ b/trunk/fs/gfs2/aops.c @@ -110,7 +110,7 @@ static int gfs2_writepage_common(struct page *page, /* Is the page fully outside i_size? (truncate in progress) */ offset = i_size & (PAGE_CACHE_SIZE-1); if (page->index > end_index || (page->index == end_index && !offset)) { - page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); + page->mapping->a_ops->invalidatepage(page, 0); goto out; } return 1; @@ -299,8 +299,7 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping, /* Is the page fully outside i_size? (truncate in progress) */ if (page->index > end_index || (page->index == end_index && !offset)) { - page->mapping->a_ops->invalidatepage(page, 0, - PAGE_CACHE_SIZE); + page->mapping->a_ops->invalidatepage(page, 0); unlock_page(page); continue; } @@ -944,33 +943,27 @@ static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh) unlock_buffer(bh); } -static void gfs2_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +static void gfs2_invalidatepage(struct page *page, unsigned long offset) { struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); - unsigned int stop = offset + length; - int partial_page = (offset || length < PAGE_CACHE_SIZE); struct buffer_head *bh, *head; unsigned long pos = 0; BUG_ON(!PageLocked(page)); - if (!partial_page) + if (offset == 0) ClearPageChecked(page); if (!page_has_buffers(page)) goto out; bh = head = page_buffers(page); do { - if (pos + bh->b_size > stop) - return; - if (offset <= pos) gfs2_discard(sdp, bh); pos += bh->b_size; bh = bh->b_this_page; } while (bh != head); out: - if (!partial_page) + if (offset == 0) try_to_release_page(page, 0); } diff --git a/trunk/fs/gfs2/bmap.c b/trunk/fs/gfs2/bmap.c index 5e2f56fccf6b..1dc9a13ce6bb 100644 --- a/trunk/fs/gfs2/bmap.c +++ b/trunk/fs/gfs2/bmap.c @@ -1232,9 +1232,7 @@ static int do_grow(struct inode *inode, u64 size) unstuff = 1; } - error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT + - (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ? 
- 0 : RES_QUOTA), 0); + error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT, 0); if (error) goto do_grow_release; @@ -1288,26 +1286,17 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize) if (ret) return ret; - ret = get_write_access(inode); - if (ret) - return ret; - inode_dio_wait(inode); ret = gfs2_rs_alloc(GFS2_I(inode)); if (ret) - goto out; + return ret; oldsize = inode->i_size; - if (newsize >= oldsize) { - ret = do_grow(inode, newsize); - goto out; - } + if (newsize >= oldsize) + return do_grow(inode, newsize); - ret = do_shrink(inode, oldsize, newsize); -out: - put_write_access(inode); - return ret; + return do_shrink(inode, oldsize, newsize); } int gfs2_truncatei_resume(struct gfs2_inode *ip) diff --git a/trunk/fs/gfs2/dir.c b/trunk/fs/gfs2/dir.c index 0cb4c1557f20..c3e82bd23179 100644 --- a/trunk/fs/gfs2/dir.c +++ b/trunk/fs/gfs2/dir.c @@ -354,31 +354,22 @@ static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip) return ERR_PTR(-EIO); } - hc = kmalloc(hsize, GFP_NOFS | __GFP_NOWARN); - if (hc == NULL) - hc = __vmalloc(hsize, GFP_NOFS, PAGE_KERNEL); - + hc = kmalloc(hsize, GFP_NOFS); + ret = -ENOMEM; if (hc == NULL) return ERR_PTR(-ENOMEM); ret = gfs2_dir_read_data(ip, hc, hsize); if (ret < 0) { - if (is_vmalloc_addr(hc)) - vfree(hc); - else - kfree(hc); + kfree(hc); return ERR_PTR(ret); } spin_lock(&inode->i_lock); - if (ip->i_hash_cache) { - if (is_vmalloc_addr(hc)) - vfree(hc); - else - kfree(hc); - } else { + if (ip->i_hash_cache) + kfree(hc); + else ip->i_hash_cache = hc; - } spin_unlock(&inode->i_lock); return ip->i_hash_cache; @@ -394,10 +385,7 @@ void gfs2_dir_hash_inval(struct gfs2_inode *ip) { __be64 *hc = ip->i_hash_cache; ip->i_hash_cache = NULL; - if (is_vmalloc_addr(hc)) - vfree(hc); - else - kfree(hc); + kfree(hc); } static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent) @@ -1125,14 +1113,10 @@ static int dir_double_exhash(struct gfs2_inode *dip) if (IS_ERR(hc)) return PTR_ERR(hc); - hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS | __GFP_NOWARN); - if (hc2 == NULL) - hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS, PAGE_KERNEL); - + h = hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS); if (!hc2) return -ENOMEM; - h = hc2; error = gfs2_meta_inode_buffer(dip, &dibh); if (error) goto out_kfree; @@ -1161,10 +1145,7 @@ static int dir_double_exhash(struct gfs2_inode *dip) gfs2_dinode_out(dip, dibh->b_data); brelse(dibh); out_kfree: - if (is_vmalloc_addr(hc2)) - vfree(hc2); - else - kfree(hc2); + kfree(hc2); return error; } @@ -1213,7 +1194,9 @@ static int compare_dents(const void *a, const void *b) /** * do_filldir_main - read out directory entries * @dip: The GFS2 inode - * @ctx: what to feed the entries to + * @offset: The offset in the file to read from + * @opaque: opaque data to pass to filldir + * @filldir: The function to pass entries to * @darr: an array of struct gfs2_dirent pointers to read * @entries: the number of entries in darr * @copied: pointer to int that's non-zero if a entry has been copied out @@ -1223,10 +1206,11 @@ static int compare_dents(const void *a, const void *b) * the possibility that they will fall into different readdir buffers or * that someone will want to seek to that location. 
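The do_filldir_main() kernel-doc above, together with the body that follows, describes resumable readdir: entries are sorted, each entry's position is derived from its name hash, anything below the saved position is skipped, and the position is bumped past the last entry emitted. A small userspace sketch of that resume-by-offset idea (illustrative types and names only, with a plain callback standing in for filldir):

```c
/* Userspace sketch of "skip entries below the saved position, emit the
 * rest through a callback, then advance the position" -- the shape of
 * the GFS2 readdir resume logic, not the real code. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	const char *name;
	uint32_t hash;		/* stands in for the hash-derived offset */
};

typedef int (*emit_fn)(const char *name, uint64_t pos);

static int cmp_hash(const void *a, const void *b)
{
	const struct entry *ea = a, *eb = b;

	return (ea->hash > eb->hash) - (ea->hash < eb->hash);
}

/* Emit entries whose position is >= *pos; record the position of each
 * emitted entry, and bump it by one after a full pass so the next call
 * resumes after the last entry instead of repeating it. */
static int emit_from(struct entry *ents, size_t n, uint64_t *pos, emit_fn emit)
{
	qsort(ents, n, sizeof(*ents), cmp_hash);

	for (size_t i = 0; i < n; i++) {
		uint64_t off = ents[i].hash;

		if (off < *pos)
			continue;	/* already returned on an earlier call */
		*pos = off;
		if (emit(ents[i].name, off))
			return 1;	/* callback asked us to stop here */
	}
	(*pos)++;
	return 0;
}

static int print_entry(const char *name, uint64_t pos)
{
	printf("%8llu  %s\n", (unsigned long long)pos, name);
	return 0;
}

int main(void)
{
	struct entry ents[] = {
		{ "gamma", 0x30 }, { "alpha", 0x10 }, { "beta", 0x20 },
	};
	uint64_t pos = 0x11;	/* resume just past "alpha" */

	emit_from(ents, sizeof(ents) / sizeof(ents[0]), &pos, print_entry);
	return 0;
}
```

Because the position is derived from the hash rather than a buffer index, a seek back to a previously returned offset lands on the same entry even if the directory has since changed shape.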
* - * Returns: errno, >0 if the actor tells you to stop + * Returns: errno, >0 on exception from filldir */ -static int do_filldir_main(struct gfs2_inode *dip, struct dir_context *ctx, +static int do_filldir_main(struct gfs2_inode *dip, u64 *offset, + void *opaque, filldir_t filldir, const struct gfs2_dirent **darr, u32 entries, int *copied) { @@ -1234,6 +1218,7 @@ static int do_filldir_main(struct gfs2_inode *dip, struct dir_context *ctx, u64 off, off_next; unsigned int x, y; int run = 0; + int error = 0; sort(darr, entries, sizeof(struct gfs2_dirent *), compare_dents, NULL); @@ -1250,9 +1235,9 @@ static int do_filldir_main(struct gfs2_inode *dip, struct dir_context *ctx, off_next = be32_to_cpu(dent_next->de_hash); off_next = gfs2_disk_hash2offset(off_next); - if (off < ctx->pos) + if (off < *offset) continue; - ctx->pos = off; + *offset = off; if (off_next == off) { if (*copied && !run) @@ -1261,25 +1246,26 @@ static int do_filldir_main(struct gfs2_inode *dip, struct dir_context *ctx, } else run = 0; } else { - if (off < ctx->pos) + if (off < *offset) continue; - ctx->pos = off; + *offset = off; } - if (!dir_emit(ctx, (const char *)(dent + 1), + error = filldir(opaque, (const char *)(dent + 1), be16_to_cpu(dent->de_name_len), - be64_to_cpu(dent->de_inum.no_addr), - be16_to_cpu(dent->de_type))) + off, be64_to_cpu(dent->de_inum.no_addr), + be16_to_cpu(dent->de_type)); + if (error) return 1; *copied = 1; } - /* Increment the ctx->pos by one, so the next time we come into the + /* Increment the *offset by one, so the next time we come into the do_filldir fxn, we get the next entry instead of the last one in the current leaf */ - ctx->pos++; + (*offset)++; return 0; } @@ -1303,8 +1289,8 @@ static void gfs2_free_sort_buffer(void *ptr) kfree(ptr); } -static int gfs2_dir_read_leaf(struct inode *inode, struct dir_context *ctx, - int *copied, unsigned *depth, +static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque, + filldir_t filldir, int *copied, unsigned *depth, u64 leaf_no) { struct gfs2_inode *ip = GFS2_I(inode); @@ -1382,7 +1368,8 @@ static int gfs2_dir_read_leaf(struct inode *inode, struct dir_context *ctx, } while(lfn); BUG_ON(entries2 != entries); - error = do_filldir_main(ip, ctx, darr, entries, copied); + error = do_filldir_main(ip, offset, opaque, filldir, darr, + entries, copied); out_free: for(i = 0; i < leaf; i++) brelse(larr[i]); @@ -1441,13 +1428,15 @@ static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index, /** * dir_e_read - Reads the entries from a directory into a filldir buffer * @dip: dinode pointer - * @ctx: actor to feed the entries to + * @offset: the hash of the last entry read shifted to the right once + * @opaque: buffer for the filldir function to fill + * @filldir: points to the filldir function to use * * Returns: errno */ -static int dir_e_read(struct inode *inode, struct dir_context *ctx, - struct file_ra_state *f_ra) +static int dir_e_read(struct inode *inode, u64 *offset, void *opaque, + filldir_t filldir, struct file_ra_state *f_ra) { struct gfs2_inode *dip = GFS2_I(inode); u32 hsize, len = 0; @@ -1458,7 +1447,7 @@ static int dir_e_read(struct inode *inode, struct dir_context *ctx, unsigned depth = 0; hsize = 1 << dip->i_depth; - hash = gfs2_dir_offset2hash(ctx->pos); + hash = gfs2_dir_offset2hash(*offset); index = hash >> (32 - dip->i_depth); if (dip->i_hash_cache == NULL) @@ -1470,7 +1459,7 @@ static int dir_e_read(struct inode *inode, struct dir_context *ctx, gfs2_dir_readahead(inode, hsize, index, f_ra); while 
(index < hsize) { - error = gfs2_dir_read_leaf(inode, ctx, + error = gfs2_dir_read_leaf(inode, offset, opaque, filldir, &copied, &depth, be64_to_cpu(lp[index])); if (error) @@ -1485,8 +1474,8 @@ static int dir_e_read(struct inode *inode, struct dir_context *ctx, return error; } -int gfs2_dir_read(struct inode *inode, struct dir_context *ctx, - struct file_ra_state *f_ra) +int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque, + filldir_t filldir, struct file_ra_state *f_ra) { struct gfs2_inode *dip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); @@ -1500,7 +1489,7 @@ int gfs2_dir_read(struct inode *inode, struct dir_context *ctx, return 0; if (dip->i_diskflags & GFS2_DIF_EXHASH) - return dir_e_read(inode, ctx, f_ra); + return dir_e_read(inode, offset, opaque, filldir, f_ra); if (!gfs2_is_stuffed(dip)) { gfs2_consist_inode(dip); @@ -1532,7 +1521,7 @@ int gfs2_dir_read(struct inode *inode, struct dir_context *ctx, error = -EIO; goto out; } - error = do_filldir_main(dip, ctx, darr, + error = do_filldir_main(dip, offset, opaque, filldir, darr, dip->i_entries, &copied); out: kfree(darr); @@ -1548,9 +1537,9 @@ int gfs2_dir_read(struct inode *inode, struct dir_context *ctx, /** * gfs2_dir_search - Search a directory - * @dip: The GFS2 dir inode - * @name: The name we are looking up - * @fail_on_exist: Fail if the name exists rather than looking it up + * @dip: The GFS2 inode + * @filename: + * @inode: * * This routine searches a directory for a file or another directory. * Assumes a glock is held on dip. @@ -1558,25 +1547,22 @@ int gfs2_dir_read(struct inode *inode, struct dir_context *ctx, * Returns: errno */ -struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name, - bool fail_on_exist) +struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name) { struct buffer_head *bh; struct gfs2_dirent *dent; - u64 addr, formal_ino; - u16 dtype; + struct inode *inode; dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh); if (dent) { if (IS_ERR(dent)) return ERR_CAST(dent); - dtype = be16_to_cpu(dent->de_type); - addr = be64_to_cpu(dent->de_inum.no_addr); - formal_ino = be64_to_cpu(dent->de_inum.no_formal_ino); + inode = gfs2_inode_lookup(dir->i_sb, + be16_to_cpu(dent->de_type), + be64_to_cpu(dent->de_inum.no_addr), + be64_to_cpu(dent->de_inum.no_formal_ino), 0); brelse(bh); - if (fail_on_exist) - return ERR_PTR(-EEXIST); - return gfs2_inode_lookup(dir->i_sb, dtype, addr, formal_ino, 0); + return inode; } return ERR_PTR(-ENOENT); } @@ -1860,8 +1846,6 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len, memset(&rlist, 0, sizeof(struct gfs2_rgrp_list)); ht = kzalloc(size, GFP_NOFS); - if (ht == NULL) - ht = vzalloc(size); if (!ht) return -ENOMEM; @@ -1949,10 +1933,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len, gfs2_rlist_free(&rlist); gfs2_quota_unhold(dip); out: - if (is_vmalloc_addr(ht)) - vfree(ht); - else - kfree(ht); + kfree(ht); return error; } diff --git a/trunk/fs/gfs2/dir.h b/trunk/fs/gfs2/dir.h index 4f03bbd1873f..98c960beab35 100644 --- a/trunk/fs/gfs2/dir.h +++ b/trunk/fs/gfs2/dir.h @@ -18,15 +18,14 @@ struct gfs2_inode; struct gfs2_inum; extern struct inode *gfs2_dir_search(struct inode *dir, - const struct qstr *filename, - bool fail_on_exist); + const struct qstr *filename); extern int gfs2_dir_check(struct inode *dir, const struct qstr *filename, const struct gfs2_inode *ip); extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename, const struct gfs2_inode *ip); extern int 
gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry); -extern int gfs2_dir_read(struct inode *inode, struct dir_context *ctx, - struct file_ra_state *f_ra); +extern int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque, + filldir_t filldir, struct file_ra_state *f_ra); extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, const struct gfs2_inode *nip, unsigned int new_type); diff --git a/trunk/fs/gfs2/export.c b/trunk/fs/gfs2/export.c index 8b9b3775e2e7..9973df4ff565 100644 --- a/trunk/fs/gfs2/export.c +++ b/trunk/fs/gfs2/export.c @@ -64,7 +64,6 @@ static int gfs2_encode_fh(struct inode *inode, __u32 *p, int *len, } struct get_name_filldir { - struct dir_context ctx; struct gfs2_inum_host inum; char *name; }; @@ -89,11 +88,9 @@ static int gfs2_get_name(struct dentry *parent, char *name, struct inode *dir = parent->d_inode; struct inode *inode = child->d_inode; struct gfs2_inode *dip, *ip; - struct get_name_filldir gnfd = { - .ctx.actor = get_name_filldir, - .name = name - }; + struct get_name_filldir gnfd; struct gfs2_holder gh; + u64 offset = 0; int error; struct file_ra_state f_ra = { .start = 0 }; @@ -109,12 +106,13 @@ static int gfs2_get_name(struct dentry *parent, char *name, *name = 0; gnfd.inum.no_addr = ip->i_no_addr; gnfd.inum.no_formal_ino = ip->i_no_formal_ino; + gnfd.name = name; error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &gh); if (error) return error; - error = gfs2_dir_read(dir, &gnfd.ctx, &f_ra); + error = gfs2_dir_read(dir, &offset, &gnfd, get_name_filldir, &f_ra); gfs2_glock_dq_uninit(&gh); diff --git a/trunk/fs/gfs2/file.c b/trunk/fs/gfs2/file.c index f99f9e8a325f..acd16764b133 100644 --- a/trunk/fs/gfs2/file.c +++ b/trunk/fs/gfs2/file.c @@ -82,28 +82,35 @@ static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence) } /** - * gfs2_readdir - Iterator for a directory + * gfs2_readdir - Read directory entries from a directory * @file: The directory to read from - * @ctx: What to feed directory entries to + * @dirent: Buffer for dirents + * @filldir: Function used to do the copying * * Returns: errno */ -static int gfs2_readdir(struct file *file, struct dir_context *ctx) +static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir) { struct inode *dir = file->f_mapping->host; struct gfs2_inode *dip = GFS2_I(dir); struct gfs2_holder d_gh; + u64 offset = file->f_pos; int error; - error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); - if (error) + gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); + error = gfs2_glock_nq(&d_gh); + if (error) { + gfs2_holder_uninit(&d_gh); return error; + } - error = gfs2_dir_read(dir, ctx, &file->f_ra); + error = gfs2_dir_read(dir, &offset, dirent, filldir, &file->f_ra); gfs2_glock_dq_uninit(&d_gh); + file->f_pos = offset; + return error; } @@ -395,20 +402,16 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) /* Update file times before taking page lock */ file_update_time(vma->vm_file); - ret = get_write_access(inode); - if (ret) - goto out; - ret = gfs2_rs_alloc(ip); if (ret) - goto out_write_access; + return ret; gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); ret = gfs2_glock_nq(&gh); if (ret) - goto out_uninit; + goto out; set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); set_bit(GIF_SW_PAGED, &ip->i_flags); @@ -477,15 +480,12 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) gfs2_quota_unlock(ip); out_unlock: gfs2_glock_dq(&gh); 
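The gfs2_page_mkwrite and gfs2_open hunks around this point are mostly reshuffling goto cleanup labels (out_uninit, out_write_access, fail_gunlock and so on). For readers less used to that idiom, here is a minimal self-contained sketch of the acquire-in-order, unwind-in-reverse goto style; the resources and names are invented for illustration and do not correspond to the GFS2 code:

```c
/* Sketch of kernel-style goto unwinding: each failure jumps to the label
 * that releases only what has been acquired so far, and the labels fall
 * through each other in reverse acquisition order. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int do_work(void *a, void *b)
{
	printf("working with %p and %p\n", a, b);
	return 0;
}

static int run(void)
{
	void *a, *b;
	int ret;

	a = malloc(16);			/* first resource */
	if (!a)
		return -ENOMEM;

	b = malloc(32);			/* second resource */
	if (!b) {
		ret = -ENOMEM;
		goto out_free_a;	/* only 'a' needs releasing */
	}

	ret = do_work(a, b);
	if (ret)
		goto out_free_b;	/* unwind everything on error */

	printf("success\n");

out_free_b:
	free(b);
out_free_a:
	free(a);
	return ret;
}

int main(void)
{
	return run() < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}
```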
-out_uninit: +out: gfs2_holder_uninit(&gh); if (ret == 0) { set_page_dirty(page); wait_for_stable_page(page); } -out_write_access: - put_write_access(inode); -out: sb_end_pagefault(inode->i_sb); return block_page_mkwrite_return(ret); } @@ -531,30 +531,21 @@ static int gfs2_mmap(struct file *file, struct vm_area_struct *vma) } /** - * gfs2_open_common - This is common to open and atomic_open - * @inode: The inode being opened - * @file: The file being opened - * - * This maybe called under a glock or not depending upon how it has - * been called. We must always be called under a glock for regular - * files, however. For other file types, it does not matter whether - * we hold the glock or not. + * gfs2_open - open a file + * @inode: the inode to open + * @file: the struct file for this opening * - * Returns: Error code or 0 for success + * Returns: errno */ -int gfs2_open_common(struct inode *inode, struct file *file) +static int gfs2_open(struct inode *inode, struct file *file) { + struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_holder i_gh; struct gfs2_file *fp; - int ret; - - if (S_ISREG(inode->i_mode)) { - ret = generic_file_open(inode, file); - if (ret) - return ret; - } + int error; - fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS); + fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL); if (!fp) return -ENOMEM; @@ -562,43 +553,29 @@ int gfs2_open_common(struct inode *inode, struct file *file) gfs2_assert_warn(GFS2_SB(inode), !file->private_data); file->private_data = fp; - return 0; -} - -/** - * gfs2_open - open a file - * @inode: the inode to open - * @file: the struct file for this opening - * - * After atomic_open, this function is only used for opening files - * which are already cached. We must still get the glock for regular - * files to ensure that we have the file size uptodate for the large - * file check which is in the common code. That is only an issue for - * regular files though. 
- * - * Returns: errno - */ - -static int gfs2_open(struct inode *inode, struct file *file) -{ - struct gfs2_inode *ip = GFS2_I(inode); - struct gfs2_holder i_gh; - int error; - bool need_unlock = false; if (S_ISREG(ip->i_inode.i_mode)) { error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); if (error) - return error; - need_unlock = true; - } + goto fail; - error = gfs2_open_common(inode, file); + if (!(file->f_flags & O_LARGEFILE) && + i_size_read(inode) > MAX_NON_LFS) { + error = -EOVERFLOW; + goto fail_gunlock; + } - if (need_unlock) gfs2_glock_dq_uninit(&i_gh); + } + + return 0; +fail_gunlock: + gfs2_glock_dq_uninit(&i_gh); +fail: + file->private_data = NULL; + kfree(fp); return error; } @@ -617,10 +594,10 @@ static int gfs2_release(struct inode *inode, struct file *file) kfree(file->private_data); file->private_data = NULL; - if (!(file->f_mode & FMODE_WRITE)) - return 0; + if ((file->f_mode & FMODE_WRITE) && + (atomic_read(&inode->i_writecount) == 1)) + gfs2_rs_delete(ip); - gfs2_rs_delete(ip); return 0; } @@ -1064,7 +1041,7 @@ const struct file_operations gfs2_file_fops = { }; const struct file_operations gfs2_dir_fops = { - .iterate = gfs2_readdir, + .readdir = gfs2_readdir, .unlocked_ioctl = gfs2_ioctl, .open = gfs2_open, .release = gfs2_release, @@ -1094,7 +1071,7 @@ const struct file_operations gfs2_file_fops_nolock = { }; const struct file_operations gfs2_dir_fops_nolock = { - .iterate = gfs2_readdir, + .readdir = gfs2_readdir, .unlocked_ioctl = gfs2_ioctl, .open = gfs2_open, .release = gfs2_release, diff --git a/trunk/fs/gfs2/glops.c b/trunk/fs/gfs2/glops.c index 5f2e5224c51c..c66e99c97571 100644 --- a/trunk/fs/gfs2/glops.c +++ b/trunk/fs/gfs2/glops.c @@ -54,6 +54,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) struct gfs2_bufdata *bd, *tmp; struct buffer_head *bh; const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock); + sector_t blocknr; gfs2_log_lock(sdp); spin_lock(&sdp->sd_ail_lock); @@ -64,6 +65,13 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) continue; gfs2_ail_error(gl, bh); } + blocknr = bh->b_blocknr; + bh->b_private = NULL; + gfs2_remove_from_ail(bd); /* drops ref on bh */ + + bd->bd_bh = NULL; + bd->bd_blkno = blocknr; + gfs2_trans_add_revoke(sdp, bd); } GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)); diff --git a/trunk/fs/gfs2/inode.c b/trunk/fs/gfs2/inode.c index bbb2715171cd..8833a4f264e3 100644 --- a/trunk/fs/gfs2/inode.c +++ b/trunk/fs/gfs2/inode.c @@ -189,7 +189,6 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, return inode; fail_refresh: - ip->i_iopen_gh.gh_flags |= GL_NOCACHE; ip->i_iopen_gh.gh_gl->gl_object = NULL; gfs2_glock_dq_uninit(&ip->i_iopen_gh); fail_iopen: @@ -313,7 +312,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name, goto out; } - inode = gfs2_dir_search(dir, name, false); + inode = gfs2_dir_search(dir, name); if (IS_ERR(inode)) error = PTR_ERR(inode); out: @@ -346,6 +345,17 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name, if (!dip->i_inode.i_nlink) return -ENOENT; + error = gfs2_dir_check(&dip->i_inode, name, NULL); + switch (error) { + case -ENOENT: + error = 0; + break; + case 0: + return -EEXIST; + default: + return error; + } + if (dip->i_entries == (u32)-1) return -EFBIG; if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1) @@ -535,7 +545,6 @@ static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip, * gfs2_create_inode - Create a new inode 
* @dir: The parent directory * @dentry: The new dentry - * @file: If non-NULL, the file which is being opened * @mode: The permissions on the new inode * @dev: For device nodes, this is the device number * @symname: For symlinks, this is the link destination @@ -545,9 +554,8 @@ static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip, */ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, - struct file *file, umode_t mode, dev_t dev, const char *symname, - unsigned int size, int excl, int *opened) + unsigned int size, int excl) { const struct qstr *name = &dentry->d_name; struct gfs2_holder ghs[2]; @@ -555,7 +563,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, struct gfs2_inode *dip = GFS2_I(dir), *ip; struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); struct gfs2_glock *io_gl; - struct dentry *d; int error; u32 aflags = 0; int arq; @@ -576,29 +583,14 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, goto fail; error = create_ok(dip, name, mode); - if (error) - goto fail_gunlock; - - inode = gfs2_dir_search(dir, &dentry->d_name, !S_ISREG(mode) || excl); - error = PTR_ERR(inode); - if (!IS_ERR(inode)) { - d = d_splice_alias(inode, dentry); - error = 0; - if (file && !IS_ERR(d)) { - if (d == NULL) - d = dentry; - if (S_ISREG(inode->i_mode)) - error = finish_open(file, d, gfs2_open_common, opened); - else - error = finish_no_open(file, d); - } + if ((error == -EEXIST) && S_ISREG(mode) && !excl) { + inode = gfs2_lookupi(dir, &dentry->d_name, 0); gfs2_glock_dq_uninit(ghs); - if (IS_ERR(d)) - return PTR_RET(d); - return error; - } else if (error != -ENOENT) { - goto fail_gunlock; + d_instantiate(dentry, inode); + return IS_ERR(inode) ? PTR_ERR(inode) : 0; } + if (error) + goto fail_gunlock; arq = error = gfs2_diradd_alloc_required(dir, name); if (error < 0) @@ -693,12 +685,10 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, goto fail_gunlock3; mark_inode_dirty(inode); - d_instantiate(dentry, inode); - if (file) - error = finish_open(file, dentry, gfs2_open_common, opened); gfs2_glock_dq_uninit(ghs); gfs2_glock_dq_uninit(ghs + 1); - return error; + d_instantiate(dentry, inode); + return 0; fail_gunlock3: gfs2_glock_dq_uninit(ghs + 1); @@ -738,56 +728,36 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, static int gfs2_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { - return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl, NULL); + return gfs2_create_inode(dir, dentry, S_IFREG | mode, 0, NULL, 0, excl); } /** - * __gfs2_lookup - Look up a filename in a directory and return its inode + * gfs2_lookup - Look up a filename in a directory and return its inode * @dir: The directory inode * @dentry: The dentry of the new inode - * @file: File to be opened - * @opened: atomic_open flags + * @nd: passed from Linux VFS, ignored by us * + * Called by the VFS layer. 
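The gfs2_create_inode hunk above restores a fallback where an -EEXIST from the create path turns into a plain lookup of the existing name, unless the caller asked for exclusive-create semantics. A hedged userspace analogue of that decision, using open(2) rather than any GFS2 interface (the path and flags are purely illustrative):

```c
/* Userspace analogue of "create, and on EEXIST fall back to opening the
 * existing object unless exclusivity was requested". Not gfs2 code. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int create_or_open(const char *path, int excl)
{
	int fd = open(path, O_CREAT | O_EXCL | O_RDWR, 0644);

	if (fd >= 0)
		return fd;		/* created a brand new file */

	if (errno == EEXIST && !excl)	/* mirror the -EEXIST fallback */
		return open(path, O_RDWR);

	return -1;			/* real error, or exclusivity wanted */
}

int main(void)
{
	int fd = create_or_open("/tmp/gr_demo_file", 0);

	if (fd < 0) {
		perror("create_or_open");
		return 1;
	}
	close(fd);
	return 0;
}
```

The exclusivity flag is the only thing deciding whether an existing name is an error or simply the object to use, which is exactly the distinction the hunk reintroduces.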
Lock dir and call gfs2_lookupi() * * Returns: errno */ -static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry, - struct file *file, int *opened) -{ - struct inode *inode; - struct dentry *d; - struct gfs2_holder gh; - struct gfs2_glock *gl; - int error; - - inode = gfs2_lookupi(dir, &dentry->d_name, 0); - if (!inode) - return NULL; - if (IS_ERR(inode)) - return ERR_CAST(inode); - - gl = GFS2_I(inode)->i_gl; - error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); - if (error) { - iput(inode); - return ERR_PTR(error); - } - - d = d_splice_alias(inode, dentry); - if (file && S_ISREG(inode->i_mode)) - error = finish_open(file, dentry, gfs2_open_common, opened); - - gfs2_glock_dq_uninit(&gh); - if (error) - return ERR_PTR(error); - return d; -} - static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry, - unsigned flags) + unsigned int flags) { - return __gfs2_lookup(dir, dentry, NULL, NULL); + struct inode *inode = gfs2_lookupi(dir, &dentry->d_name, 0); + if (inode && !IS_ERR(inode)) { + struct gfs2_glock *gl = GFS2_I(inode)->i_gl; + struct gfs2_holder gh; + int error; + error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); + if (error) { + iput(inode); + return ERR_PTR(error); + } + gfs2_glock_dq_uninit(&gh); + } + return d_splice_alias(inode, dentry); } /** @@ -1105,7 +1075,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry, if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode) - 1) return -ENAMETOOLONG; - return gfs2_create_inode(dir, dentry, NULL, S_IFLNK | S_IRWXUGO, 0, symname, size, 0, NULL); + return gfs2_create_inode(dir, dentry, S_IFLNK | S_IRWXUGO, 0, symname, size, 0); } /** @@ -1121,7 +1091,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { struct gfs2_sbd *sdp = GFS2_SB(dir); unsigned dsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); - return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0, NULL); + return gfs2_create_inode(dir, dentry, S_IFDIR | mode, 0, NULL, dsize, 0); } /** @@ -1136,43 +1106,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) static int gfs2_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { - return gfs2_create_inode(dir, dentry, NULL, mode, dev, NULL, 0, 0, NULL); -} - -/** - * gfs2_atomic_open - Atomically open a file - * @dir: The directory - * @dentry: The proposed new entry - * @file: The proposed new struct file - * @flags: open flags - * @mode: File mode - * @opened: Flag to say whether the file has been opened or not - * - * Returns: error code or 0 for success - */ - -static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry, - struct file *file, unsigned flags, - umode_t mode, int *opened) -{ - struct dentry *d; - bool excl = !!(flags & O_EXCL); - - d = __gfs2_lookup(dir, dentry, file, opened); - if (IS_ERR(d)) - return PTR_ERR(d); - if (d == NULL) - d = dentry; - if (d->d_inode) { - if (!(*opened & FILE_OPENED)) - return finish_no_open(file, d); - return 0; - } - - if (!(flags & O_CREAT)) - return -ENOENT; - - return gfs2_create_inode(dir, dentry, file, S_IFREG | mode, 0, NULL, 0, excl, opened); + return gfs2_create_inode(dir, dentry, mode, dev, NULL, 0, 0); } /* @@ -1852,7 +1786,6 @@ const struct inode_operations gfs2_dir_iops = { .removexattr = gfs2_removexattr, .fiemap = gfs2_fiemap, .get_acl = gfs2_get_acl, - .atomic_open = gfs2_atomic_open, }; const struct inode_operations gfs2_symlink_iops = { diff --git a/trunk/fs/gfs2/inode.h 
b/trunk/fs/gfs2/inode.h index ba4d9492d422..c53c7477f6da 100644 --- a/trunk/fs/gfs2/inode.h +++ b/trunk/fs/gfs2/inode.h @@ -109,7 +109,6 @@ extern int gfs2_permission(struct inode *inode, int mask); extern int gfs2_setattr_simple(struct inode *inode, struct iattr *attr); extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name); extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf); -extern int gfs2_open_common(struct inode *inode, struct file *file); extern const struct inode_operations gfs2_file_iops; extern const struct inode_operations gfs2_dir_iops; diff --git a/trunk/fs/gfs2/log.c b/trunk/fs/gfs2/log.c index 610613fb65b5..b404f4853034 100644 --- a/trunk/fs/gfs2/log.c +++ b/trunk/fs/gfs2/log.c @@ -211,16 +211,15 @@ static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr) static int gfs2_ail1_empty(struct gfs2_sbd *sdp) { struct gfs2_trans *tr, *s; - int oldest_tr = 1; int ret; spin_lock(&sdp->sd_ail_lock); list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) { gfs2_ail1_empty_one(sdp, tr); - if (list_empty(&tr->tr_ail1_list) && oldest_tr) + if (list_empty(&tr->tr_ail1_list)) list_move(&tr->tr_list, &sdp->sd_ail2_list); else - oldest_tr = 0; + break; } ret = list_empty(&sdp->sd_ail1_list); spin_unlock(&sdp->sd_ail_lock); @@ -318,7 +317,7 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail) int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks) { - unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize); + unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize); unsigned wanted = blks + reserved_blks; DEFINE_WAIT(wait); int did_wait = 0; @@ -546,76 +545,6 @@ void gfs2_ordered_del_inode(struct gfs2_inode *ip) spin_unlock(&sdp->sd_ordered_lock); } -void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) -{ - struct buffer_head *bh = bd->bd_bh; - struct gfs2_glock *gl = bd->bd_gl; - - gfs2_remove_from_ail(bd); - bd->bd_bh = NULL; - bh->b_private = NULL; - bd->bd_blkno = bh->b_blocknr; - bd->bd_ops = &gfs2_revoke_lops; - sdp->sd_log_num_revoke++; - atomic_inc(&gl->gl_revokes); - set_bit(GLF_LFLUSH, &gl->gl_flags); - list_add(&bd->bd_list, &sdp->sd_log_le_revoke); -} - -void gfs2_write_revokes(struct gfs2_sbd *sdp) -{ - struct gfs2_trans *tr; - struct gfs2_bufdata *bd, *tmp; - int have_revokes = 0; - int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64); - - gfs2_ail1_empty(sdp); - spin_lock(&sdp->sd_ail_lock); - list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) { - list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) { - if (list_empty(&bd->bd_list)) { - have_revokes = 1; - goto done; - } - } - } -done: - spin_unlock(&sdp->sd_ail_lock); - if (have_revokes == 0) - return; - while (sdp->sd_log_num_revoke > max_revokes) - max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64); - max_revokes -= sdp->sd_log_num_revoke; - if (!sdp->sd_log_num_revoke) { - atomic_dec(&sdp->sd_log_blks_free); - /* If no blocks have been reserved, we need to also - * reserve a block for the header */ - if (!sdp->sd_log_blks_reserved) - atomic_dec(&sdp->sd_log_blks_free); - } - gfs2_log_lock(sdp); - spin_lock(&sdp->sd_ail_lock); - list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) { - list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) { - if (max_revokes == 0) - goto out_of_blocks; - if (!list_empty(&bd->bd_list)) - continue; - gfs2_add_revoke(sdp, bd); - max_revokes--; - } - } -out_of_blocks: - 
spin_unlock(&sdp->sd_ail_lock); - gfs2_log_unlock(sdp); - - if (!sdp->sd_log_num_revoke) { - atomic_inc(&sdp->sd_log_blks_free); - if (!sdp->sd_log_blks_reserved) - atomic_inc(&sdp->sd_log_blks_free); - } -} - /** * log_write_header - Get and initialize a journal header buffer * @sdp: The GFS2 superblock @@ -633,6 +562,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags) lh = page_address(page); clear_page(lh); + gfs2_ail1_empty(sdp); tail = current_tail(sdp); lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); diff --git a/trunk/fs/gfs2/log.h b/trunk/fs/gfs2/log.h index 37216634f0aa..3566f35915e0 100644 --- a/trunk/fs/gfs2/log.h +++ b/trunk/fs/gfs2/log.h @@ -72,7 +72,5 @@ extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc) extern void gfs2_log_shutdown(struct gfs2_sbd *sdp); extern void gfs2_meta_syncfs(struct gfs2_sbd *sdp); extern int gfs2_logd(void *data); -extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd); -extern void gfs2_write_revokes(struct gfs2_sbd *sdp); #endif /* __LOG_DOT_H__ */ diff --git a/trunk/fs/gfs2/lops.c b/trunk/fs/gfs2/lops.c index 17c5b5d7dc88..c5fa758fd844 100644 --- a/trunk/fs/gfs2/lops.c +++ b/trunk/fs/gfs2/lops.c @@ -16,7 +16,6 @@ #include #include #include -#include #include "gfs2.h" #include "incore.h" @@ -213,7 +212,7 @@ static void gfs2_end_log_write(struct bio *bio, int error) fs_err(sdp, "Error %d writing to log\n", error); } - bio_for_each_segment_all(bvec, bio, i) { + bio_for_each_segment(bvec, bio, i) { page = bvec->bv_page; if (page_has_buffers(page)) gfs2_end_log_write_bh(sdp, bvec, error); @@ -402,20 +401,6 @@ static void gfs2_check_magic(struct buffer_head *bh) kunmap_atomic(kaddr); } -static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b) -{ - struct gfs2_bufdata *bda, *bdb; - - bda = list_entry(a, struct gfs2_bufdata, bd_list); - bdb = list_entry(b, struct gfs2_bufdata, bd_list); - - if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr) - return -1; - if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr) - return 1; - return 0; -} - static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit, unsigned int total, struct list_head *blist, bool is_databuf) @@ -428,16 +413,13 @@ static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit, __be64 *ptr; gfs2_log_lock(sdp); - list_sort(NULL, blist, blocknr_cmp); bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list); while(total) { num = total; if (total > limit) num = limit; gfs2_log_unlock(sdp); - page = gfs2_get_log_desc(sdp, - is_databuf ? 
GFS2_LOG_DESC_JDATA : - GFS2_LOG_DESC_METADATA, num + 1, num); + page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA, num + 1, num); ld = page_address(page); gfs2_log_lock(sdp); ptr = (__be64 *)(ld + 1); @@ -606,7 +588,6 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp) struct page *page; unsigned int length; - gfs2_write_revokes(sdp); if (!sdp->sd_log_num_revoke) return; @@ -853,6 +834,10 @@ const struct gfs2_log_operations gfs2_revoke_lops = { .lo_name = "revoke", }; +const struct gfs2_log_operations gfs2_rg_lops = { + .lo_name = "rg", +}; + const struct gfs2_log_operations gfs2_databuf_lops = { .lo_before_commit = databuf_lo_before_commit, .lo_after_commit = databuf_lo_after_commit, @@ -864,6 +849,7 @@ const struct gfs2_log_operations gfs2_databuf_lops = { const struct gfs2_log_operations *gfs2_log_ops[] = { &gfs2_databuf_lops, &gfs2_buf_lops, + &gfs2_rg_lops, &gfs2_revoke_lops, NULL, }; diff --git a/trunk/fs/gfs2/lops.h b/trunk/fs/gfs2/lops.h index 9ca2e6438419..87e062e05c92 100644 --- a/trunk/fs/gfs2/lops.h +++ b/trunk/fs/gfs2/lops.h @@ -23,6 +23,7 @@ extern const struct gfs2_log_operations gfs2_glock_lops; extern const struct gfs2_log_operations gfs2_buf_lops; extern const struct gfs2_log_operations gfs2_revoke_lops; +extern const struct gfs2_log_operations gfs2_rg_lops; extern const struct gfs2_log_operations gfs2_databuf_lops; extern const struct gfs2_log_operations *gfs2_log_ops[]; diff --git a/trunk/fs/gfs2/meta_io.c b/trunk/fs/gfs2/meta_io.c index 0da390686c08..1a89afb68472 100644 --- a/trunk/fs/gfs2/meta_io.c +++ b/trunk/fs/gfs2/meta_io.c @@ -296,6 +296,10 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int if (bd) { spin_lock(&sdp->sd_ail_lock); if (bd->bd_tr) { + gfs2_remove_from_ail(bd); + bh->b_private = NULL; + bd->bd_bh = NULL; + bd->bd_blkno = bh->b_blocknr; gfs2_trans_add_revoke(sdp, bd); } spin_unlock(&sdp->sd_ail_lock); diff --git a/trunk/fs/gfs2/ops_fstype.c b/trunk/fs/gfs2/ops_fstype.c index 0262c190b6f9..60ede2a0f43f 100644 --- a/trunk/fs/gfs2/ops_fstype.c +++ b/trunk/fs/gfs2/ops_fstype.c @@ -916,16 +916,16 @@ static int init_threads(struct gfs2_sbd *sdp, int undo) goto fail_quotad; p = kthread_run(gfs2_logd, sdp, "gfs2_logd"); - if (IS_ERR(p)) { - error = PTR_ERR(p); + error = IS_ERR(p); + if (error) { fs_err(sdp, "can't start logd thread: %d\n", error); return error; } sdp->sd_logd_process = p; p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad"); - if (IS_ERR(p)) { - error = PTR_ERR(p); + error = IS_ERR(p); + if (error) { fs_err(sdp, "can't start quotad thread: %d\n", error); goto fail; } diff --git a/trunk/fs/gfs2/quota.c b/trunk/fs/gfs2/quota.c index 3768c2f40e43..c7c840e916f8 100644 --- a/trunk/fs/gfs2/quota.c +++ b/trunk/fs/gfs2/quota.c @@ -121,7 +121,7 @@ static u64 qd2index(struct gfs2_quota_data *qd) { struct kqid qid = qd->qd_id; return (2 * (u64)from_kqid(&init_user_ns, qid)) + - ((qid.type == USRQUOTA) ? 0 : 1); + (qid.type == USRQUOTA) ? 
0 : 1; } static u64 qd2offset(struct gfs2_quota_data *qd) @@ -721,7 +721,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, goto unlock_out; } - gfs2_trans_add_data(ip->i_gl, bh); + gfs2_trans_add_meta(ip->i_gl, bh); kaddr = kmap_atomic(page); if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE) @@ -1154,6 +1154,11 @@ int gfs2_quota_sync(struct super_block *sb, int type) return error; } +static int gfs2_quota_sync_timeo(struct super_block *sb, int type) +{ + return gfs2_quota_sync(sb, type); +} + int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid) { struct gfs2_quota_data *qd; @@ -1409,7 +1414,7 @@ int gfs2_quotad(void *data) &tune->gt_statfs_quantum); /* Update quota file */ - quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t, + quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t, "ad_timeo, &tune->gt_quota_quantum); /* Check for & recover partially truncated inodes */ diff --git a/trunk/fs/gfs2/rgrp.c b/trunk/fs/gfs2/rgrp.c index 69317435faa7..0c5a575b513e 100644 --- a/trunk/fs/gfs2/rgrp.c +++ b/trunk/fs/gfs2/rgrp.c @@ -638,10 +638,8 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs) */ void gfs2_rs_delete(struct gfs2_inode *ip) { - struct inode *inode = &ip->i_inode; - down_write(&ip->i_rw_mutex); - if (ip->i_res && atomic_read(&inode->i_writecount) <= 1) { + if (ip->i_res) { gfs2_rs_deltree(ip->i_res); BUG_ON(ip->i_res->rs_free); kmem_cache_free(gfs2_rsrv_cachep, ip->i_res); @@ -1288,15 +1286,13 @@ int gfs2_fitrim(struct file *filp, void __user *argp) minlen = max_t(u64, r.minlen, q->limits.discard_granularity) >> bs_shift; - if (end <= start || minlen > sdp->sd_max_rg_data) - return -EINVAL; - rgd = gfs2_blk2rgrpd(sdp, start, 0); - rgd_end = gfs2_blk2rgrpd(sdp, end, 0); + rgd_end = gfs2_blk2rgrpd(sdp, end - 1, 0); - if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end)) - && (start > rgd_end->rd_data0 + rgd_end->rd_data)) - return -EINVAL; /* start is beyond the end of the fs */ + if (end <= start || + minlen > sdp->sd_max_rg_data || + start > rgd_end->rd_data0 + rgd_end->rd_data) + return -EINVAL; while (1) { @@ -1338,7 +1334,7 @@ int gfs2_fitrim(struct file *filp, void __user *argp) } out: - r.len = trimmed << bs_shift; + r.len = trimmed << 9; if (copy_to_user(argp, &r, sizeof(r))) return -EFAULT; @@ -1405,14 +1401,9 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip, u32 extlen; u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved; int ret; - struct inode *inode = &ip->i_inode; - if (S_ISDIR(inode->i_mode)) - extlen = 1; - else { - extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested); - extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks); - } + extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested); + extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks); if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen)) return; diff --git a/trunk/fs/gfs2/super.c b/trunk/fs/gfs2/super.c index e5639dec66c4..917c8e1eb4ae 100644 --- a/trunk/fs/gfs2/super.c +++ b/trunk/fs/gfs2/super.c @@ -1444,7 +1444,6 @@ static void gfs2_evict_inode(struct inode *inode) /* Must not read inode block until block type has been verified */ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh); if (unlikely(error)) { - ip->i_iopen_gh.gh_flags |= GL_NOCACHE; gfs2_glock_dq_uninit(&ip->i_iopen_gh); goto out; } @@ -1515,10 +1514,8 @@ static void gfs2_evict_inode(struct inode *inode) if (gfs2_rs_active(ip->i_res)) gfs2_rs_deltree(ip->i_res); - if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) { 
- ip->i_iopen_gh.gh_flags |= GL_NOCACHE; + if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) gfs2_glock_dq(&ip->i_iopen_gh); - } gfs2_holder_uninit(&ip->i_iopen_gh); gfs2_glock_dq_uninit(&gh); if (error && error != GLR_TRYFAILED && error != -EROFS) @@ -1537,7 +1534,6 @@ static void gfs2_evict_inode(struct inode *inode) ip->i_gl = NULL; if (ip->i_iopen_gh.gh_gl) { ip->i_iopen_gh.gh_gl->gl_object = NULL; - ip->i_iopen_gh.gh_flags |= GL_NOCACHE; gfs2_glock_dq_uninit(&ip->i_iopen_gh); } } diff --git a/trunk/fs/gfs2/trans.c b/trunk/fs/gfs2/trans.c index 2b20d7046bf3..7374907742a8 100644 --- a/trunk/fs/gfs2/trans.c +++ b/trunk/fs/gfs2/trans.c @@ -270,12 +270,19 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) { + struct gfs2_glock *gl = bd->bd_gl; struct gfs2_trans *tr = current->journal_info; BUG_ON(!list_empty(&bd->bd_list)); - gfs2_add_revoke(sdp, bd); + BUG_ON(!list_empty(&bd->bd_ail_st_list)); + BUG_ON(!list_empty(&bd->bd_ail_gl_list)); + bd->bd_ops = &gfs2_revoke_lops; tr->tr_touched = 1; tr->tr_num_revoke++; + sdp->sd_log_num_revoke++; + atomic_inc(&gl->gl_revokes); + set_bit(GLF_LFLUSH, &gl->gl_flags); + list_add(&bd->bd_list, &sdp->sd_log_le_revoke); } void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len) diff --git a/trunk/fs/hfs/bnode.c b/trunk/fs/hfs/bnode.c index d3fa6bd9503e..f3b1a15ccd59 100644 --- a/trunk/fs/hfs/bnode.c +++ b/trunk/fs/hfs/bnode.c @@ -415,11 +415,7 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num) spin_lock(&tree->hash_lock); node = hfs_bnode_findhash(tree, num); spin_unlock(&tree->hash_lock); - if (node) { - pr_crit("new node %u already hashed?\n", num); - WARN_ON(1); - return node; - } + BUG_ON(node); node = __hfs_bnode_create(tree, num); if (!node) return ERR_PTR(-ENOMEM); diff --git a/trunk/fs/hfs/dir.c b/trunk/fs/hfs/dir.c index 145566851e7a..e0101b6fb0d7 100644 --- a/trunk/fs/hfs/dir.c +++ b/trunk/fs/hfs/dir.c @@ -51,9 +51,9 @@ static struct dentry *hfs_lookup(struct inode *dir, struct dentry *dentry, /* * hfs_readdir */ -static int hfs_readdir(struct file *file, struct dir_context *ctx) +static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; int len, err; char strbuf[HFS_MAX_NAMELEN]; @@ -62,7 +62,7 @@ static int hfs_readdir(struct file *file, struct dir_context *ctx) struct hfs_readdir_data *rd; u16 type; - if (ctx->pos >= inode->i_size) + if (filp->f_pos >= inode->i_size) return 0; err = hfs_find_init(HFS_SB(sb)->cat_tree, &fd); @@ -73,13 +73,14 @@ static int hfs_readdir(struct file *file, struct dir_context *ctx) if (err) goto out; - if (ctx->pos == 0) { + switch ((u32)filp->f_pos) { + case 0: /* This is completely artificial... 
*/ - if (!dir_emit_dot(file, ctx)) + if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR)) goto out; - ctx->pos = 1; - } - if (ctx->pos == 1) { + filp->f_pos++; + /* fall through */ + case 1: if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { err = -EIO; goto out; @@ -96,16 +97,18 @@ static int hfs_readdir(struct file *file, struct dir_context *ctx) // err = -EIO; // goto out; //} - if (!dir_emit(ctx, "..", 2, + if (filldir(dirent, "..", 2, 1, be32_to_cpu(entry.thread.ParID), DT_DIR)) goto out; - ctx->pos = 2; + filp->f_pos++; + /* fall through */ + default: + if (filp->f_pos >= inode->i_size) + goto out; + err = hfs_brec_goto(&fd, filp->f_pos - 1); + if (err) + goto out; } - if (ctx->pos >= inode->i_size) - goto out; - err = hfs_brec_goto(&fd, ctx->pos - 1); - if (err) - goto out; for (;;) { if (be32_to_cpu(fd.key->cat.ParID) != inode->i_ino) { @@ -128,7 +131,7 @@ static int hfs_readdir(struct file *file, struct dir_context *ctx) err = -EIO; goto out; } - if (!dir_emit(ctx, strbuf, len, + if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.dir.DirID), DT_DIR)) break; } else if (type == HFS_CDR_FIL) { @@ -137,7 +140,7 @@ static int hfs_readdir(struct file *file, struct dir_context *ctx) err = -EIO; goto out; } - if (!dir_emit(ctx, strbuf, len, + if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.file.FlNum), DT_REG)) break; } else { @@ -145,22 +148,22 @@ static int hfs_readdir(struct file *file, struct dir_context *ctx) err = -EIO; goto out; } - ctx->pos++; - if (ctx->pos >= inode->i_size) + filp->f_pos++; + if (filp->f_pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, 1); if (err) goto out; } - rd = file->private_data; + rd = filp->private_data; if (!rd) { rd = kmalloc(sizeof(struct hfs_readdir_data), GFP_KERNEL); if (!rd) { err = -ENOMEM; goto out; } - file->private_data = rd; - rd->file = file; + filp->private_data = rd; + rd->file = filp; list_add(&rd->list, &HFS_I(inode)->open_dir_list); } memcpy(&rd->key, &fd.key, sizeof(struct hfs_cat_key)); @@ -303,7 +306,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry, const struct file_operations hfs_dir_operations = { .read = generic_read_dir, - .iterate = hfs_readdir, + .readdir = hfs_readdir, .llseek = generic_file_llseek, .release = hfs_dir_release, }; diff --git a/trunk/fs/hfsplus/dir.c b/trunk/fs/hfsplus/dir.c index d8ce4bd17fc5..a37ac934732f 100644 --- a/trunk/fs/hfsplus/dir.c +++ b/trunk/fs/hfsplus/dir.c @@ -121,9 +121,9 @@ static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry, return ERR_PTR(err); } -static int hfsplus_readdir(struct file *file, struct dir_context *ctx) +static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; int len, err; char strbuf[HFSPLUS_MAX_STRLEN + 1]; @@ -132,7 +132,7 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx) struct hfsplus_readdir_data *rd; u16 type; - if (file->f_pos >= inode->i_size) + if (filp->f_pos >= inode->i_size) return 0; err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); @@ -143,13 +143,14 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx) if (err) goto out; - if (ctx->pos == 0) { + switch ((u32)filp->f_pos) { + case 0: /* This is completely artificial... 
*/ - if (!dir_emit_dot(file, ctx)) + if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR)) goto out; - ctx->pos = 1; - } - if (ctx->pos == 1) { + filp->f_pos++; + /* fall through */ + case 1: if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { err = -EIO; goto out; @@ -167,16 +168,19 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx) err = -EIO; goto out; } - if (!dir_emit(ctx, "..", 2, + if (filldir(dirent, "..", 2, 1, be32_to_cpu(entry.thread.parentID), DT_DIR)) goto out; - ctx->pos = 2; + filp->f_pos++; + /* fall through */ + default: + if (filp->f_pos >= inode->i_size) + goto out; + err = hfs_brec_goto(&fd, filp->f_pos - 1); + if (err) + goto out; } - if (ctx->pos >= inode->i_size) - goto out; - err = hfs_brec_goto(&fd, ctx->pos - 1); - if (err) - goto out; + for (;;) { if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) { pr_err("walked past end of dir\n"); @@ -207,7 +211,7 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx) HFSPLUS_SB(sb)->hidden_dir->i_ino == be32_to_cpu(entry.folder.id)) goto next; - if (!dir_emit(ctx, strbuf, len, + if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.folder.id), DT_DIR)) break; } else if (type == HFSPLUS_FILE) { @@ -216,7 +220,7 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx) err = -EIO; goto out; } - if (!dir_emit(ctx, strbuf, len, + if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.file.id), DT_REG)) break; } else { @@ -225,22 +229,22 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx) goto out; } next: - ctx->pos++; - if (ctx->pos >= inode->i_size) + filp->f_pos++; + if (filp->f_pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, 1); if (err) goto out; } - rd = file->private_data; + rd = filp->private_data; if (!rd) { rd = kmalloc(sizeof(struct hfsplus_readdir_data), GFP_KERNEL); if (!rd) { err = -ENOMEM; goto out; } - file->private_data = rd; - rd->file = file; + filp->private_data = rd; + rd->file = filp; list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list); } memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key)); @@ -534,7 +538,7 @@ const struct inode_operations hfsplus_dir_inode_operations = { const struct file_operations hfsplus_dir_operations = { .fsync = hfsplus_file_fsync, .read = generic_read_dir, - .iterate = hfsplus_readdir, + .readdir = hfsplus_readdir, .unlocked_ioctl = hfsplus_ioctl, .llseek = generic_file_llseek, .release = hfsplus_dir_release, diff --git a/trunk/fs/hostfs/hostfs_kern.c b/trunk/fs/hostfs/hostfs_kern.c index cddb05217512..32f35f187989 100644 --- a/trunk/fs/hostfs/hostfs_kern.c +++ b/trunk/fs/hostfs/hostfs_kern.c @@ -277,7 +277,7 @@ static const struct super_operations hostfs_sbops = { .show_options = hostfs_show_options, }; -int hostfs_readdir(struct file *file, struct dir_context *ctx) +int hostfs_readdir(struct file *file, void *ent, filldir_t filldir) { void *dir; char *name; @@ -292,11 +292,12 @@ int hostfs_readdir(struct file *file, struct dir_context *ctx) __putname(name); if (dir == NULL) return -error; - next = ctx->pos; + next = file->f_pos; while ((name = read_dir(dir, &next, &ino, &len, &type)) != NULL) { - if (!dir_emit(ctx, name, len, ino, type)) - break; - ctx->pos = next; + error = (*filldir)(ent, name, len, file->f_pos, + ino, type); + if (error) break; + file->f_pos = next; } close_dir(dir); return 0; @@ -392,7 +393,7 @@ static const struct file_operations hostfs_file_fops = { static const struct file_operations hostfs_dir_fops = { .llseek = generic_file_llseek, 
- .iterate = hostfs_readdir, + .readdir = hostfs_readdir, .read = generic_read_dir, }; diff --git a/trunk/fs/hpfs/dir.c b/trunk/fs/hpfs/dir.c index 292b1acb9b81..546f6d39713a 100644 --- a/trunk/fs/hpfs/dir.c +++ b/trunk/fs/hpfs/dir.c @@ -33,38 +33,36 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence) if (whence == SEEK_DATA || whence == SEEK_HOLE) return -EINVAL; - mutex_lock(&i->i_mutex); hpfs_lock(s); /*printk("dir lseek\n");*/ if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok; + mutex_lock(&i->i_mutex); pos = ((loff_t) hpfs_de_as_down_as_possible(s, hpfs_inode->i_dno) << 4) + 1; while (pos != new_off) { if (map_pos_dirent(i, &pos, &qbh)) hpfs_brelse4(&qbh); else goto fail; if (pos == 12) goto fail; } - hpfs_add_pos(i, &filp->f_pos); + mutex_unlock(&i->i_mutex); ok: - filp->f_pos = new_off; hpfs_unlock(s); - mutex_unlock(&i->i_mutex); - return new_off; + return filp->f_pos = new_off; fail: + mutex_unlock(&i->i_mutex); /*printk("illegal lseek: %016llx\n", new_off);*/ hpfs_unlock(s); - mutex_unlock(&i->i_mutex); return -ESPIPE; } -static int hpfs_readdir(struct file *file, struct dir_context *ctx) +static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); struct quad_buffer_head qbh; struct hpfs_dirent *de; int lc; - loff_t next_pos; + long old_pos; unsigned char *tempname; int c1, c2 = 0; int ret = 0; @@ -105,11 +103,11 @@ static int hpfs_readdir(struct file *file, struct dir_context *ctx) } } lc = hpfs_sb(inode->i_sb)->sb_lowercase; - if (ctx->pos == 12) { /* diff -r requires this (note, that diff -r */ - ctx->pos = 13; /* also fails on msdos filesystem in 2.0) */ + if (filp->f_pos == 12) { /* diff -r requires this (note, that diff -r */ + filp->f_pos = 13; /* also fails on msdos filesystem in 2.0) */ goto out; } - if (ctx->pos == 13) { + if (filp->f_pos == 13) { ret = -ENOENT; goto out; } @@ -120,34 +118,33 @@ static int hpfs_readdir(struct file *file, struct dir_context *ctx) accepted by filldir, but what can I do? 
maybe killall -9 ls helps */ if (hpfs_sb(inode->i_sb)->sb_chk) - if (hpfs_stop_cycles(inode->i_sb, ctx->pos, &c1, &c2, "hpfs_readdir")) { + if (hpfs_stop_cycles(inode->i_sb, filp->f_pos, &c1, &c2, "hpfs_readdir")) { ret = -EFSERROR; goto out; } - if (ctx->pos == 12) + if (filp->f_pos == 12) goto out; - if (ctx->pos == 3 || ctx->pos == 4 || ctx->pos == 5) { - printk("HPFS: warning: pos==%d\n",(int)ctx->pos); + if (filp->f_pos == 3 || filp->f_pos == 4 || filp->f_pos == 5) { + printk("HPFS: warning: pos==%d\n",(int)filp->f_pos); goto out; } - if (ctx->pos == 0) { - if (!dir_emit_dot(file, ctx)) + if (filp->f_pos == 0) { + if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) goto out; - ctx->pos = 11; + filp->f_pos = 11; } - if (ctx->pos == 11) { - if (!dir_emit(ctx, "..", 2, hpfs_inode->i_parent_dir, DT_DIR)) + if (filp->f_pos == 11) { + if (filldir(dirent, "..", 2, filp->f_pos, hpfs_inode->i_parent_dir, DT_DIR) < 0) goto out; - ctx->pos = 1; + filp->f_pos = 1; } - if (ctx->pos == 1) { - ctx->pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1; - hpfs_add_pos(inode, &file->f_pos); - file->f_version = inode->i_version; + if (filp->f_pos == 1) { + filp->f_pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1; + hpfs_add_pos(inode, &filp->f_pos); + filp->f_version = inode->i_version; } - next_pos = ctx->pos; - if (!(de = map_pos_dirent(inode, &next_pos, &qbh))) { - ctx->pos = next_pos; + old_pos = filp->f_pos; + if (!(de = map_pos_dirent(inode, &filp->f_pos, &qbh))) { ret = -EIOERROR; goto out; } @@ -155,21 +152,20 @@ static int hpfs_readdir(struct file *file, struct dir_context *ctx) if (hpfs_sb(inode->i_sb)->sb_chk) { if (de->first && !de->last && (de->namelen != 2 || de ->name[0] != 1 || de->name[1] != 1)) - hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08lx", (unsigned long)ctx->pos); + hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08lx", old_pos); if (de->last && (de->namelen != 1 || de ->name[0] != 255)) - hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", (unsigned long)ctx->pos); + hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", old_pos); } hpfs_brelse4(&qbh); - ctx->pos = next_pos; goto again; } tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3); - if (!dir_emit(ctx, tempname, de->namelen, le32_to_cpu(de->fnode), DT_UNKNOWN)) { + if (filldir(dirent, tempname, de->namelen, old_pos, le32_to_cpu(de->fnode), DT_UNKNOWN) < 0) { + filp->f_pos = old_pos; if (tempname != de->name) kfree(tempname); hpfs_brelse4(&qbh); goto out; } - ctx->pos = next_pos; if (tempname != de->name) kfree(tempname); hpfs_brelse4(&qbh); } @@ -324,7 +320,7 @@ const struct file_operations hpfs_dir_ops = { .llseek = hpfs_dir_lseek, .read = generic_read_dir, - .iterate = hpfs_readdir, + .readdir = hpfs_readdir, .release = hpfs_dir_release, .fsync = hpfs_file_fsync, }; diff --git a/trunk/fs/hpfs/file.c b/trunk/fs/hpfs/file.c index e4ba5fe4c3b5..3027f4dbbab5 100644 --- a/trunk/fs/hpfs/file.c +++ b/trunk/fs/hpfs/file.c @@ -109,14 +109,10 @@ static void hpfs_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; - hpfs_lock(inode->i_sb); - if (to > inode->i_size) { truncate_pagecache(inode, to, inode->i_size); hpfs_truncate(inode); } - - hpfs_unlock(inode->i_sb); } static int hpfs_write_begin(struct file *file, struct address_space *mapping, diff --git a/trunk/fs/hppfs/hppfs.c b/trunk/fs/hppfs/hppfs.c 
index fc90ab11c340..cd3e38972c86 100644 --- a/trunk/fs/hppfs/hppfs.c +++ b/trunk/fs/hppfs/hppfs.c @@ -542,8 +542,8 @@ static const struct file_operations hppfs_file_fops = { }; struct hppfs_dirent { - struct dir_context ctx; - struct dir_context *caller; + void *vfs_dirent; + filldir_t filldir; struct dentry *dentry; }; @@ -555,29 +555,34 @@ static int hppfs_filldir(void *d, const char *name, int size, if (file_removed(dirent->dentry, name)) return 0; - dirent->caller->pos = dirent->ctx.pos; - return !dir_emit(dirent->caller, name, size, inode, type); + return (*dirent->filldir)(dirent->vfs_dirent, name, size, offset, + inode, type); } -static int hppfs_readdir(struct file *file, struct dir_context *ctx) +static int hppfs_readdir(struct file *file, void *ent, filldir_t filldir) { struct hppfs_private *data = file->private_data; struct file *proc_file = data->proc_file; - struct hppfs_dirent d = { - .ctx.actor = hppfs_filldir, - .caller = ctx, - .dentry = file->f_path.dentry - }; + int (*readdir)(struct file *, void *, filldir_t); + struct hppfs_dirent dirent = ((struct hppfs_dirent) + { .vfs_dirent = ent, + .filldir = filldir, + .dentry = file->f_path.dentry + }); int err; - proc_file->f_pos = ctx->pos; - err = iterate_dir(proc_file, &d.ctx); - ctx->pos = d.ctx.pos; + + readdir = file_inode(proc_file)->i_fop->readdir; + + proc_file->f_pos = file->f_pos; + err = (*readdir)(proc_file, &dirent, hppfs_filldir); + file->f_pos = proc_file->f_pos; + return err; } static const struct file_operations hppfs_dir_fops = { .owner = NULL, - .iterate = hppfs_readdir, + .readdir = hppfs_readdir, .open = hppfs_dir_open, .llseek = default_llseek, .release = hppfs_release, diff --git a/trunk/fs/internal.h b/trunk/fs/internal.h index 68121584ae37..eaa75f75b625 100644 --- a/trunk/fs/internal.h +++ b/trunk/fs/internal.h @@ -131,12 +131,6 @@ extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); */ extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *); -/* - * splice.c - */ -extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, - loff_t *opos, size_t len, unsigned int flags); - /* * pipe.c */ diff --git a/trunk/fs/isofs/dir.c b/trunk/fs/isofs/dir.c index b943cbd963bb..a7d5c3c3d4e6 100644 --- a/trunk/fs/isofs/dir.c +++ b/trunk/fs/isofs/dir.c @@ -78,8 +78,8 @@ int get_acorn_filename(struct iso_directory_record *de, /* * This should _really_ be cleaned up some day.. 
*/ -static int do_isofs_readdir(struct inode *inode, struct file *file, - struct dir_context *ctx, +static int do_isofs_readdir(struct inode *inode, struct file *filp, + void *dirent, filldir_t filldir, char *tmpname, struct iso_directory_record *tmpde) { unsigned long bufsize = ISOFS_BUFFER_SIZE(inode); @@ -94,10 +94,10 @@ static int do_isofs_readdir(struct inode *inode, struct file *file, struct iso_directory_record *de; struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb); - offset = ctx->pos & (bufsize - 1); - block = ctx->pos >> bufbits; + offset = filp->f_pos & (bufsize - 1); + block = filp->f_pos >> bufbits; - while (ctx->pos < inode->i_size) { + while (filp->f_pos < inode->i_size) { int de_len; if (!bh) { @@ -108,7 +108,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *file, de = (struct iso_directory_record *) (bh->b_data + offset); - de_len = *(unsigned char *)de; + de_len = *(unsigned char *) de; /* * If the length byte is zero, we should move on to the next @@ -119,8 +119,8 @@ static int do_isofs_readdir(struct inode *inode, struct file *file, if (de_len == 0) { brelse(bh); bh = NULL; - ctx->pos = (ctx->pos + ISOFS_BLOCK_SIZE) & ~(ISOFS_BLOCK_SIZE - 1); - block = ctx->pos >> bufbits; + filp->f_pos = (filp->f_pos + ISOFS_BLOCK_SIZE) & ~(ISOFS_BLOCK_SIZE - 1); + block = filp->f_pos >> bufbits; offset = 0; continue; } @@ -164,16 +164,16 @@ static int do_isofs_readdir(struct inode *inode, struct file *file, if (de->flags[-sbi->s_high_sierra] & 0x80) { first_de = 0; - ctx->pos += de_len; + filp->f_pos += de_len; continue; } first_de = 1; /* Handle the case of the '.' directory */ if (de->name_len[0] == 1 && de->name[0] == 0) { - if (!dir_emit_dot(file, ctx)) + if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) break; - ctx->pos += de_len; + filp->f_pos += de_len; continue; } @@ -181,9 +181,10 @@ static int do_isofs_readdir(struct inode *inode, struct file *file, /* Handle the case of the '..' directory */ if (de->name_len[0] == 1 && de->name[0] == 1) { - if (!dir_emit_dotdot(file, ctx)) + inode_number = parent_ino(filp->f_path.dentry); + if (filldir(dirent, "..", 2, filp->f_pos, inode_number, DT_DIR) < 0) break; - ctx->pos += de_len; + filp->f_pos += de_len; continue; } @@ -197,7 +198,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *file, if ((sbi->s_hide && (de->flags[-sbi->s_high_sierra] & 1)) || (!sbi->s_showassoc && (de->flags[-sbi->s_high_sierra] & 4))) { - ctx->pos += de_len; + filp->f_pos += de_len; continue; } @@ -229,10 +230,10 @@ static int do_isofs_readdir(struct inode *inode, struct file *file, } } if (len > 0) { - if (!dir_emit(ctx, p, len, inode_number, DT_UNKNOWN)) + if (filldir(dirent, p, len, filp->f_pos, inode_number, DT_UNKNOWN) < 0) break; } - ctx->pos += de_len; + filp->f_pos += de_len; continue; } @@ -246,12 +247,13 @@ static int do_isofs_readdir(struct inode *inode, struct file *file, * handling split directory entries.. The real work is done by * "do_isofs_readdir()". 
*/ -static int isofs_readdir(struct file *file, struct dir_context *ctx) +static int isofs_readdir(struct file *filp, + void *dirent, filldir_t filldir) { int result; char *tmpname; struct iso_directory_record *tmpde; - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); tmpname = (char *)__get_free_page(GFP_KERNEL); if (tmpname == NULL) @@ -259,7 +261,7 @@ static int isofs_readdir(struct file *file, struct dir_context *ctx) tmpde = (struct iso_directory_record *) (tmpname+1024); - result = do_isofs_readdir(inode, file, ctx, tmpname, tmpde); + result = do_isofs_readdir(inode, filp, dirent, filldir, tmpname, tmpde); free_page((unsigned long) tmpname); return result; @@ -269,7 +271,7 @@ const struct file_operations isofs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = isofs_readdir, + .readdir = isofs_readdir, }; /* diff --git a/trunk/fs/jbd/transaction.c b/trunk/fs/jbd/transaction.c index be0c39b66fe0..e3e255c0a509 100644 --- a/trunk/fs/jbd/transaction.c +++ b/trunk/fs/jbd/transaction.c @@ -2019,20 +2019,16 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh, * void journal_invalidatepage() - invalidate a journal page * @journal: journal to use for flush * @page: page to flush - * @offset: offset of the range to invalidate - * @length: length of the range to invalidate + * @offset: length of page to invalidate. * - * Reap page buffers containing data in specified range in page. + * Reap page buffers containing data after offset in page. */ void journal_invalidatepage(journal_t *journal, struct page *page, - unsigned int offset, - unsigned int length) + unsigned long offset) { struct buffer_head *head, *bh, *next; - unsigned int stop = offset + length; unsigned int curr_off = 0; - int partial_page = (offset || length < PAGE_CACHE_SIZE); int may_free = 1; if (!PageLocked(page)) @@ -2040,8 +2036,6 @@ void journal_invalidatepage(journal_t *journal, if (!page_has_buffers(page)) return; - BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); - /* We will potentially be playing with lists other than just the * data lists (especially for journaled data mode), so be * cautious in our locking. */ @@ -2051,14 +2045,11 @@ void journal_invalidatepage(journal_t *journal, unsigned int next_off = curr_off + bh->b_size; next = bh->b_this_page; - if (next_off > stop) - return; - if (offset <= curr_off) { /* This block is wholly outside the truncation point */ lock_buffer(bh); may_free &= journal_unmap_buffer(journal, bh, - partial_page); + offset > 0); unlock_buffer(bh); } curr_off = next_off; @@ -2066,7 +2057,7 @@ void journal_invalidatepage(journal_t *journal, } while (bh != head); - if (!partial_page) { + if (!offset) { if (may_free && try_to_free_buffers(page)) J_ASSERT(!page_has_buffers(page)); } diff --git a/trunk/fs/jbd2/Kconfig b/trunk/fs/jbd2/Kconfig index 5a9f5534d57b..69a48c2944da 100644 --- a/trunk/fs/jbd2/Kconfig +++ b/trunk/fs/jbd2/Kconfig @@ -20,7 +20,7 @@ config JBD2 config JBD2_DEBUG bool "JBD2 (ext4) debugging support" - depends on JBD2 + depends on JBD2 && DEBUG_FS help If you are using the ext4 journaled file system (or potentially any other filesystem/device using JBD2), this option @@ -29,7 +29,7 @@ config JBD2_DEBUG By default, the debugging output will be turned off. If you select Y here, then you will be able to turn on debugging - with "echo N > /sys/module/jbd2/parameters/jbd2_debug", where N is a + with "echo N > /sys/kernel/debug/jbd2/jbd2-debug", where N is a number between 1 and 5. 
The higher the number, the more debugging output is generated. To turn debugging off again, do - "echo 0 > /sys/module/jbd2/parameters/jbd2_debug". + "echo 0 > /sys/kernel/debug/jbd2/jbd2-debug". diff --git a/trunk/fs/jbd2/checkpoint.c b/trunk/fs/jbd2/checkpoint.c index 7f34f4716165..c78841ee81cf 100644 --- a/trunk/fs/jbd2/checkpoint.c +++ b/trunk/fs/jbd2/checkpoint.c @@ -120,8 +120,8 @@ void __jbd2_log_wait_for_space(journal_t *journal) int nblocks, space_left; /* assert_spin_locked(&journal->j_state_lock); */ - nblocks = jbd2_space_needed(journal); - while (jbd2_log_space_left(journal) < nblocks) { + nblocks = jbd_space_needed(journal); + while (__jbd2_log_space_left(journal) < nblocks) { if (journal->j_flags & JBD2_ABORT) return; write_unlock(&journal->j_state_lock); @@ -140,8 +140,8 @@ void __jbd2_log_wait_for_space(journal_t *journal) */ write_lock(&journal->j_state_lock); spin_lock(&journal->j_list_lock); - nblocks = jbd2_space_needed(journal); - space_left = jbd2_log_space_left(journal); + nblocks = jbd_space_needed(journal); + space_left = __jbd2_log_space_left(journal); if (space_left < nblocks) { int chkpt = journal->j_checkpoint_transactions != NULL; tid_t tid = 0; @@ -156,15 +156,7 @@ void __jbd2_log_wait_for_space(journal_t *journal) /* We were able to recover space; yay! */ ; } else if (tid) { - /* - * jbd2_journal_commit_transaction() may want - * to take the checkpoint_mutex if JBD2_FLUSHED - * is set. So we need to temporarily drop it. - */ - mutex_unlock(&journal->j_checkpoint_mutex); jbd2_log_wait_commit(journal, tid); - write_lock(&journal->j_state_lock); - continue; } else { printk(KERN_ERR "%s: needed %d blocks and " "only had %d space available\n", @@ -633,6 +625,10 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh) __jbd2_journal_drop_transaction(journal, transaction); jbd2_journal_free_transaction(transaction); + + /* Just in case anybody was waiting for more transactions to be + checkpointed... */ + wake_up(&journal->j_wait_logspace); ret = 1; out: return ret; @@ -694,7 +690,9 @@ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transact J_ASSERT(transaction->t_state == T_FINISHED); J_ASSERT(transaction->t_buffers == NULL); J_ASSERT(transaction->t_forget == NULL); + J_ASSERT(transaction->t_iobuf_list == NULL); J_ASSERT(transaction->t_shadow_list == NULL); + J_ASSERT(transaction->t_log_list == NULL); J_ASSERT(transaction->t_checkpoint_list == NULL); J_ASSERT(transaction->t_checkpoint_io_list == NULL); J_ASSERT(atomic_read(&transaction->t_updates) == 0); diff --git a/trunk/fs/jbd2/commit.c b/trunk/fs/jbd2/commit.c index 559bec1a37b4..0f53946f13c1 100644 --- a/trunk/fs/jbd2/commit.c +++ b/trunk/fs/jbd2/commit.c @@ -30,22 +30,15 @@ #include /* - * IO end handler for temporary buffer_heads handling writes to the journal. + * Default IO end handler for temporary BJ_IO buffer_heads. 
*/ static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate) { - struct buffer_head *orig_bh = bh->b_private; - BUFFER_TRACE(bh, ""); if (uptodate) set_buffer_uptodate(bh); else clear_buffer_uptodate(bh); - if (orig_bh) { - clear_bit_unlock(BH_Shadow, &orig_bh->b_state); - smp_mb__after_clear_bit(); - wake_up_bit(&orig_bh->b_state, BH_Shadow); - } unlock_buffer(bh); } @@ -92,7 +85,8 @@ static void release_buffer_page(struct buffer_head *bh) __brelse(bh); } -static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh) +static void jbd2_commit_block_csum_set(journal_t *j, + struct journal_head *descriptor) { struct commit_header *h; __u32 csum; @@ -100,11 +94,12 @@ static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh) if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) return; - h = (struct commit_header *)(bh->b_data); + h = (struct commit_header *)(jh2bh(descriptor)->b_data); h->h_chksum_type = 0; h->h_chksum_size = 0; h->h_chksum[0] = 0; - csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize); + csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data, + j->j_blocksize); h->h_chksum[0] = cpu_to_be32(csum); } @@ -121,6 +116,7 @@ static int journal_submit_commit_record(journal_t *journal, struct buffer_head **cbh, __u32 crc32_sum) { + struct journal_head *descriptor; struct commit_header *tmp; struct buffer_head *bh; int ret; @@ -131,10 +127,12 @@ static int journal_submit_commit_record(journal_t *journal, if (is_journal_aborted(journal)) return 0; - bh = jbd2_journal_get_descriptor_buffer(journal); - if (!bh) + descriptor = jbd2_journal_get_descriptor_buffer(journal); + if (!descriptor) return 1; + bh = jh2bh(descriptor); + tmp = (struct commit_header *)bh->b_data; tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER); tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK); @@ -148,9 +146,9 @@ static int journal_submit_commit_record(journal_t *journal, tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE; tmp->h_chksum[0] = cpu_to_be32(crc32_sum); } - jbd2_commit_block_csum_set(journal, bh); + jbd2_commit_block_csum_set(journal, descriptor); - BUFFER_TRACE(bh, "submit commit block"); + JBUFFER_TRACE(descriptor, "submit commit block"); lock_buffer(bh); clear_buffer_dirty(bh); set_buffer_uptodate(bh); @@ -182,6 +180,7 @@ static int journal_wait_on_commit_record(journal_t *journal, if (unlikely(!buffer_uptodate(bh))) ret = -EIO; put_bh(bh); /* One for getblk() */ + jbd2_journal_put_journal_head(bh2jh(bh)); return ret; } @@ -322,7 +321,7 @@ static void write_tag_block(int tag_bytes, journal_block_tag_t *tag, } static void jbd2_descr_block_csum_set(journal_t *j, - struct buffer_head *bh) + struct journal_head *descriptor) { struct jbd2_journal_block_tail *tail; __u32 csum; @@ -330,10 +329,12 @@ static void jbd2_descr_block_csum_set(journal_t *j, if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) return; - tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize - + tail = (struct jbd2_journal_block_tail *) + (jh2bh(descriptor)->b_data + j->j_blocksize - sizeof(struct jbd2_journal_block_tail)); tail->t_checksum = 0; - csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize); + csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data, + j->j_blocksize); tail->t_checksum = cpu_to_be32(csum); } @@ -342,21 +343,20 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag, { struct page *page = bh->b_page; __u8 *addr; - __u32 csum32; + __u32 csum; if 
(!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) return; sequence = cpu_to_be32(sequence); addr = kmap_atomic(page); - csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence, - sizeof(sequence)); - csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data), - bh->b_size); + csum = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence, + sizeof(sequence)); + csum = jbd2_chksum(j, csum, addr + offset_in_page(bh->b_data), + bh->b_size); kunmap_atomic(addr); - /* We only have space to store the lower 16 bits of the crc32c. */ - tag->t_checksum = cpu_to_be16(csum32); + tag->t_checksum = cpu_to_be32(csum); } /* * jbd2_journal_commit_transaction @@ -368,8 +368,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) { struct transaction_stats_s stats; transaction_t *commit_transaction; - struct journal_head *jh; - struct buffer_head *descriptor; + struct journal_head *jh, *new_jh, *descriptor; struct buffer_head **wbuf = journal->j_wbuf; int bufs; int flags; @@ -393,8 +392,6 @@ void jbd2_journal_commit_transaction(journal_t *journal) tid_t first_tid; int update_tail; int csum_size = 0; - LIST_HEAD(io_bufs); - LIST_HEAD(log_bufs); if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) csum_size = sizeof(struct jbd2_journal_block_tail); @@ -427,13 +424,13 @@ void jbd2_journal_commit_transaction(journal_t *journal) J_ASSERT(journal->j_committing_transaction == NULL); commit_transaction = journal->j_running_transaction; + J_ASSERT(commit_transaction->t_state == T_RUNNING); trace_jbd2_start_commit(journal, commit_transaction); jbd_debug(1, "JBD2: starting commit of transaction %d\n", commit_transaction->t_tid); write_lock(&journal->j_state_lock); - J_ASSERT(commit_transaction->t_state == T_RUNNING); commit_transaction->t_state = T_LOCKED; trace_jbd2_commit_locking(journal, commit_transaction); @@ -523,12 +520,6 @@ void jbd2_journal_commit_transaction(journal_t *journal) */ jbd2_journal_switch_revoke_table(journal); - /* - * Reserved credits cannot be claimed anymore, free them - */ - atomic_sub(atomic_read(&journal->j_reserved_credits), - &commit_transaction->t_outstanding_credits); - trace_jbd2_commit_flushing(journal, commit_transaction); stats.run.rs_flushing = jiffies; stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked, @@ -542,7 +533,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) wake_up(&journal->j_wait_transaction_locked); write_unlock(&journal->j_state_lock); - jbd_debug(3, "JBD2: commit phase 2a\n"); + jbd_debug(3, "JBD2: commit phase 2\n"); /* * Now start flushing things to disk, in the order they appear @@ -554,10 +545,10 @@ void jbd2_journal_commit_transaction(journal_t *journal) blk_start_plug(&plug); jbd2_journal_write_revoke_records(journal, commit_transaction, - &log_bufs, WRITE_SYNC); + WRITE_SYNC); blk_finish_plug(&plug); - jbd_debug(3, "JBD2: commit phase 2b\n"); + jbd_debug(3, "JBD2: commit phase 2\n"); /* * Way to go: we have now written out all of the data for a @@ -580,8 +571,8 @@ void jbd2_journal_commit_transaction(journal_t *journal) atomic_read(&commit_transaction->t_outstanding_credits)); err = 0; - bufs = 0; descriptor = NULL; + bufs = 0; blk_start_plug(&plug); while (commit_transaction->t_buffers) { @@ -613,6 +604,8 @@ void jbd2_journal_commit_transaction(journal_t *journal) record the metadata buffer. 
*/ if (!descriptor) { + struct buffer_head *bh; + J_ASSERT (bufs == 0); jbd_debug(4, "JBD2: get descriptor\n"); @@ -623,26 +616,26 @@ void jbd2_journal_commit_transaction(journal_t *journal) continue; } + bh = jh2bh(descriptor); jbd_debug(4, "JBD2: got buffer %llu (%p)\n", - (unsigned long long)descriptor->b_blocknr, - descriptor->b_data); - header = (journal_header_t *)descriptor->b_data; + (unsigned long long)bh->b_blocknr, bh->b_data); + header = (journal_header_t *)&bh->b_data[0]; header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER); header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK); header->h_sequence = cpu_to_be32(commit_transaction->t_tid); - tagp = &descriptor->b_data[sizeof(journal_header_t)]; - space_left = descriptor->b_size - - sizeof(journal_header_t); + tagp = &bh->b_data[sizeof(journal_header_t)]; + space_left = bh->b_size - sizeof(journal_header_t); first_tag = 1; - set_buffer_jwrite(descriptor); - set_buffer_dirty(descriptor); - wbuf[bufs++] = descriptor; + set_buffer_jwrite(bh); + set_buffer_dirty(bh); + wbuf[bufs++] = bh; /* Record it so that we can wait for IO completion later */ - BUFFER_TRACE(descriptor, "ph3: file as descriptor"); - jbd2_file_log_bh(&log_bufs, descriptor); + BUFFER_TRACE(bh, "ph3: file as descriptor"); + jbd2_journal_file_buffer(descriptor, commit_transaction, + BJ_LogCtl); } /* Where is the buffer to be written? */ @@ -665,22 +658,29 @@ void jbd2_journal_commit_transaction(journal_t *journal) /* Bump b_count to prevent truncate from stumbling over the shadowed buffer! @@@ This can go if we ever get - rid of the shadow pairing of buffers. */ + rid of the BJ_IO/BJ_Shadow pairing of buffers. */ atomic_inc(&jh2bh(jh)->b_count); + /* Make a temporary IO buffer with which to write it out + (this will requeue both the metadata buffer and the + temporary IO buffer). new_bh goes on BJ_IO*/ + + set_bit(BH_JWrite, &jh2bh(jh)->b_state); /* - * Make a temporary IO buffer with which to write it out - * (this will requeue the metadata buffer to BJ_Shadow). + * akpm: jbd2_journal_write_metadata_buffer() sets + * new_bh->b_transaction to commit_transaction. + * We need to clean this up before we release new_bh + * (which is of type BJ_IO) */ - set_bit(BH_JWrite, &jh2bh(jh)->b_state); JBUFFER_TRACE(jh, "ph3: write metadata"); flags = jbd2_journal_write_metadata_buffer(commit_transaction, - jh, &wbuf[bufs], blocknr); + jh, &new_jh, blocknr); if (flags < 0) { jbd2_journal_abort(journal, flags); continue; } - jbd2_file_log_bh(&io_bufs, wbuf[bufs]); + set_bit(BH_JWrite, &jh2bh(new_jh)->b_state); + wbuf[bufs++] = jh2bh(new_jh); /* Record the new block's tag in the current descriptor buffer */ @@ -694,11 +694,10 @@ void jbd2_journal_commit_transaction(journal_t *journal) tag = (journal_block_tag_t *) tagp; write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr); tag->t_flags = cpu_to_be16(tag_flag); - jbd2_block_tag_csum_set(journal, tag, wbuf[bufs], + jbd2_block_tag_csum_set(journal, tag, jh2bh(new_jh), commit_transaction->t_tid); tagp += tag_bytes; space_left -= tag_bytes; - bufs++; if (first_tag) { memcpy (tagp, journal->j_uuid, 16); @@ -810,7 +809,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) the log. Before we can commit it, wait for the IO so far to complete. Control buffers being written are on the transaction's t_log_list queue, and metadata buffers are on - the io_bufs list. + the t_iobuf_list queue. Wait for the buffers in reverse order. 
That way we are less likely to be woken up until all IOs have completed, and @@ -819,33 +818,47 @@ void jbd2_journal_commit_transaction(journal_t *journal) jbd_debug(3, "JBD2: commit phase 3\n"); - while (!list_empty(&io_bufs)) { - struct buffer_head *bh = list_entry(io_bufs.prev, - struct buffer_head, - b_assoc_buffers); + /* + * akpm: these are BJ_IO, and j_list_lock is not needed. + * See __journal_try_to_free_buffer. + */ +wait_for_iobuf: + while (commit_transaction->t_iobuf_list != NULL) { + struct buffer_head *bh; - wait_on_buffer(bh); - cond_resched(); + jh = commit_transaction->t_iobuf_list->b_tprev; + bh = jh2bh(jh); + if (buffer_locked(bh)) { + wait_on_buffer(bh); + goto wait_for_iobuf; + } + if (cond_resched()) + goto wait_for_iobuf; if (unlikely(!buffer_uptodate(bh))) err = -EIO; - jbd2_unfile_log_bh(bh); + + clear_buffer_jwrite(bh); + + JBUFFER_TRACE(jh, "ph4: unfile after journal write"); + jbd2_journal_unfile_buffer(journal, jh); /* - * The list contains temporary buffer heads created by - * jbd2_journal_write_metadata_buffer(). + * ->t_iobuf_list should contain only dummy buffer_heads + * which were created by jbd2_journal_write_metadata_buffer(). */ BUFFER_TRACE(bh, "dumping temporary bh"); + jbd2_journal_put_journal_head(jh); __brelse(bh); J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0); free_buffer_head(bh); - /* We also have to refile the corresponding shadowed buffer */ + /* We also have to unlock and free the corresponding + shadowed buffer */ jh = commit_transaction->t_shadow_list->b_tprev; bh = jh2bh(jh); - clear_buffer_jwrite(bh); + clear_bit(BH_JWrite, &bh->b_state); J_ASSERT_BH(bh, buffer_jbddirty(bh)); - J_ASSERT_BH(bh, !buffer_shadow(bh)); /* The metadata is now released for reuse, but we need to remember it against this transaction so that when @@ -853,6 +866,14 @@ void jbd2_journal_commit_transaction(journal_t *journal) required. */ JBUFFER_TRACE(jh, "file as BJ_Forget"); jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget); + /* + * Wake up any transactions which were waiting for this IO to + * complete. The barrier must be here so that changes by + * jbd2_journal_file_buffer() take effect before wake_up_bit() + * does the waitqueue check. 
+ */ + smp_mb(); + wake_up_bit(&bh->b_state, BH_Unshadow); JBUFFER_TRACE(jh, "brelse shadowed buffer"); __brelse(bh); } @@ -862,19 +883,26 @@ void jbd2_journal_commit_transaction(journal_t *journal) jbd_debug(3, "JBD2: commit phase 4\n"); /* Here we wait for the revoke record and descriptor record buffers */ - while (!list_empty(&log_bufs)) { + wait_for_ctlbuf: + while (commit_transaction->t_log_list != NULL) { struct buffer_head *bh; - bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers); - wait_on_buffer(bh); - cond_resched(); + jh = commit_transaction->t_log_list->b_tprev; + bh = jh2bh(jh); + if (buffer_locked(bh)) { + wait_on_buffer(bh); + goto wait_for_ctlbuf; + } + if (cond_resched()) + goto wait_for_ctlbuf; if (unlikely(!buffer_uptodate(bh))) err = -EIO; BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile"); clear_buffer_jwrite(bh); - jbd2_unfile_log_bh(bh); + jbd2_journal_unfile_buffer(journal, jh); + jbd2_journal_put_journal_head(jh); __brelse(bh); /* One for getblk */ /* AKPM: bforget here */ } @@ -924,7 +952,9 @@ void jbd2_journal_commit_transaction(journal_t *journal) J_ASSERT(list_empty(&commit_transaction->t_inode_list)); J_ASSERT(commit_transaction->t_buffers == NULL); J_ASSERT(commit_transaction->t_checkpoint_list == NULL); + J_ASSERT(commit_transaction->t_iobuf_list == NULL); J_ASSERT(commit_transaction->t_shadow_list == NULL); + J_ASSERT(commit_transaction->t_log_list == NULL); restart_loop: /* diff --git a/trunk/fs/jbd2/journal.c b/trunk/fs/jbd2/journal.c index 02c7ad9d7a41..95457576e434 100644 --- a/trunk/fs/jbd2/journal.c +++ b/trunk/fs/jbd2/journal.c @@ -103,24 +103,6 @@ EXPORT_SYMBOL(jbd2_inode_cache); static void __journal_abort_soft (journal_t *journal, int errno); static int jbd2_journal_create_slab(size_t slab_size); -#ifdef CONFIG_JBD2_DEBUG -void __jbd2_debug(int level, const char *file, const char *func, - unsigned int line, const char *fmt, ...) -{ - struct va_format vaf; - va_list args; - - if (level > jbd2_journal_enable_debug) - return; - va_start(args, fmt); - vaf.fmt = fmt; - vaf.va = &args; - printk(KERN_DEBUG "%s: (%s, %u): %pV\n", file, func, line, &vaf); - va_end(args); -} -EXPORT_SYMBOL(__jbd2_debug); -#endif - /* Checksumming functions */ int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb) { @@ -328,12 +310,14 @@ static void journal_kill_thread(journal_t *journal) * * If the source buffer has already been modified by a new transaction * since we took the last commit snapshot, we use the frozen copy of - * that data for IO. If we end up using the existing buffer_head's data - * for the write, then we have to make sure nobody modifies it while the - * IO is in progress. do_get_write_access() handles this. + * that data for IO. If we end up using the existing buffer_head's data + * for the write, then we *have* to lock the buffer to prevent anyone + * else from using and possibly modifying it while the IO is in + * progress. * - * The function returns a pointer to the buffer_head to be used for IO. - * + * The function returns a pointer to the buffer_heads to be used for IO. + * + * We assume that the journal has already been locked in this function. 
* * Return value: * <0: Error @@ -346,14 +330,15 @@ static void journal_kill_thread(journal_t *journal) int jbd2_journal_write_metadata_buffer(transaction_t *transaction, struct journal_head *jh_in, - struct buffer_head **bh_out, - sector_t blocknr) + struct journal_head **jh_out, + unsigned long long blocknr) { int need_copy_out = 0; int done_copy_out = 0; int do_escape = 0; char *mapped_data; struct buffer_head *new_bh; + struct journal_head *new_jh; struct page *new_page; unsigned int new_offset; struct buffer_head *bh_in = jh2bh(jh_in); @@ -383,13 +368,14 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, /* keep subsequent assertions sane */ atomic_set(&new_bh->b_count, 1); + new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */ - jbd_lock_bh_state(bh_in); -repeat: /* * If a new transaction has already done a buffer copy-out, then * we use that version of the data for the commit. */ + jbd_lock_bh_state(bh_in); +repeat: if (jh_in->b_frozen_data) { done_copy_out = 1; new_page = virt_to_page(jh_in->b_frozen_data); @@ -429,7 +415,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, jbd_unlock_bh_state(bh_in); tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS); if (!tmp) { - brelse(new_bh); + jbd2_journal_put_journal_head(new_jh); return -ENOMEM; } jbd_lock_bh_state(bh_in); @@ -440,7 +426,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, jh_in->b_frozen_data = tmp; mapped_data = kmap_atomic(new_page); - memcpy(tmp, mapped_data + new_offset, bh_in->b_size); + memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size); kunmap_atomic(mapped_data); new_page = virt_to_page(tmp); @@ -466,14 +452,14 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, } set_bh_page(new_bh, new_page, new_offset); - new_bh->b_size = bh_in->b_size; - new_bh->b_bdev = journal->j_dev; + new_jh->b_transaction = NULL; + new_bh->b_size = jh2bh(jh_in)->b_size; + new_bh->b_bdev = transaction->t_journal->j_dev; new_bh->b_blocknr = blocknr; - new_bh->b_private = bh_in; set_buffer_mapped(new_bh); set_buffer_dirty(new_bh); - *bh_out = new_bh; + *jh_out = new_jh; /* * The to-be-written buffer needs to get moved to the io queue, @@ -484,9 +470,11 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, spin_lock(&journal->j_list_lock); __jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow); spin_unlock(&journal->j_list_lock); - set_buffer_shadow(bh_in); jbd_unlock_bh_state(bh_in); + JBUFFER_TRACE(new_jh, "file as BJ_IO"); + jbd2_journal_file_buffer(new_jh, transaction, BJ_IO); + return do_escape | (done_copy_out << 1); } @@ -495,6 +483,35 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, * journal, so that we can begin checkpointing when appropriate. */ +/* + * __jbd2_log_space_left: Return the number of free blocks left in the journal. + * + * Called with the journal already locked. + * + * Called under j_state_lock + */ + +int __jbd2_log_space_left(journal_t *journal) +{ + int left = journal->j_free; + + /* assert_spin_locked(&journal->j_state_lock); */ + + /* + * Be pessimistic here about the number of those free blocks which + * might be required for log descriptor control blocks. + */ + +#define MIN_LOG_RESERVED_BLOCKS 32 /* Allow for rounding errors */ + + left -= MIN_LOG_RESERVED_BLOCKS; + + if (left <= 0) + return 0; + left -= (left >> 3); + return left; +} + /* * Called with j_state_lock locked for writing. * Returns true if a transaction commit was started. 
@@ -547,17 +564,20 @@ int jbd2_log_start_commit(journal_t *journal, tid_t tid) } /* - * Force and wait any uncommitted transactions. We can only force the running - * transaction if we don't have an active handle, otherwise, we will deadlock. - * Returns: <0 in case of error, - * 0 if nothing to commit, - * 1 if transaction was successfully committed. + * Force and wait upon a commit if the calling process is not within + * transaction. This is used for forcing out undo-protected data which contains + * bitmaps, when the fs is running out of space. + * + * We can only force the running transaction if we don't have an active handle; + * otherwise, we will deadlock. + * + * Returns true if a transaction was started. */ -static int __jbd2_journal_force_commit(journal_t *journal) +int jbd2_journal_force_commit_nested(journal_t *journal) { transaction_t *transaction = NULL; tid_t tid; - int need_to_start = 0, ret = 0; + int need_to_start = 0; read_lock(&journal->j_state_lock); if (journal->j_running_transaction && !current->journal_info) { @@ -568,53 +588,16 @@ static int __jbd2_journal_force_commit(journal_t *journal) transaction = journal->j_committing_transaction; if (!transaction) { - /* Nothing to commit */ read_unlock(&journal->j_state_lock); - return 0; + return 0; /* Nothing to retry */ } + tid = transaction->t_tid; read_unlock(&journal->j_state_lock); if (need_to_start) jbd2_log_start_commit(journal, tid); - ret = jbd2_log_wait_commit(journal, tid); - if (!ret) - ret = 1; - - return ret; -} - -/** - * Force and wait upon a commit if the calling process is not within - * transaction. This is used for forcing out undo-protected data which contains - * bitmaps, when the fs is running out of space. - * - * @journal: journal to force - * Returns true if progress was made. - */ -int jbd2_journal_force_commit_nested(journal_t *journal) -{ - int ret; - - ret = __jbd2_journal_force_commit(journal); - return ret > 0; -} - -/** - * int journal_force_commit() - force any uncommitted transactions - * @journal: journal to force - * - * Caller want unconditional commit. We can only force the running transaction - * if we don't have an active handle, otherwise, we will deadlock. - */ -int jbd2_journal_force_commit(journal_t *journal) -{ - int ret; - - J_ASSERT(!current->journal_info); - ret = __jbd2_journal_force_commit(journal); - if (ret > 0) - ret = 0; - return ret; + jbd2_log_wait_commit(journal, tid); + return 1; } /* @@ -815,7 +798,7 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr, * But we don't bother doing that, so there will be coherency problems with * mmaps of blockdevs which hold live JBD-controlled filesystems. 
*/ -struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal) +struct journal_head *jbd2_journal_get_descriptor_buffer(journal_t *journal) { struct buffer_head *bh; unsigned long long blocknr; @@ -834,7 +817,7 @@ struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal) set_buffer_uptodate(bh); unlock_buffer(bh); BUFFER_TRACE(bh, "return this buffer"); - return bh; + return jbd2_journal_add_journal_head(bh); } /* @@ -1079,10 +1062,11 @@ static journal_t * journal_init_common (void) return NULL; init_waitqueue_head(&journal->j_wait_transaction_locked); + init_waitqueue_head(&journal->j_wait_logspace); init_waitqueue_head(&journal->j_wait_done_commit); + init_waitqueue_head(&journal->j_wait_checkpoint); init_waitqueue_head(&journal->j_wait_commit); init_waitqueue_head(&journal->j_wait_updates); - init_waitqueue_head(&journal->j_wait_reserved); mutex_init(&journal->j_barrier); mutex_init(&journal->j_checkpoint_mutex); spin_lock_init(&journal->j_revoke_lock); @@ -1092,7 +1076,6 @@ static journal_t * journal_init_common (void) journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE); journal->j_min_batch_time = 0; journal->j_max_batch_time = 15000; /* 15ms */ - atomic_set(&journal->j_reserved_credits, 0); /* The journal is marked for error until we succeed with recovery! */ journal->j_flags = JBD2_ABORT; @@ -1335,7 +1318,6 @@ static int journal_reset(journal_t *journal) static void jbd2_write_superblock(journal_t *journal, int write_op) { struct buffer_head *bh = journal->j_sb_buffer; - journal_superblock_t *sb = journal->j_superblock; int ret; trace_jbd2_write_superblock(journal, write_op); @@ -1357,7 +1339,6 @@ static void jbd2_write_superblock(journal_t *journal, int write_op) clear_buffer_write_io_error(bh); set_buffer_uptodate(bh); } - jbd2_superblock_csum_set(journal, sb); get_bh(bh); bh->b_end_io = end_buffer_write_sync; ret = submit_bh(write_op, bh); @@ -1454,6 +1435,7 @@ void jbd2_journal_update_sb_errno(journal_t *journal) jbd_debug(1, "JBD2: updating superblock error (errno %d)\n", journal->j_errno); sb->s_errno = cpu_to_be32(journal->j_errno); + jbd2_superblock_csum_set(journal, sb); read_unlock(&journal->j_state_lock); jbd2_write_superblock(journal, WRITE_SYNC); @@ -2343,13 +2325,13 @@ static struct journal_head *journal_alloc_journal_head(void) #ifdef CONFIG_JBD2_DEBUG atomic_inc(&nr_journal_heads); #endif - ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS); + ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS); if (!ret) { jbd_debug(1, "out of memory for journal_head\n"); pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__); while (!ret) { yield(); - ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS); + ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS); } } return ret; @@ -2411,8 +2393,10 @@ struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh) struct journal_head *new_jh = NULL; repeat: - if (!buffer_jbd(bh)) + if (!buffer_jbd(bh)) { new_jh = journal_alloc_journal_head(); + memset(new_jh, 0, sizeof(*new_jh)); + } jbd_lock_bh_journal_head(bh); if (buffer_jbd(bh)) { diff --git a/trunk/fs/jbd2/recovery.c b/trunk/fs/jbd2/recovery.c index d4851464b57e..626846bac32f 100644 --- a/trunk/fs/jbd2/recovery.c +++ b/trunk/fs/jbd2/recovery.c @@ -399,17 +399,18 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf) static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag, void *buf, __u32 sequence) { - __u32 csum32; + __u32 provided, calculated; if 
(!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) return 1; sequence = cpu_to_be32(sequence); - csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence, - sizeof(sequence)); - csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize); + calculated = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence, + sizeof(sequence)); + calculated = jbd2_chksum(j, calculated, buf, j->j_blocksize); + provided = be32_to_cpu(tag->t_checksum); - return tag->t_checksum == cpu_to_be16(csum32); + return provided == cpu_to_be32(calculated); } static int do_one_pass(journal_t *journal, diff --git a/trunk/fs/jbd2/revoke.c b/trunk/fs/jbd2/revoke.c index 198c9c10276d..f30b80b4ce8b 100644 --- a/trunk/fs/jbd2/revoke.c +++ b/trunk/fs/jbd2/revoke.c @@ -122,10 +122,9 @@ struct jbd2_revoke_table_s #ifdef __KERNEL__ static void write_one_revoke_record(journal_t *, transaction_t *, - struct list_head *, - struct buffer_head **, int *, + struct journal_head **, int *, struct jbd2_revoke_record_s *, int); -static void flush_descriptor(journal_t *, struct buffer_head *, int, int); +static void flush_descriptor(journal_t *, struct journal_head *, int, int); #endif /* Utility functions to maintain the revoke table */ @@ -532,10 +531,9 @@ void jbd2_journal_switch_revoke_table(journal_t *journal) */ void jbd2_journal_write_revoke_records(journal_t *journal, transaction_t *transaction, - struct list_head *log_bufs, int write_op) { - struct buffer_head *descriptor; + struct journal_head *descriptor; struct jbd2_revoke_record_s *record; struct jbd2_revoke_table_s *revoke; struct list_head *hash_list; @@ -555,7 +553,7 @@ void jbd2_journal_write_revoke_records(journal_t *journal, while (!list_empty(hash_list)) { record = (struct jbd2_revoke_record_s *) hash_list->next; - write_one_revoke_record(journal, transaction, log_bufs, + write_one_revoke_record(journal, transaction, &descriptor, &offset, record, write_op); count++; @@ -575,14 +573,13 @@ void jbd2_journal_write_revoke_records(journal_t *journal, static void write_one_revoke_record(journal_t *journal, transaction_t *transaction, - struct list_head *log_bufs, - struct buffer_head **descriptorp, + struct journal_head **descriptorp, int *offsetp, struct jbd2_revoke_record_s *record, int write_op) { int csum_size = 0; - struct buffer_head *descriptor; + struct journal_head *descriptor; int offset; journal_header_t *header; @@ -612,26 +609,26 @@ static void write_one_revoke_record(journal_t *journal, descriptor = jbd2_journal_get_descriptor_buffer(journal); if (!descriptor) return; - header = (journal_header_t *)descriptor->b_data; + header = (journal_header_t *) &jh2bh(descriptor)->b_data[0]; header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER); header->h_blocktype = cpu_to_be32(JBD2_REVOKE_BLOCK); header->h_sequence = cpu_to_be32(transaction->t_tid); /* Record it so that we can wait for IO completion later */ - BUFFER_TRACE(descriptor, "file in log_bufs"); - jbd2_file_log_bh(log_bufs, descriptor); + JBUFFER_TRACE(descriptor, "file as BJ_LogCtl"); + jbd2_journal_file_buffer(descriptor, transaction, BJ_LogCtl); offset = sizeof(jbd2_journal_revoke_header_t); *descriptorp = descriptor; } if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) { - * ((__be64 *)(&descriptor->b_data[offset])) = + * ((__be64 *)(&jh2bh(descriptor)->b_data[offset])) = cpu_to_be64(record->blocknr); offset += 8; } else { - * ((__be32 *)(&descriptor->b_data[offset])) = + * ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) = cpu_to_be32(record->blocknr); offset += 4; } @@ -639,7 +636,8 @@ static 
void write_one_revoke_record(journal_t *journal, *offsetp = offset; } -static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh) +static void jbd2_revoke_csum_set(journal_t *j, + struct journal_head *descriptor) { struct jbd2_journal_revoke_tail *tail; __u32 csum; @@ -647,10 +645,12 @@ static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh) if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) return; - tail = (struct jbd2_journal_revoke_tail *)(bh->b_data + j->j_blocksize - + tail = (struct jbd2_journal_revoke_tail *) + (jh2bh(descriptor)->b_data + j->j_blocksize - sizeof(struct jbd2_journal_revoke_tail)); tail->r_checksum = 0; - csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize); + csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data, + j->j_blocksize); tail->r_checksum = cpu_to_be32(csum); } @@ -662,24 +662,25 @@ static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh) */ static void flush_descriptor(journal_t *journal, - struct buffer_head *descriptor, + struct journal_head *descriptor, int offset, int write_op) { jbd2_journal_revoke_header_t *header; + struct buffer_head *bh = jh2bh(descriptor); if (is_journal_aborted(journal)) { - put_bh(descriptor); + put_bh(bh); return; } - header = (jbd2_journal_revoke_header_t *)descriptor->b_data; + header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data; header->r_count = cpu_to_be32(offset); jbd2_revoke_csum_set(journal, descriptor); - set_buffer_jwrite(descriptor); - BUFFER_TRACE(descriptor, "write"); - set_buffer_dirty(descriptor); - write_dirty_buffer(descriptor, write_op); + set_buffer_jwrite(bh); + BUFFER_TRACE(bh, "write"); + set_buffer_dirty(bh); + write_dirty_buffer(bh, write_op); } #endif diff --git a/trunk/fs/jbd2/transaction.c b/trunk/fs/jbd2/transaction.c index 7aa9a32573bb..10f524c59ea8 100644 --- a/trunk/fs/jbd2/transaction.c +++ b/trunk/fs/jbd2/transaction.c @@ -89,8 +89,7 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction) transaction->t_expires = jiffies + journal->j_commit_interval; spin_lock_init(&transaction->t_handle_lock); atomic_set(&transaction->t_updates, 0); - atomic_set(&transaction->t_outstanding_credits, - atomic_read(&journal->j_reserved_credits)); + atomic_set(&transaction->t_outstanding_credits, 0); atomic_set(&transaction->t_handle_count, 0); INIT_LIST_HEAD(&transaction->t_inode_list); INIT_LIST_HEAD(&transaction->t_private_list); @@ -141,112 +140,6 @@ static inline void update_t_max_wait(transaction_t *transaction, #endif } -/* - * Wait until running transaction passes T_LOCKED state. Also starts the commit - * if needed. The function expects running transaction to exist and releases - * j_state_lock. - */ -static void wait_transaction_locked(journal_t *journal) - __releases(journal->j_state_lock) -{ - DEFINE_WAIT(wait); - int need_to_start; - tid_t tid = journal->j_running_transaction->t_tid; - - prepare_to_wait(&journal->j_wait_transaction_locked, &wait, - TASK_UNINTERRUPTIBLE); - need_to_start = !tid_geq(journal->j_commit_request, tid); - read_unlock(&journal->j_state_lock); - if (need_to_start) - jbd2_log_start_commit(journal, tid); - schedule(); - finish_wait(&journal->j_wait_transaction_locked, &wait); -} - -static void sub_reserved_credits(journal_t *journal, int blocks) -{ - atomic_sub(blocks, &journal->j_reserved_credits); - wake_up(&journal->j_wait_reserved); -} - -/* - * Wait until we can add credits for handle to the running transaction. Called - * with j_state_lock held for reading. 
Returns 0 if handle joined the running - * transaction. Returns 1 if we had to wait, j_state_lock is dropped, and - * caller must retry. - */ -static int add_transaction_credits(journal_t *journal, int blocks, - int rsv_blocks) -{ - transaction_t *t = journal->j_running_transaction; - int needed; - int total = blocks + rsv_blocks; - - /* - * If the current transaction is locked down for commit, wait - * for the lock to be released. - */ - if (t->t_state == T_LOCKED) { - wait_transaction_locked(journal); - return 1; - } - - /* - * If there is not enough space left in the log to write all - * potential buffers requested by this operation, we need to - * stall pending a log checkpoint to free some more log space. - */ - needed = atomic_add_return(total, &t->t_outstanding_credits); - if (needed > journal->j_max_transaction_buffers) { - /* - * If the current transaction is already too large, - * then start to commit it: we can then go back and - * attach this handle to a new transaction. - */ - atomic_sub(total, &t->t_outstanding_credits); - wait_transaction_locked(journal); - return 1; - } - - /* - * The commit code assumes that it can get enough log space - * without forcing a checkpoint. This is *critical* for - * correctness: a checkpoint of a buffer which is also - * associated with a committing transaction creates a deadlock, - * so commit simply cannot force through checkpoints. - * - * We must therefore ensure the necessary space in the journal - * *before* starting to dirty potentially checkpointed buffers - * in the new transaction. - */ - if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) { - atomic_sub(total, &t->t_outstanding_credits); - read_unlock(&journal->j_state_lock); - write_lock(&journal->j_state_lock); - if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) - __jbd2_log_wait_for_space(journal); - write_unlock(&journal->j_state_lock); - return 1; - } - - /* No reservation? We are done... 
*/ - if (!rsv_blocks) - return 0; - - needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits); - /* We allow at most half of a transaction to be reserved */ - if (needed > journal->j_max_transaction_buffers / 2) { - sub_reserved_credits(journal, rsv_blocks); - atomic_sub(total, &t->t_outstanding_credits); - read_unlock(&journal->j_state_lock); - wait_event(journal->j_wait_reserved, - atomic_read(&journal->j_reserved_credits) + rsv_blocks - <= journal->j_max_transaction_buffers / 2); - return 1; - } - return 0; -} - /* * start_this_handle: Given a handle, deal with any locking or stalling * needed to make sure that there is enough journal space for the handle @@ -258,24 +151,18 @@ static int start_this_handle(journal_t *journal, handle_t *handle, gfp_t gfp_mask) { transaction_t *transaction, *new_transaction = NULL; - int blocks = handle->h_buffer_credits; - int rsv_blocks = 0; + tid_t tid; + int needed, need_to_start; + int nblocks = handle->h_buffer_credits; unsigned long ts = jiffies; - /* - * 1/2 of transaction can be reserved so we can practically handle - * only 1/2 of maximum transaction size per operation - */ - if (WARN_ON(blocks > journal->j_max_transaction_buffers / 2)) { + if (nblocks > journal->j_max_transaction_buffers) { printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n", - current->comm, blocks, - journal->j_max_transaction_buffers / 2); + current->comm, nblocks, + journal->j_max_transaction_buffers); return -ENOSPC; } - if (handle->h_rsv_handle) - rsv_blocks = handle->h_rsv_handle->h_buffer_credits; - alloc_transaction: if (!journal->j_running_transaction) { new_transaction = kmem_cache_zalloc(transaction_cache, @@ -312,12 +199,8 @@ static int start_this_handle(journal_t *journal, handle_t *handle, return -EROFS; } - /* - * Wait on the journal's transaction barrier if necessary. Specifically - * we allow reserved handles to proceed because otherwise commit could - * deadlock on page writeback not being able to complete. - */ - if (!handle->h_reserved && journal->j_barrier_count) { + /* Wait on the journal's transaction barrier if necessary */ + if (journal->j_barrier_count) { read_unlock(&journal->j_state_lock); wait_event(journal->j_wait_transaction_locked, journal->j_barrier_count == 0); @@ -330,7 +213,7 @@ static int start_this_handle(journal_t *journal, handle_t *handle, goto alloc_transaction; write_lock(&journal->j_state_lock); if (!journal->j_running_transaction && - (handle->h_reserved || !journal->j_barrier_count)) { + !journal->j_barrier_count) { jbd2_get_transaction(journal, new_transaction); new_transaction = NULL; } @@ -340,18 +223,85 @@ static int start_this_handle(journal_t *journal, handle_t *handle, transaction = journal->j_running_transaction; - if (!handle->h_reserved) { - /* We may have dropped j_state_lock - restart in that case */ - if (add_transaction_credits(journal, blocks, rsv_blocks)) - goto repeat; - } else { + /* + * If the current transaction is locked down for commit, wait for the + * lock to be released. + */ + if (transaction->t_state == T_LOCKED) { + DEFINE_WAIT(wait); + + prepare_to_wait(&journal->j_wait_transaction_locked, + &wait, TASK_UNINTERRUPTIBLE); + read_unlock(&journal->j_state_lock); + schedule(); + finish_wait(&journal->j_wait_transaction_locked, &wait); + goto repeat; + } + + /* + * If there is not enough space left in the log to write all potential + * buffers requested by this operation, we need to stall pending a log + * checkpoint to free some more log space. 
+ */ + needed = atomic_add_return(nblocks, + &transaction->t_outstanding_credits); + + if (needed > journal->j_max_transaction_buffers) { /* - * We have handle reserved so we are allowed to join T_LOCKED - * transaction and we don't have to check for transaction size - * and journal space. + * If the current transaction is already too large, then start + * to commit it: we can then go back and attach this handle to + * a new transaction. */ - sub_reserved_credits(journal, blocks); - handle->h_reserved = 0; + DEFINE_WAIT(wait); + + jbd_debug(2, "Handle %p starting new commit...\n", handle); + atomic_sub(nblocks, &transaction->t_outstanding_credits); + prepare_to_wait(&journal->j_wait_transaction_locked, &wait, + TASK_UNINTERRUPTIBLE); + tid = transaction->t_tid; + need_to_start = !tid_geq(journal->j_commit_request, tid); + read_unlock(&journal->j_state_lock); + if (need_to_start) + jbd2_log_start_commit(journal, tid); + schedule(); + finish_wait(&journal->j_wait_transaction_locked, &wait); + goto repeat; + } + + /* + * The commit code assumes that it can get enough log space + * without forcing a checkpoint. This is *critical* for + * correctness: a checkpoint of a buffer which is also + * associated with a committing transaction creates a deadlock, + * so commit simply cannot force through checkpoints. + * + * We must therefore ensure the necessary space in the journal + * *before* starting to dirty potentially checkpointed buffers + * in the new transaction. + * + * The worst part is, any transaction currently committing can + * reduce the free space arbitrarily. Be careful to account for + * those buffers when checkpointing. + */ + + /* + * @@@ AKPM: This seems rather over-defensive. We're giving commit + * a _lot_ of headroom: 1/4 of the journal plus the size of + * the committing transaction. Really, we only need to give it + * committing_transaction->t_outstanding_credits plus "enough" for + * the log control blocks. + * Also, this test is inconsistent with the matching one in + * jbd2_journal_extend(). + */ + if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) { + jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle); + atomic_sub(nblocks, &transaction->t_outstanding_credits); + read_unlock(&journal->j_state_lock); + write_lock(&journal->j_state_lock); + if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) + __jbd2_log_wait_for_space(journal); + write_unlock(&journal->j_state_lock); + goto repeat; } /* OK, account for the buffers that this operation expects to @@ -359,16 +309,15 @@ static int start_this_handle(journal_t *journal, handle_t *handle, */ update_t_max_wait(transaction, ts); handle->h_transaction = transaction; - handle->h_requested_credits = blocks; + handle->h_requested_credits = nblocks; handle->h_start_jiffies = jiffies; atomic_inc(&transaction->t_updates); atomic_inc(&transaction->t_handle_count); - jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n", - handle, blocks, + jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n", + handle, nblocks, atomic_read(&transaction->t_outstanding_credits), - jbd2_log_space_left(journal)); + __jbd2_log_space_left(journal)); read_unlock(&journal->j_state_lock); - current->journal_info = handle; lock_map_acquire(&handle->h_lockdep_map); jbd2_journal_free_transaction(new_transaction); @@ -399,21 +348,16 @@ static handle_t *new_handle(int nblocks) * * We make sure that the transaction can guarantee at least nblocks of * modified buffers in the log. 
We block until the log can guarantee - * that much space. Additionally, if rsv_blocks > 0, we also create another - * handle with rsv_blocks reserved blocks in the journal. This handle is - * is stored in h_rsv_handle. It is not attached to any particular transaction - * and thus doesn't block transaction commit. If the caller uses this reserved - * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop() - * on the parent handle will dispose the reserved one. Reserved handle has to - * be converted to a normal handle using jbd2_journal_start_reserved() before - * it can be used. + * that much space. + * + * This function is visible to journal users (like ext3fs), so is not + * called with the journal already locked. * * Return a pointer to a newly allocated handle, or an ERR_PTR() value * on failure. */ -handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks, - gfp_t gfp_mask, unsigned int type, - unsigned int line_no) +handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask, + unsigned int type, unsigned int line_no) { handle_t *handle = journal_current_handle(); int err; @@ -430,24 +374,13 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks, handle = new_handle(nblocks); if (!handle) return ERR_PTR(-ENOMEM); - if (rsv_blocks) { - handle_t *rsv_handle; - rsv_handle = new_handle(rsv_blocks); - if (!rsv_handle) { - jbd2_free_handle(handle); - return ERR_PTR(-ENOMEM); - } - rsv_handle->h_reserved = 1; - rsv_handle->h_journal = journal; - handle->h_rsv_handle = rsv_handle; - } + current->journal_info = handle; err = start_this_handle(journal, handle, gfp_mask); if (err < 0) { - if (handle->h_rsv_handle) - jbd2_free_handle(handle->h_rsv_handle); jbd2_free_handle(handle); + current->journal_info = NULL; return ERR_PTR(err); } handle->h_type = type; @@ -462,65 +395,10 @@ EXPORT_SYMBOL(jbd2__journal_start); handle_t *jbd2_journal_start(journal_t *journal, int nblocks) { - return jbd2__journal_start(journal, nblocks, 0, GFP_NOFS, 0, 0); + return jbd2__journal_start(journal, nblocks, GFP_NOFS, 0, 0); } EXPORT_SYMBOL(jbd2_journal_start); -void jbd2_journal_free_reserved(handle_t *handle) -{ - journal_t *journal = handle->h_journal; - - WARN_ON(!handle->h_reserved); - sub_reserved_credits(journal, handle->h_buffer_credits); - jbd2_free_handle(handle); -} -EXPORT_SYMBOL(jbd2_journal_free_reserved); - -/** - * int jbd2_journal_start_reserved(handle_t *handle) - start reserved handle - * @handle: handle to start - * - * Start handle that has been previously reserved with jbd2_journal_reserve(). - * This attaches @handle to the running transaction (or creates one if there's - * not transaction running). Unlike jbd2_journal_start() this function cannot - * block on journal commit, checkpointing, or similar stuff. It can block on - * memory allocation or frozen journal though. - * - * Return 0 on success, non-zero on error - handle is freed in that case. - */ -int jbd2_journal_start_reserved(handle_t *handle, unsigned int type, - unsigned int line_no) -{ - journal_t *journal = handle->h_journal; - int ret = -EIO; - - if (WARN_ON(!handle->h_reserved)) { - /* Someone passed in normal handle? Just stop it. */ - jbd2_journal_stop(handle); - return ret; - } - /* - * Usefulness of mixing of reserved and unreserved handles is - * questionable. So far nobody seems to need it so just error out. 
- */ - if (WARN_ON(current->journal_info)) { - jbd2_journal_free_reserved(handle); - return ret; - } - - handle->h_journal = NULL; - /* - * GFP_NOFS is here because callers are likely from writeback or - * similarly constrained call sites - */ - ret = start_this_handle(journal, handle, GFP_NOFS); - if (ret < 0) - jbd2_journal_free_reserved(handle); - handle->h_type = type; - handle->h_line_no = line_no; - return ret; -} -EXPORT_SYMBOL(jbd2_journal_start_reserved); /** * int jbd2_journal_extend() - extend buffer credits. @@ -545,53 +423,49 @@ EXPORT_SYMBOL(jbd2_journal_start_reserved); int jbd2_journal_extend(handle_t *handle, int nblocks) { transaction_t *transaction = handle->h_transaction; - journal_t *journal; + journal_t *journal = transaction->t_journal; int result; int wanted; - WARN_ON(!transaction); + result = -EIO; if (is_handle_aborted(handle)) - return -EROFS; - journal = transaction->t_journal; + goto out; result = 1; read_lock(&journal->j_state_lock); /* Don't extend a locked-down transaction! */ - if (transaction->t_state != T_RUNNING) { + if (handle->h_transaction->t_state != T_RUNNING) { jbd_debug(3, "denied handle %p %d blocks: " "transaction not running\n", handle, nblocks); goto error_out; } spin_lock(&transaction->t_handle_lock); - wanted = atomic_add_return(nblocks, - &transaction->t_outstanding_credits); + wanted = atomic_read(&transaction->t_outstanding_credits) + nblocks; if (wanted > journal->j_max_transaction_buffers) { jbd_debug(3, "denied handle %p %d blocks: " "transaction too large\n", handle, nblocks); - atomic_sub(nblocks, &transaction->t_outstanding_credits); goto unlock; } - if (wanted + (wanted >> JBD2_CONTROL_BLOCKS_SHIFT) > - jbd2_log_space_left(journal)) { + if (wanted > __jbd2_log_space_left(journal)) { jbd_debug(3, "denied handle %p %d blocks: " "insufficient log space\n", handle, nblocks); - atomic_sub(nblocks, &transaction->t_outstanding_credits); goto unlock; } trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev, - transaction->t_tid, + handle->h_transaction->t_tid, handle->h_type, handle->h_line_no, handle->h_buffer_credits, nblocks); handle->h_buffer_credits += nblocks; handle->h_requested_credits += nblocks; + atomic_add(nblocks, &transaction->t_outstanding_credits); result = 0; jbd_debug(3, "extended handle %p by %d\n", handle, nblocks); @@ -599,6 +473,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks) spin_unlock(&transaction->t_handle_lock); error_out: read_unlock(&journal->j_state_lock); +out: return result; } @@ -615,22 +490,19 @@ int jbd2_journal_extend(handle_t *handle, int nblocks) * to a running handle, a call to jbd2_journal_restart will commit the * handle's transaction so far and reattach the handle to a new * transaction capabable of guaranteeing the requested number of - * credits. We preserve reserved handle if there's any attached to the - * passed in handle. + * credits. */ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask) { transaction_t *transaction = handle->h_transaction; - journal_t *journal; + journal_t *journal = transaction->t_journal; tid_t tid; int need_to_start, ret; - WARN_ON(!transaction); /* If we've had an abort of any type, don't even think about * actually doing the restart! 
*/ if (is_handle_aborted(handle)) return 0; - journal = transaction->t_journal; /* * First unlink the handle from its current transaction, and start the @@ -643,18 +515,12 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask) spin_lock(&transaction->t_handle_lock); atomic_sub(handle->h_buffer_credits, &transaction->t_outstanding_credits); - if (handle->h_rsv_handle) { - sub_reserved_credits(journal, - handle->h_rsv_handle->h_buffer_credits); - } if (atomic_dec_and_test(&transaction->t_updates)) wake_up(&journal->j_wait_updates); - tid = transaction->t_tid; spin_unlock(&transaction->t_handle_lock); - handle->h_transaction = NULL; - current->journal_info = NULL; jbd_debug(2, "restarting handle %p\n", handle); + tid = transaction->t_tid; need_to_start = !tid_geq(journal->j_commit_request, tid); read_unlock(&journal->j_state_lock); if (need_to_start) @@ -691,14 +557,6 @@ void jbd2_journal_lock_updates(journal_t *journal) write_lock(&journal->j_state_lock); ++journal->j_barrier_count; - /* Wait until there are no reserved handles */ - if (atomic_read(&journal->j_reserved_credits)) { - write_unlock(&journal->j_state_lock); - wait_event(journal->j_wait_reserved, - atomic_read(&journal->j_reserved_credits) == 0); - write_lock(&journal->j_state_lock); - } - /* Wait until there are no running updates */ while (1) { transaction_t *transaction = journal->j_running_transaction; @@ -761,12 +619,6 @@ static void warn_dirty_buffer(struct buffer_head *bh) bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr); } -static int sleep_on_shadow_bh(void *word) -{ - io_schedule(); - return 0; -} - /* * If the buffer is already part of the current transaction, then there * is nothing we need to do. If it is already part of a prior @@ -782,16 +634,17 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, int force_copy) { struct buffer_head *bh; - transaction_t *transaction = handle->h_transaction; + transaction_t *transaction; journal_t *journal; int error; char *frozen_buffer = NULL; int need_copy = 0; unsigned long start_lock, time_lock; - WARN_ON(!transaction); if (is_handle_aborted(handle)) return -EROFS; + + transaction = handle->h_transaction; journal = transaction->t_journal; jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy); @@ -901,29 +754,41 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, * journaled. If the primary copy is already going to * disk then we cannot do copy-out here. */ - if (buffer_shadow(bh)) { + if (jh->b_jlist == BJ_Shadow) { + DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow); + wait_queue_head_t *wqh; + + wqh = bit_waitqueue(&bh->b_state, BH_Unshadow); + JBUFFER_TRACE(jh, "on shadow: sleep"); jbd_unlock_bh_state(bh); - wait_on_bit(&bh->b_state, BH_Shadow, - sleep_on_shadow_bh, TASK_UNINTERRUPTIBLE); + /* commit wakes up all shadow buffers after IO */ + for ( ; ; ) { + prepare_to_wait(wqh, &wait.wait, + TASK_UNINTERRUPTIBLE); + if (jh->b_jlist != BJ_Shadow) + break; + schedule(); + } + finish_wait(wqh, &wait.wait); goto repeat; } - /* - * Only do the copy if the currently-owning transaction still - * needs it. If buffer isn't on BJ_Metadata list, the - * committing transaction is past that stage (here we use the - * fact that BH_Shadow is set under bh_state lock together with - * refiling to BJ_Shadow list and at this point we know the - * buffer doesn't have BH_Shadow set). + /* Only do the copy if the currently-owning transaction + * still needs it. 
If it is on the Forget list, the + * committing transaction is past that stage. The + * buffer had better remain locked during the kmalloc, + * but that should be true --- we hold the journal lock + * still and the buffer is already on the BUF_JOURNAL + * list so won't be flushed. * * Subtle point, though: if this is a get_undo_access, * then we will be relying on the frozen_data to contain * the new value of the committed_data record after the * transaction, so we HAVE to force the frozen_data copy - * in that case. - */ - if (jh->b_jlist == BJ_Metadata || force_copy) { + * in that case. */ + + if (jh->b_jlist != BJ_Forget || force_copy) { JBUFFER_TRACE(jh, "generate frozen data"); if (!frozen_buffer) { JBUFFER_TRACE(jh, "allocate memory for buffer"); @@ -1050,16 +915,14 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh) int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; - journal_t *journal; + journal_t *journal = transaction->t_journal; struct journal_head *jh = jbd2_journal_add_journal_head(bh); int err; jbd_debug(5, "journal_head %p\n", jh); - WARN_ON(!transaction); err = -EROFS; if (is_handle_aborted(handle)) goto out; - journal = transaction->t_journal; err = 0; JBUFFER_TRACE(jh, "entry"); @@ -1265,14 +1128,12 @@ void jbd2_buffer_abort_trigger(struct journal_head *jh, int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; - journal_t *journal; + journal_t *journal = transaction->t_journal; struct journal_head *jh; int ret = 0; - WARN_ON(!transaction); if (is_handle_aborted(handle)) - return -EROFS; - journal = transaction->t_journal; + goto out; jh = jbd2_journal_grab_journal_head(bh); if (!jh) { ret = -EUCLEAN; @@ -1366,7 +1227,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) JBUFFER_TRACE(jh, "file as BJ_Metadata"); spin_lock(&journal->j_list_lock); - __jbd2_journal_file_buffer(jh, transaction, BJ_Metadata); + __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata); spin_unlock(&journal->j_list_lock); out_unlock_bh: jbd_unlock_bh_state(bh); @@ -1397,17 +1258,12 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; - journal_t *journal; + journal_t *journal = transaction->t_journal; struct journal_head *jh; int drop_reserve = 0; int err = 0; int was_modified = 0; - WARN_ON(!transaction); - if (is_handle_aborted(handle)) - return -EROFS; - journal = transaction->t_journal; - BUFFER_TRACE(bh, "entry"); jbd_lock_bh_state(bh); @@ -1434,7 +1290,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh) */ jh->b_modified = 0; - if (jh->b_transaction == transaction) { + if (jh->b_transaction == handle->h_transaction) { J_ASSERT_JH(jh, !jh->b_frozen_data); /* If we are forgetting a buffer which is already part @@ -1529,21 +1385,19 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh) int jbd2_journal_stop(handle_t *handle) { transaction_t *transaction = handle->h_transaction; - journal_t *journal; - int err = 0, wait_for_commit = 0; + journal_t *journal = transaction->t_journal; + int err, wait_for_commit = 0; tid_t tid; pid_t pid; - if (!transaction) - goto free_and_exit; - journal = transaction->t_journal; - J_ASSERT(journal_current_handle() == handle); if (is_handle_aborted(handle)) err = -EIO; - 
else + else { J_ASSERT(atomic_read(&transaction->t_updates) > 0); + err = 0; + } if (--handle->h_ref > 0) { jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1, @@ -1553,7 +1407,7 @@ int jbd2_journal_stop(handle_t *handle) jbd_debug(4, "Handle %p going down\n", handle); trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev, - transaction->t_tid, + handle->h_transaction->t_tid, handle->h_type, handle->h_line_no, jiffies - handle->h_start_jiffies, handle->h_sync, handle->h_requested_credits, @@ -1664,13 +1518,33 @@ int jbd2_journal_stop(handle_t *handle) lock_map_release(&handle->h_lockdep_map); - if (handle->h_rsv_handle) - jbd2_journal_free_reserved(handle->h_rsv_handle); -free_and_exit: jbd2_free_handle(handle); return err; } +/** + * int jbd2_journal_force_commit() - force any uncommitted transactions + * @journal: journal to force + * + * For synchronous operations: force any uncommitted transactions + * to disk. May seem kludgy, but it reuses all the handle batching + * code in a very simple manner. + */ +int jbd2_journal_force_commit(journal_t *journal) +{ + handle_t *handle; + int ret; + + handle = jbd2_journal_start(journal, 1); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + } else { + handle->h_sync = 1; + ret = jbd2_journal_stop(handle); + } + return ret; +} + /* * * List management code snippets: various functions for manipulating the @@ -1727,10 +1601,10 @@ __blist_del_buffer(struct journal_head **list, struct journal_head *jh) * Remove a buffer from the appropriate transaction list. * * Note that this function can *change* the value of - * bh->b_transaction->t_buffers, t_forget, t_shadow_list, t_log_list or - * t_reserved_list. If the caller is holding onto a copy of one of these - * pointers, it could go bad. Generally the caller needs to re-read the - * pointer from the transaction_t. + * bh->b_transaction->t_buffers, t_forget, t_iobuf_list, t_shadow_list, + * t_log_list or t_reserved_list. If the caller is holding onto a copy of one + * of these pointers, it could go bad. Generally the caller needs to re-read + * the pointer from the transaction_t. * * Called under j_list_lock. */ @@ -1760,9 +1634,15 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) case BJ_Forget: list = &transaction->t_forget; break; + case BJ_IO: + list = &transaction->t_iobuf_list; + break; case BJ_Shadow: list = &transaction->t_shadow_list; break; + case BJ_LogCtl: + list = &transaction->t_log_list; + break; case BJ_Reserved: list = &transaction->t_reserved_list; break; @@ -2154,23 +2034,18 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh, * void jbd2_journal_invalidatepage() * @journal: journal to use for flush... * @page: page to flush - * @offset: start of the range to invalidate - * @length: length of the range to invalidate + * @offset: length of page to invalidate. * - * Reap page buffers containing data after in the specified range in page. - * Can return -EBUSY if buffers are part of the committing transaction and - * the page is straddling i_size. Caller then has to wait for current commit - * and try again. + * Reap page buffers containing data after offset in page. Can return -EBUSY + * if buffers are part of the committing transaction and the page is straddling + * i_size. Caller then has to wait for current commit and try again. 
*/ int jbd2_journal_invalidatepage(journal_t *journal, struct page *page, - unsigned int offset, - unsigned int length) + unsigned long offset) { struct buffer_head *head, *bh, *next; - unsigned int stop = offset + length; unsigned int curr_off = 0; - int partial_page = (offset || length < PAGE_CACHE_SIZE); int may_free = 1; int ret = 0; @@ -2179,8 +2054,6 @@ int jbd2_journal_invalidatepage(journal_t *journal, if (!page_has_buffers(page)) return 0; - BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); - /* We will potentially be playing with lists other than just the * data lists (especially for journaled data mode), so be * cautious in our locking. */ @@ -2190,13 +2063,10 @@ int jbd2_journal_invalidatepage(journal_t *journal, unsigned int next_off = curr_off + bh->b_size; next = bh->b_this_page; - if (next_off > stop) - return 0; - if (offset <= curr_off) { /* This block is wholly outside the truncation point */ lock_buffer(bh); - ret = journal_unmap_buffer(journal, bh, partial_page); + ret = journal_unmap_buffer(journal, bh, offset > 0); unlock_buffer(bh); if (ret < 0) return ret; @@ -2207,7 +2077,7 @@ int jbd2_journal_invalidatepage(journal_t *journal, } while (bh != head); - if (!partial_page) { + if (!offset) { if (may_free && try_to_free_buffers(page)) J_ASSERT(!page_has_buffers(page)); } @@ -2268,9 +2138,15 @@ void __jbd2_journal_file_buffer(struct journal_head *jh, case BJ_Forget: list = &transaction->t_forget; break; + case BJ_IO: + list = &transaction->t_iobuf_list; + break; case BJ_Shadow: list = &transaction->t_shadow_list; break; + case BJ_LogCtl: + list = &transaction->t_log_list; + break; case BJ_Reserved: list = &transaction->t_reserved_list; break; @@ -2372,12 +2248,10 @@ void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh) int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode) { transaction_t *transaction = handle->h_transaction; - journal_t *journal; + journal_t *journal = transaction->t_journal; - WARN_ON(!transaction); if (is_handle_aborted(handle)) - return -EROFS; - journal = transaction->t_journal; + return -EIO; jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino, transaction->t_tid); diff --git a/trunk/fs/jffs2/dir.c b/trunk/fs/jffs2/dir.c index e3aac222472e..acd46a4160cb 100644 --- a/trunk/fs/jffs2/dir.c +++ b/trunk/fs/jffs2/dir.c @@ -22,7 +22,7 @@ #include #include "nodelist.h" -static int jffs2_readdir (struct file *, struct dir_context *); +static int jffs2_readdir (struct file *, void *, filldir_t); static int jffs2_create (struct inode *,struct dentry *,umode_t, bool); @@ -40,7 +40,7 @@ static int jffs2_rename (struct inode *, struct dentry *, const struct file_operations jffs2_dir_operations = { .read = generic_read_dir, - .iterate = jffs2_readdir, + .readdir = jffs2_readdir, .unlocked_ioctl=jffs2_ioctl, .fsync = jffs2_fsync, .llseek = generic_file_llseek, @@ -114,40 +114,60 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, /***********************************************************************/ -static int jffs2_readdir(struct file *file, struct dir_context *ctx) +static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct inode *inode = file_inode(file); - struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); + struct jffs2_inode_info *f; + struct inode *inode = file_inode(filp); struct jffs2_full_dirent *fd; - unsigned long curofs = 1; + unsigned long offset, curofs; - jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n", inode->i_ino); + 
jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n", + file_inode(filp)->i_ino); - if (!dir_emit_dots(file, ctx)) - return 0; + f = JFFS2_INODE_INFO(inode); + + offset = filp->f_pos; + + if (offset == 0) { + jffs2_dbg(1, "Dirent 0: \".\", ino #%lu\n", inode->i_ino); + if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) + goto out; + offset++; + } + if (offset == 1) { + unsigned long pino = parent_ino(filp->f_path.dentry); + jffs2_dbg(1, "Dirent 1: \"..\", ino #%lu\n", pino); + if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0) + goto out; + offset++; + } + curofs=1; mutex_lock(&f->sem); for (fd = f->dents; fd; fd = fd->next) { + curofs++; - /* First loop: curofs = 2; pos = 2 */ - if (curofs < ctx->pos) { + /* First loop: curofs = 2; offset = 2 */ + if (curofs < offset) { jffs2_dbg(2, "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n", - fd->name, fd->ino, fd->type, curofs, (unsigned long)ctx->pos); + fd->name, fd->ino, fd->type, curofs, offset); continue; } if (!fd->ino) { jffs2_dbg(2, "Skipping deletion dirent \"%s\"\n", fd->name); - ctx->pos++; + offset++; continue; } jffs2_dbg(2, "Dirent %ld: \"%s\", ino #%u, type %d\n", - (unsigned long)ctx->pos, fd->name, fd->ino, fd->type); - if (!dir_emit(ctx, fd->name, strlen(fd->name), fd->ino, fd->type)) + offset, fd->name, fd->ino, fd->type); + if (filldir(dirent, fd->name, strlen(fd->name), offset, fd->ino, fd->type) < 0) break; - ctx->pos++; + offset++; } mutex_unlock(&f->sem); + out: + filp->f_pos = offset; return 0; } diff --git a/trunk/fs/jfs/jfs_dtree.c b/trunk/fs/jfs/jfs_dtree.c index 9f4ed13d9f15..0ddbeceafc62 100644 --- a/trunk/fs/jfs/jfs_dtree.c +++ b/trunk/fs/jfs/jfs_dtree.c @@ -3002,9 +3002,9 @@ static inline struct jfs_dirent *next_jfs_dirent(struct jfs_dirent *dirent) * return: offset = (pn, index) of start entry * of next jfs_readdir()/dtRead() */ -int jfs_readdir(struct file *file, struct dir_context *ctx) +int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct inode *ip = file_inode(file); + struct inode *ip = file_inode(filp); struct nls_table *codepage = JFS_SBI(ip->i_sb)->nls_tab; int rc = 0; loff_t dtpos; /* legacy OS/2 style position */ @@ -3033,7 +3033,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) int overflow, fix_page, page_fixed = 0; static int unique_pos = 2; /* If we can't fix broken index */ - if (ctx->pos == DIREND) + if (filp->f_pos == DIREND) return 0; if (DO_INDEX(ip)) { @@ -3045,7 +3045,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) */ do_index = 1; - dir_index = (u32) ctx->pos; + dir_index = (u32) filp->f_pos; if (dir_index > 1) { struct dir_table_slot dirtab_slot; @@ -3053,25 +3053,25 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) if (dtEmpty(ip) || (dir_index >= JFS_IP(ip)->next_index)) { /* Stale position. 
Directory has shrunk */ - ctx->pos = DIREND; + filp->f_pos = DIREND; return 0; } repeat: rc = read_index(ip, dir_index, &dirtab_slot); if (rc) { - ctx->pos = DIREND; + filp->f_pos = DIREND; return rc; } if (dirtab_slot.flag == DIR_INDEX_FREE) { if (loop_count++ > JFS_IP(ip)->next_index) { jfs_err("jfs_readdir detected " "infinite loop!"); - ctx->pos = DIREND; + filp->f_pos = DIREND; return 0; } dir_index = le32_to_cpu(dirtab_slot.addr2); if (dir_index == -1) { - ctx->pos = DIREND; + filp->f_pos = DIREND; return 0; } goto repeat; @@ -3080,13 +3080,13 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) index = dirtab_slot.slot; DT_GETPAGE(ip, bn, mp, PSIZE, p, rc); if (rc) { - ctx->pos = DIREND; + filp->f_pos = DIREND; return 0; } if (p->header.flag & BT_INTERNAL) { jfs_err("jfs_readdir: bad index table"); DT_PUTPAGE(mp); - ctx->pos = -1; + filp->f_pos = -1; return 0; } } else { @@ -3094,22 +3094,23 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) /* * self "." */ - ctx->pos = 0; - if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) + filp->f_pos = 0; + if (filldir(dirent, ".", 1, 0, ip->i_ino, + DT_DIR)) return 0; } /* * parent ".." */ - ctx->pos = 1; - if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) + filp->f_pos = 1; + if (filldir(dirent, "..", 2, 1, PARENT(ip), DT_DIR)) return 0; /* * Find first entry of left-most leaf */ if (dtEmpty(ip)) { - ctx->pos = DIREND; + filp->f_pos = DIREND; return 0; } @@ -3127,19 +3128,23 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) * pn > 0: Real entries, pn=1 -> leftmost page * pn = index = -1: No more entries */ - dtpos = ctx->pos; + dtpos = filp->f_pos; if (dtpos == 0) { /* build "." entry */ - if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) + + if (filldir(dirent, ".", 1, filp->f_pos, ip->i_ino, + DT_DIR)) return 0; dtoffset->index = 1; - ctx->pos = dtpos; + filp->f_pos = dtpos; } if (dtoffset->pn == 0) { if (dtoffset->index == 1) { /* build ".." entry */ - if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) + + if (filldir(dirent, "..", 2, filp->f_pos, + PARENT(ip), DT_DIR)) return 0; } else { jfs_err("jfs_readdir called with " @@ -3147,18 +3152,18 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) } dtoffset->pn = 1; dtoffset->index = 0; - ctx->pos = dtpos; + filp->f_pos = dtpos; } if (dtEmpty(ip)) { - ctx->pos = DIREND; + filp->f_pos = DIREND; return 0; } - if ((rc = dtReadNext(ip, &ctx->pos, &btstack))) { + if ((rc = dtReadNext(ip, &filp->f_pos, &btstack))) { jfs_err("jfs_readdir: unexpected rc = %d " "from dtReadNext", rc); - ctx->pos = DIREND; + filp->f_pos = DIREND; return 0; } /* get start leaf page and index */ @@ -3166,7 +3171,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) /* offset beyond directory eof ? 
*/ if (bn < 0) { - ctx->pos = DIREND; + filp->f_pos = DIREND; return 0; } } @@ -3175,7 +3180,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) if (dirent_buf == 0) { DT_PUTPAGE(mp); jfs_warn("jfs_readdir: __get_free_page failed!"); - ctx->pos = DIREND; + filp->f_pos = DIREND; return -ENOMEM; } @@ -3290,9 +3295,9 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) jfs_dirent = (struct jfs_dirent *) dirent_buf; while (jfs_dirents--) { - ctx->pos = jfs_dirent->position; - if (!dir_emit(ctx, jfs_dirent->name, - jfs_dirent->name_len, + filp->f_pos = jfs_dirent->position; + if (filldir(dirent, jfs_dirent->name, + jfs_dirent->name_len, filp->f_pos, jfs_dirent->ino, DT_UNKNOWN)) goto out; jfs_dirent = next_jfs_dirent(jfs_dirent); @@ -3304,7 +3309,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) } if (!overflow && (bn == 0)) { - ctx->pos = DIREND; + filp->f_pos = DIREND; break; } diff --git a/trunk/fs/jfs/jfs_dtree.h b/trunk/fs/jfs/jfs_dtree.h index fd4169e6e698..2545bb317235 100644 --- a/trunk/fs/jfs/jfs_dtree.h +++ b/trunk/fs/jfs/jfs_dtree.h @@ -265,5 +265,5 @@ extern int dtDelete(tid_t tid, struct inode *ip, struct component_name * key, extern int dtModify(tid_t tid, struct inode *ip, struct component_name * key, ino_t * orig_ino, ino_t new_ino, int flag); -extern int jfs_readdir(struct file *file, struct dir_context *ctx); +extern int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir); #endif /* !_H_JFS_DTREE */ diff --git a/trunk/fs/jfs/jfs_logmgr.c b/trunk/fs/jfs/jfs_logmgr.c index 360d27c48887..c57499dca89c 100644 --- a/trunk/fs/jfs/jfs_logmgr.c +++ b/trunk/fs/jfs/jfs_logmgr.c @@ -2009,13 +2009,7 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp) bio->bi_end_io = lbmIODone; bio->bi_private = bp; - /*check if journaling to disk has been disabled*/ - if (log->no_integrity) { - bio->bi_size = 0; - lbmIODone(bio, 0); - } else { - submit_bio(READ_SYNC, bio); - } + submit_bio(READ_SYNC, bio); wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD)); diff --git a/trunk/fs/jfs/jfs_metapage.c b/trunk/fs/jfs/jfs_metapage.c index 9e3aaff11f89..6740d34cd82b 100644 --- a/trunk/fs/jfs/jfs_metapage.c +++ b/trunk/fs/jfs/jfs_metapage.c @@ -571,10 +571,9 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask) return ret; } -static void metapage_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +static void metapage_invalidatepage(struct page *page, unsigned long offset) { - BUG_ON(offset || length < PAGE_CACHE_SIZE); + BUG_ON(offset); BUG_ON(PageWriteback(page)); diff --git a/trunk/fs/jfs/namei.c b/trunk/fs/jfs/namei.c index 89186b7b9002..3b91a7ad6086 100644 --- a/trunk/fs/jfs/namei.c +++ b/trunk/fs/jfs/namei.c @@ -1529,7 +1529,7 @@ const struct inode_operations jfs_dir_inode_operations = { const struct file_operations jfs_dir_operations = { .read = generic_read_dir, - .iterate = jfs_readdir, + .readdir = jfs_readdir, .fsync = jfs_fsync, .unlocked_ioctl = jfs_ioctl, #ifdef CONFIG_COMPAT diff --git a/trunk/fs/jfs/super.c b/trunk/fs/jfs/super.c index 788e0a9c1fb0..2003e830ed1c 100644 --- a/trunk/fs/jfs/super.c +++ b/trunk/fs/jfs/super.c @@ -611,28 +611,11 @@ static int jfs_freeze(struct super_block *sb) { struct jfs_sb_info *sbi = JFS_SBI(sb); struct jfs_log *log = sbi->log; - int rc = 0; if (!(sb->s_flags & MS_RDONLY)) { txQuiesce(sb); - rc = lmLogShutdown(log); - if (rc) { - jfs_error(sb, "jfs_freeze: lmLogShutdown failed"); - - /* let operations fail rather than hang */ - txResume(sb); - - return 
rc; - } - rc = updateSuper(sb, FM_CLEAN); - if (rc) { - jfs_err("jfs_freeze: updateSuper failed\n"); - /* - * Don't fail here. Everything succeeded except - * marking the superblock clean, so there's really - * no harm in leaving it frozen for now. - */ - } + lmLogShutdown(log); + updateSuper(sb, FM_CLEAN); } return 0; } @@ -644,18 +627,13 @@ static int jfs_unfreeze(struct super_block *sb) int rc = 0; if (!(sb->s_flags & MS_RDONLY)) { - rc = updateSuper(sb, FM_MOUNT); - if (rc) { - jfs_error(sb, "jfs_unfreeze: updateSuper failed"); - goto out; - } - rc = lmLogInit(log); - if (rc) - jfs_error(sb, "jfs_unfreeze: lmLogInit failed"); -out: - txResume(sb); + updateSuper(sb, FM_MOUNT); + if ((rc = lmLogInit(log))) + jfs_err("jfs_unlock failed with return code %d", rc); + else + txResume(sb); } - return rc; + return 0; } static struct dentry *jfs_do_mount(struct file_system_type *fs_type, diff --git a/trunk/fs/libfs.c b/trunk/fs/libfs.c index c3a0837fb861..916da8c4158b 100644 --- a/trunk/fs/libfs.c +++ b/trunk/fs/libfs.c @@ -135,40 +135,60 @@ static inline unsigned char dt_type(struct inode *inode) * both impossible due to the lock on directory. */ -int dcache_readdir(struct file *file, struct dir_context *ctx) +int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) { - struct dentry *dentry = file->f_path.dentry; - struct dentry *cursor = file->private_data; + struct dentry *dentry = filp->f_path.dentry; + struct dentry *cursor = filp->private_data; struct list_head *p, *q = &cursor->d_u.d_child; + ino_t ino; + int i = filp->f_pos; - if (!dir_emit_dots(file, ctx)) - return 0; - spin_lock(&dentry->d_lock); - if (ctx->pos == 2) - list_move(q, &dentry->d_subdirs); - - for (p = q->next; p != &dentry->d_subdirs; p = p->next) { - struct dentry *next = list_entry(p, struct dentry, d_u.d_child); - spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); - if (!simple_positive(next)) { - spin_unlock(&next->d_lock); - continue; - } + switch (i) { + case 0: + ino = dentry->d_inode->i_ino; + if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) + break; + filp->f_pos++; + i++; + /* fallthrough */ + case 1: + ino = parent_ino(dentry); + if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0) + break; + filp->f_pos++; + i++; + /* fallthrough */ + default: + spin_lock(&dentry->d_lock); + if (filp->f_pos == 2) + list_move(q, &dentry->d_subdirs); - spin_unlock(&next->d_lock); - spin_unlock(&dentry->d_lock); - if (!dir_emit(ctx, next->d_name.name, next->d_name.len, - next->d_inode->i_ino, dt_type(next->d_inode))) - return 0; - spin_lock(&dentry->d_lock); - spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); - /* next is still alive */ - list_move(q, p); - spin_unlock(&next->d_lock); - p = q; - ctx->pos++; + for (p=q->next; p != &dentry->d_subdirs; p=p->next) { + struct dentry *next; + next = list_entry(p, struct dentry, d_u.d_child); + spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); + if (!simple_positive(next)) { + spin_unlock(&next->d_lock); + continue; + } + + spin_unlock(&next->d_lock); + spin_unlock(&dentry->d_lock); + if (filldir(dirent, next->d_name.name, + next->d_name.len, filp->f_pos, + next->d_inode->i_ino, + dt_type(next->d_inode)) < 0) + return 0; + spin_lock(&dentry->d_lock); + spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); + /* next is still alive */ + list_move(q, p); + spin_unlock(&next->d_lock); + p = q; + filp->f_pos++; + } + spin_unlock(&dentry->d_lock); } - spin_unlock(&dentry->d_lock); return 0; } @@ -182,7 +202,7 @@ const struct file_operations simple_dir_operations = 
{ .release = dcache_dir_close, .llseek = dcache_dir_lseek, .read = generic_read_dir, - .iterate = dcache_readdir, + .readdir = dcache_readdir, .fsync = noop_fsync, }; diff --git a/trunk/fs/logfs/dir.c b/trunk/fs/logfs/dir.c index 6bdc347008f5..b82751082112 100644 --- a/trunk/fs/logfs/dir.c +++ b/trunk/fs/logfs/dir.c @@ -281,23 +281,17 @@ static int logfs_rmdir(struct inode *dir, struct dentry *dentry) /* FIXME: readdir currently has it's own dir_walk code. I don't see a good * way to combine the two copies */ -static int logfs_readdir(struct file *file, struct dir_context *ctx) +#define IMPLICIT_NODES 2 +static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir) { struct inode *dir = file_inode(file); - loff_t pos; + loff_t pos = file->f_pos - IMPLICIT_NODES; struct page *page; struct logfs_disk_dentry *dd; + int full; - if (ctx->pos < 0) - return -EINVAL; - - if (!dir_emit_dots(file, ctx)) - return 0; - - pos = ctx->pos - 2; BUG_ON(pos < 0); - for (;; pos++, ctx->pos++) { - bool full; + for (;; pos++) { if (beyond_eof(dir, pos)) break; if (!logfs_exist_block(dir, pos)) { @@ -312,17 +306,42 @@ static int logfs_readdir(struct file *file, struct dir_context *ctx) dd = kmap(page); BUG_ON(dd->namelen == 0); - full = !dir_emit(ctx, (char *)dd->name, - be16_to_cpu(dd->namelen), - be64_to_cpu(dd->ino), dd->type); + full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen), + pos, be64_to_cpu(dd->ino), dd->type); kunmap(page); page_cache_release(page); if (full) break; } + + file->f_pos = pos + IMPLICIT_NODES; return 0; } +static int logfs_readdir(struct file *file, void *buf, filldir_t filldir) +{ + struct inode *inode = file_inode(file); + ino_t pino = parent_ino(file->f_dentry); + int err; + + if (file->f_pos < 0) + return -EINVAL; + + if (file->f_pos == 0) { + if (filldir(buf, ".", 1, 1, inode->i_ino, DT_DIR) < 0) + return 0; + file->f_pos++; + } + if (file->f_pos == 1) { + if (filldir(buf, "..", 2, 2, pino, DT_DIR) < 0) + return 0; + file->f_pos++; + } + + err = __logfs_readdir(file, buf, filldir); + return err; +} + static void logfs_set_name(struct logfs_disk_dentry *dd, struct qstr *name) { dd->namelen = cpu_to_be16(name->len); @@ -795,7 +814,7 @@ const struct inode_operations logfs_dir_iops = { const struct file_operations logfs_dir_fops = { .fsync = logfs_fsync, .unlocked_ioctl = logfs_ioctl, - .iterate = logfs_readdir, + .readdir = logfs_readdir, .read = generic_read_dir, .llseek = default_llseek, }; diff --git a/trunk/fs/logfs/file.c b/trunk/fs/logfs/file.c index 57914fc32b62..c2219a6dd3c8 100644 --- a/trunk/fs/logfs/file.c +++ b/trunk/fs/logfs/file.c @@ -159,8 +159,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc) return __logfs_writepage(page); } -static void logfs_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +static void logfs_invalidatepage(struct page *page, unsigned long offset) { struct logfs_block *block = logfs_block(page); diff --git a/trunk/fs/logfs/segment.c b/trunk/fs/logfs/segment.c index d448a777166b..038da0991794 100644 --- a/trunk/fs/logfs/segment.c +++ b/trunk/fs/logfs/segment.c @@ -884,8 +884,7 @@ static struct logfs_area *alloc_area(struct super_block *sb) return area; } -static void map_invalidatepage(struct page *page, unsigned int o, - unsigned int l) +static void map_invalidatepage(struct page *page, unsigned long l) { return; } diff --git a/trunk/fs/minix/dir.c b/trunk/fs/minix/dir.c index 08c442902fcd..a9ed6f36e6ea 100644 --- a/trunk/fs/minix/dir.c +++ b/trunk/fs/minix/dir.c 
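(Illustrative aside, not part of the patch: every directory hunk in this series makes the same mechanical conversion back from the ->iterate()/dir_emit() interface to the older ->readdir()/filldir() one, so position tracking moves from ctx->pos to filp->f_pos and the "." and ".." entries are emitted by hand. The skeleton below is a minimal sketch of that old-style shape; example_readdir() is a hypothetical name, not a function touched by this patch.

static int example_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = file_inode(filp);

	if (filp->f_pos == 0) {
		/* emit "." ourselves; stop if the user buffer is full */
		if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
			return 0;
		filp->f_pos = 1;
	}
	if (filp->f_pos == 1) {
		/* ".." reports the parent directory's inode number */
		if (filldir(dirent, "..", 2, 1,
			    parent_ino(filp->f_path.dentry), DT_DIR) < 0)
			return 0;
		filp->f_pos = 2;
	}
	/* walk the real entries here, advancing filp->f_pos per emitted entry */
	return 0;
}

The conversions in dcache_readdir(), jffs2_readdir(), ncp_readdir() and the other readdir hunks above and below follow exactly this pattern, only with filesystem-specific entry walks in place of the final comment.)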
@@ -16,12 +16,12 @@ typedef struct minix_dir_entry minix_dirent; typedef struct minix3_dir_entry minix3_dirent; -static int minix_readdir(struct file *, struct dir_context *); +static int minix_readdir(struct file *, void *, filldir_t); const struct file_operations minix_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = minix_readdir, + .readdir = minix_readdir, .fsync = generic_file_fsync, }; @@ -82,23 +82,22 @@ static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi) return (void*)((char*)de + sbi->s_dirsize); } -static int minix_readdir(struct file *file, struct dir_context *ctx) +static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir) { - struct inode *inode = file_inode(file); + unsigned long pos = filp->f_pos; + struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; + unsigned offset = pos & ~PAGE_CACHE_MASK; + unsigned long n = pos >> PAGE_CACHE_SHIFT; + unsigned long npages = dir_pages(inode); struct minix_sb_info *sbi = minix_sb(sb); unsigned chunk_size = sbi->s_dirsize; - unsigned long npages = dir_pages(inode); - unsigned long pos = ctx->pos; - unsigned offset; - unsigned long n; + char *name; + __u32 inumber; - ctx->pos = pos = (pos + chunk_size-1) & ~(chunk_size-1); + pos = (pos + chunk_size-1) & ~(chunk_size-1); if (pos >= inode->i_size) - return 0; - - offset = pos & ~PAGE_CACHE_MASK; - n = pos >> PAGE_CACHE_SHIFT; + goto done; for ( ; n < npages; n++, offset = 0) { char *p, *kaddr, *limit; @@ -110,8 +109,6 @@ static int minix_readdir(struct file *file, struct dir_context *ctx) p = kaddr+offset; limit = kaddr + minix_last_byte(inode, n) - chunk_size; for ( ; p <= limit; p = minix_next_entry(p, sbi)) { - const char *name; - __u32 inumber; if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)p; name = de3->name; @@ -122,17 +119,24 @@ static int minix_readdir(struct file *file, struct dir_context *ctx) inumber = de->inode; } if (inumber) { + int over; + unsigned l = strnlen(name, sbi->s_namelen); - if (!dir_emit(ctx, name, l, - inumber, DT_UNKNOWN)) { + offset = p - kaddr; + over = filldir(dirent, name, l, + (n << PAGE_CACHE_SHIFT) | offset, + inumber, DT_UNKNOWN); + if (over) { dir_put_page(page); - return 0; + goto done; } } - ctx->pos += chunk_size; } dir_put_page(page); } + +done: + filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset; return 0; } diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c index 9ed9361223c0..85e40d1c0a8f 100644 --- a/trunk/fs/namei.c +++ b/trunk/fs/namei.c @@ -1976,7 +1976,7 @@ static int path_lookupat(int dfd, const char *name, err = complete_walk(nd); if (!err && nd->flags & LOOKUP_DIRECTORY) { - if (!can_lookup(nd->inode)) { + if (!nd->inode->i_op->lookup) { path_put(&nd->path); err = -ENOTDIR; } @@ -2850,7 +2850,7 @@ static int do_last(struct nameidata *nd, struct path *path, if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode)) goto out; error = -ENOTDIR; - if ((nd->flags & LOOKUP_DIRECTORY) && !can_lookup(nd->inode)) + if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup) goto out; audit_inode(name, nd->path.dentry, 0); finish_open: diff --git a/trunk/fs/ncpfs/dir.c b/trunk/fs/ncpfs/dir.c index 0e7f00298213..816326093656 100644 --- a/trunk/fs/ncpfs/dir.c +++ b/trunk/fs/ncpfs/dir.c @@ -23,12 +23,12 @@ #include "ncp_fs.h" -static void ncp_read_volume_list(struct file *, struct dir_context *, +static void ncp_read_volume_list(struct file *, void *, filldir_t, struct ncp_cache_control *); -static void ncp_do_readdir(struct 
file *, struct dir_context *, +static void ncp_do_readdir(struct file *, void *, filldir_t, struct ncp_cache_control *); -static int ncp_readdir(struct file *, struct dir_context *); +static int ncp_readdir(struct file *, void *, filldir_t); static int ncp_create(struct inode *, struct dentry *, umode_t, bool); static struct dentry *ncp_lookup(struct inode *, struct dentry *, unsigned int); @@ -49,7 +49,7 @@ const struct file_operations ncp_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = ncp_readdir, + .readdir = ncp_readdir, .unlocked_ioctl = ncp_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ncp_compat_ioctl, @@ -424,9 +424,9 @@ static time_t ncp_obtain_mtime(struct dentry *dentry) return ncp_date_dos2unix(i.modifyTime, i.modifyDate); } -static int ncp_readdir(struct file *file, struct dir_context *ctx) +static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct dentry *dentry = file->f_path.dentry; + struct dentry *dentry = filp->f_path.dentry; struct inode *inode = dentry->d_inode; struct page *page = NULL; struct ncp_server *server = NCP_SERVER(inode); @@ -440,7 +440,7 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx) DDPRINTK("ncp_readdir: reading %s/%s, pos=%d\n", dentry->d_parent->d_name.name, dentry->d_name.name, - (int) ctx->pos); + (int) filp->f_pos); result = -EIO; /* Do not generate '.' and '..' when server is dead. */ @@ -448,8 +448,16 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx) goto out; result = 0; - if (!dir_emit_dots(file, ctx)) - goto out; + if (filp->f_pos == 0) { + if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR)) + goto out; + filp->f_pos = 1; + } + if (filp->f_pos == 1) { + if (filldir(dirent, "..", 2, 1, parent_ino(dentry), DT_DIR)) + goto out; + filp->f_pos = 2; + } page = grab_cache_page(&inode->i_data, 0); if (!page) @@ -461,7 +469,7 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx) if (!PageUptodate(page) || !ctl.head.eof) goto init_cache; - if (ctx->pos == 2) { + if (filp->f_pos == 2) { if (jiffies - ctl.head.time >= NCP_MAX_AGE(server)) goto init_cache; @@ -471,10 +479,10 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx) goto init_cache; } - if (ctx->pos > ctl.head.end) + if (filp->f_pos > ctl.head.end) goto finished; - ctl.fpos = ctx->pos + (NCP_DIRCACHE_START - 2); + ctl.fpos = filp->f_pos + (NCP_DIRCACHE_START - 2); ctl.ofs = ctl.fpos / NCP_DIRCACHE_SIZE; ctl.idx = ctl.fpos % NCP_DIRCACHE_SIZE; @@ -489,21 +497,21 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx) } while (ctl.idx < NCP_DIRCACHE_SIZE) { struct dentry *dent; - bool over; + int res; dent = ncp_dget_fpos(ctl.cache->dentry[ctl.idx], - dentry, ctx->pos); + dentry, filp->f_pos); if (!dent) goto invalid_cache; - over = !dir_emit(ctx, dent->d_name.name, - dent->d_name.len, + res = filldir(dirent, dent->d_name.name, + dent->d_name.len, filp->f_pos, dent->d_inode->i_ino, DT_UNKNOWN); dput(dent); - if (over) + if (res) goto finished; - ctx->pos += 1; + filp->f_pos += 1; ctl.idx += 1; - if (ctx->pos > ctl.head.end) + if (filp->f_pos > ctl.head.end) goto finished; } if (ctl.page) { @@ -540,9 +548,9 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx) ctl.valid = 1; read_really: if (ncp_is_server_root(inode)) { - ncp_read_volume_list(file, ctx, &ctl); + ncp_read_volume_list(filp, dirent, filldir, &ctl); } else { - ncp_do_readdir(file, ctx, &ctl); + ncp_do_readdir(filp, dirent, filldir, &ctl); } ctl.head.end = ctl.fpos 
- 1; ctl.head.eof = ctl.valid; @@ -565,11 +573,11 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx) } static int -ncp_fill_cache(struct file *file, struct dir_context *ctx, +ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir, struct ncp_cache_control *ctrl, struct ncp_entry_info *entry, int inval_childs) { - struct dentry *newdent, *dentry = file->f_path.dentry; + struct dentry *newdent, *dentry = filp->f_path.dentry; struct inode *dir = dentry->d_inode; struct ncp_cache_control ctl = *ctrl; struct qstr qname; @@ -658,15 +666,15 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx, end_advance: if (!valid) ctl.valid = 0; - if (!ctl.filled && (ctl.fpos == ctx->pos)) { + if (!ctl.filled && (ctl.fpos == filp->f_pos)) { if (!ino) ino = find_inode_number(dentry, &qname); if (!ino) ino = iunique(dir->i_sb, 2); - ctl.filled = !dir_emit(ctx, qname.name, qname.len, - ino, DT_UNKNOWN); + ctl.filled = filldir(dirent, qname.name, qname.len, + filp->f_pos, ino, DT_UNKNOWN); if (!ctl.filled) - ctx->pos += 1; + filp->f_pos += 1; } ctl.fpos += 1; ctl.idx += 1; @@ -675,10 +683,10 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx, } static void -ncp_read_volume_list(struct file *file, struct dir_context *ctx, +ncp_read_volume_list(struct file *filp, void *dirent, filldir_t filldir, struct ncp_cache_control *ctl) { - struct dentry *dentry = file->f_path.dentry; + struct dentry *dentry = filp->f_path.dentry; struct inode *inode = dentry->d_inode; struct ncp_server *server = NCP_SERVER(inode); struct ncp_volume_info info; @@ -686,7 +694,7 @@ ncp_read_volume_list(struct file *file, struct dir_context *ctx, int i; DPRINTK("ncp_read_volume_list: pos=%ld\n", - (unsigned long) ctx->pos); + (unsigned long) filp->f_pos); for (i = 0; i < NCP_NUMBER_OF_VOLUMES; i++) { int inval_dentry; @@ -707,16 +715,16 @@ ncp_read_volume_list(struct file *file, struct dir_context *ctx, } inval_dentry = ncp_update_known_namespace(server, entry.i.volNumber, NULL); entry.volume = entry.i.volNumber; - if (!ncp_fill_cache(file, ctx, ctl, &entry, inval_dentry)) + if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry, inval_dentry)) return; } } static void -ncp_do_readdir(struct file *file, struct dir_context *ctx, +ncp_do_readdir(struct file *filp, void *dirent, filldir_t filldir, struct ncp_cache_control *ctl) { - struct dentry *dentry = file->f_path.dentry; + struct dentry *dentry = filp->f_path.dentry; struct inode *dir = dentry->d_inode; struct ncp_server *server = NCP_SERVER(dir); struct nw_search_sequence seq; @@ -728,7 +736,7 @@ ncp_do_readdir(struct file *file, struct dir_context *ctx, DPRINTK("ncp_do_readdir: %s/%s, fpos=%ld\n", dentry->d_parent->d_name.name, dentry->d_name.name, - (unsigned long) ctx->pos); + (unsigned long) filp->f_pos); PPRINTK("ncp_do_readdir: init %s, volnum=%d, dirent=%u\n", dentry->d_name.name, NCP_FINFO(dir)->volNumber, NCP_FINFO(dir)->dirEntNum); @@ -770,7 +778,7 @@ ncp_do_readdir(struct file *file, struct dir_context *ctx, rpl += onerpl; rpls -= onerpl; entry.volume = entry.i.volNumber; - if (!ncp_fill_cache(file, ctx, ctl, &entry, 0)) + if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry, 0)) break; } } while (more); @@ -1021,6 +1029,15 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry) DPRINTK("ncp_rmdir: removing %s/%s\n", dentry->d_parent->d_name.name, dentry->d_name.name); + /* + * fail with EBUSY if there are still references to this + * directory. 
+ */ + dentry_unhash(dentry); + error = -EBUSY; + if (!d_unhashed(dentry)) + goto out; + len = sizeof(__name); error = ncp_io2vol(server, __name, &len, dentry->d_name.name, dentry->d_name.len, !ncp_preserve_case(dir)); diff --git a/trunk/fs/nfs/callback_proc.c b/trunk/fs/nfs/callback_proc.c index 0bc27684ebfa..a13d26ede254 100644 --- a/trunk/fs/nfs/callback_proc.c +++ b/trunk/fs/nfs/callback_proc.c @@ -414,7 +414,7 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, spin_lock(&tbl->slot_tbl_lock); /* state manager is resetting the session */ - if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) { + if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) { spin_unlock(&tbl->slot_tbl_lock); status = htonl(NFS4ERR_DELAY); /* Return NFS4ERR_BADSESSION if we're draining the session diff --git a/trunk/fs/nfs/callback_xdr.c b/trunk/fs/nfs/callback_xdr.c index a35582c9d444..59461c957d9d 100644 --- a/trunk/fs/nfs/callback_xdr.c +++ b/trunk/fs/nfs/callback_xdr.c @@ -763,7 +763,7 @@ static void nfs4_callback_free_slot(struct nfs4_session *session) * A single slot, so highest used slotid is either 0 or -1 */ tbl->highest_used_slotid = NFS4_NO_SLOT; - nfs4_slot_tbl_drain_complete(tbl); + nfs4_session_drain_complete(session, tbl); spin_unlock(&tbl->slot_tbl_lock); } diff --git a/trunk/fs/nfs/dir.c b/trunk/fs/nfs/dir.c index 5d051419527b..e093e73178b7 100644 --- a/trunk/fs/nfs/dir.c +++ b/trunk/fs/nfs/dir.c @@ -46,7 +46,7 @@ static int nfs_opendir(struct inode *, struct file *); static int nfs_closedir(struct inode *, struct file *); -static int nfs_readdir(struct file *, struct dir_context *); +static int nfs_readdir(struct file *, void *, filldir_t); static int nfs_fsync_dir(struct file *, loff_t, loff_t, int); static loff_t nfs_llseek_dir(struct file *, loff_t, int); static void nfs_readdir_clear_array(struct page*); @@ -54,7 +54,7 @@ static void nfs_readdir_clear_array(struct page*); const struct file_operations nfs_dir_operations = { .llseek = nfs_llseek_dir, .read = generic_read_dir, - .iterate = nfs_readdir, + .readdir = nfs_readdir, .open = nfs_opendir, .release = nfs_closedir, .fsync = nfs_fsync_dir, @@ -147,7 +147,6 @@ typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, int); typedef struct { struct file *file; struct page *page; - struct dir_context *ctx; unsigned long page_index; u64 *dir_cookie; u64 last_cookie; @@ -253,7 +252,7 @@ int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page) static int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc) { - loff_t diff = desc->ctx->pos - desc->current_index; + loff_t diff = desc->file->f_pos - desc->current_index; unsigned int index; if (diff < 0) @@ -290,7 +289,7 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des || (nfsi->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))) { ctx->duped = 0; ctx->attr_gencount = nfsi->attr_gencount; - } else if (new_pos < desc->ctx->pos) { + } else if (new_pos < desc->file->f_pos) { if (ctx->duped > 0 && ctx->dup_cookie == *desc->dir_cookie) { if (printk_ratelimit()) { @@ -308,7 +307,7 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des ctx->dup_cookie = *desc->dir_cookie; ctx->duped = -1; } - desc->ctx->pos = new_pos; + desc->file->f_pos = new_pos; desc->cache_entry_index = i; return 0; } @@ -406,13 +405,13 @@ int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry) } static -bool nfs_use_readdirplus(struct inode *dir, 
struct dir_context *ctx) +bool nfs_use_readdirplus(struct inode *dir, struct file *filp) { if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS)) return false; if (test_and_clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags)) return true; - if (ctx->pos == 0) + if (filp->f_pos == 0) return true; return false; } @@ -703,7 +702,8 @@ int readdir_search_pagecache(nfs_readdir_descriptor_t *desc) * Once we've found the start of the dirent within a page: fill 'er up... */ static -int nfs_do_filldir(nfs_readdir_descriptor_t *desc) +int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent, + filldir_t filldir) { struct file *file = desc->file; int i = 0; @@ -721,12 +721,13 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc) struct nfs_cache_array_entry *ent; ent = &array->array[i]; - if (!dir_emit(desc->ctx, ent->string.name, ent->string.len, - nfs_compat_user_ino64(ent->ino), ent->d_type)) { + if (filldir(dirent, ent->string.name, ent->string.len, + file->f_pos, nfs_compat_user_ino64(ent->ino), + ent->d_type) < 0) { desc->eof = 1; break; } - desc->ctx->pos++; + file->f_pos++; if (i < (array->size-1)) *desc->dir_cookie = array->array[i+1].cookie; else @@ -758,7 +759,8 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc) * directory in the page cache by the time we get here. */ static inline -int uncached_readdir(nfs_readdir_descriptor_t *desc) +int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent, + filldir_t filldir) { struct page *page = NULL; int status; @@ -783,7 +785,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc) if (status < 0) goto out_release; - status = nfs_do_filldir(desc); + status = nfs_do_filldir(desc, dirent, filldir); out: dfprintk(DIRCACHE, "NFS: %s: returns %d\n", @@ -798,36 +800,35 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc) last cookie cache takes care of the common case of reading the whole directory. */ -static int nfs_readdir(struct file *file, struct dir_context *ctx) +static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct dentry *dentry = file->f_path.dentry; + struct dentry *dentry = filp->f_path.dentry; struct inode *inode = dentry->d_inode; nfs_readdir_descriptor_t my_desc, *desc = &my_desc; - struct nfs_open_dir_context *dir_ctx = file->private_data; + struct nfs_open_dir_context *dir_ctx = filp->private_data; int res; dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n", dentry->d_parent->d_name.name, dentry->d_name.name, - (long long)ctx->pos); + (long long)filp->f_pos); nfs_inc_stats(inode, NFSIOS_VFSGETDENTS); /* - * ctx->pos points to the dirent entry number. + * filp->f_pos points to the dirent entry number. * *desc->dir_cookie has the cookie for the next entry. We have * to either find the entry with the appropriate number or * revalidate the cookie. */ memset(desc, 0, sizeof(*desc)); - desc->file = file; - desc->ctx = ctx; + desc->file = filp; desc->dir_cookie = &dir_ctx->dir_cookie; desc->decode = NFS_PROTO(inode)->decode_dirent; - desc->plus = nfs_use_readdirplus(inode, ctx) ? 1 : 0; + desc->plus = nfs_use_readdirplus(inode, filp) ? 
1 : 0; nfs_block_sillyrename(dentry); - res = nfs_revalidate_mapping(inode, file->f_mapping); + res = nfs_revalidate_mapping(inode, filp->f_mapping); if (res < 0) goto out; @@ -839,7 +840,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) /* This means either end of directory */ if (*desc->dir_cookie && desc->eof == 0) { /* Or that the server has 'lost' a cookie */ - res = uncached_readdir(desc); + res = uncached_readdir(desc, dirent, filldir); if (res == 0) continue; } @@ -856,7 +857,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) if (res < 0) break; - res = nfs_do_filldir(desc); + res = nfs_do_filldir(desc, dirent, filldir); if (res < 0) break; } while (!desc->eof); diff --git a/trunk/fs/nfs/file.c b/trunk/fs/nfs/file.c index 6b4a79f4ad1d..a87a44f84113 100644 --- a/trunk/fs/nfs/file.c +++ b/trunk/fs/nfs/file.c @@ -451,13 +451,11 @@ static int nfs_write_end(struct file *file, struct address_space *mapping, * - Called if either PG_private or PG_fscache is set on the page * - Caller holds page lock */ -static void nfs_invalidate_page(struct page *page, unsigned int offset, - unsigned int length) +static void nfs_invalidate_page(struct page *page, unsigned long offset) { - dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n", - page, offset, length); + dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %lu)\n", page, offset); - if (offset != 0 || length < PAGE_CACHE_SIZE) + if (offset != 0) return; /* Cancel any unstarted writes on this page */ nfs_wb_page_cancel(page_file_mapping(page)->host, page); diff --git a/trunk/fs/nfs/nfs4client.c b/trunk/fs/nfs/nfs4client.c index 4cbad5d6b276..947b0c908aa9 100644 --- a/trunk/fs/nfs/nfs4client.c +++ b/trunk/fs/nfs/nfs4client.c @@ -203,7 +203,7 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp, __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags); error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_GSS_KRB5I); if (error == -EINVAL) - error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX); + error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_NULL); if (error < 0) goto error; diff --git a/trunk/fs/nfs/nfs4proc.c b/trunk/fs/nfs/nfs4proc.c index d7ba5616989c..8fbc10054115 100644 --- a/trunk/fs/nfs/nfs4proc.c +++ b/trunk/fs/nfs/nfs4proc.c @@ -572,7 +572,7 @@ int nfs41_setup_sequence(struct nfs4_session *session, task->tk_timeout = 0; spin_lock(&tbl->slot_tbl_lock); - if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) && + if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) && !args->sa_privileged) { /* The state manager will wait until the slot table is empty */ dprintk("%s session is draining\n", __func__); @@ -1078,7 +1078,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) struct nfs4_state *state = opendata->state; struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs_delegation *delegation; - int open_mode = opendata->o_arg.open_flags; + int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC); fmode_t fmode = opendata->o_arg.fmode; nfs4_stateid stateid; int ret = -EAGAIN; diff --git a/trunk/fs/nfs/nfs4session.c b/trunk/fs/nfs/nfs4session.c index c4e225e4a9af..ebda5f4a031b 100644 --- a/trunk/fs/nfs/nfs4session.c +++ b/trunk/fs/nfs/nfs4session.c @@ -73,7 +73,7 @@ void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot) tbl->highest_used_slotid = new_max; else { tbl->highest_used_slotid = NFS4_NO_SLOT; - nfs4_slot_tbl_drain_complete(tbl); + nfs4_session_drain_complete(tbl->session, tbl); } } dprintk("%s: slotid %u 
highest_used_slotid %d\n", __func__, @@ -226,7 +226,7 @@ static bool nfs41_assign_slot(struct rpc_task *task, void *pslot) struct nfs4_slot *slot = pslot; struct nfs4_slot_table *tbl = slot->table; - if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) + if (nfs4_session_draining(tbl->session) && !args->sa_privileged) return false; slot->generation = tbl->generation; args->sa_slot = slot; diff --git a/trunk/fs/nfs/nfs4session.h b/trunk/fs/nfs/nfs4session.h index ff7d9f0f8a65..6f3cb39386d4 100644 --- a/trunk/fs/nfs/nfs4session.h +++ b/trunk/fs/nfs/nfs4session.h @@ -25,10 +25,6 @@ struct nfs4_slot { }; /* Sessions */ -enum nfs4_slot_tbl_state { - NFS4_SLOT_TBL_DRAINING, -}; - #define SLOT_TABLE_SZ DIV_ROUND_UP(NFS4_MAX_SLOT_TABLE, 8*sizeof(long)) struct nfs4_slot_table { struct nfs4_session *session; /* Parent session */ @@ -47,7 +43,6 @@ struct nfs4_slot_table { unsigned long generation; /* Generation counter for target_highest_slotid */ struct completion complete; - unsigned long slot_tbl_state; }; /* @@ -73,6 +68,7 @@ struct nfs4_session { enum nfs4_session_state { NFS4_SESSION_INITING, + NFS4_SESSION_DRAINING, }; #if defined(CONFIG_NFS_V4_1) @@ -92,11 +88,12 @@ extern void nfs4_destroy_session(struct nfs4_session *session); extern int nfs4_init_session(struct nfs_server *server); extern int nfs4_init_ds_session(struct nfs_client *, unsigned long); -extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl); +extern void nfs4_session_drain_complete(struct nfs4_session *session, + struct nfs4_slot_table *tbl); -static inline bool nfs4_slot_tbl_draining(struct nfs4_slot_table *tbl) +static inline bool nfs4_session_draining(struct nfs4_session *session) { - return !!test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state); + return !!test_bit(NFS4_SESSION_DRAINING, &session->session_state); } bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl, diff --git a/trunk/fs/nfs/nfs4state.c b/trunk/fs/nfs/nfs4state.c index 1fab140764c4..300d17d85c0e 100644 --- a/trunk/fs/nfs/nfs4state.c +++ b/trunk/fs/nfs/nfs4state.c @@ -241,7 +241,7 @@ static void nfs4_end_drain_session(struct nfs_client *clp) if (ses == NULL) return; tbl = &ses->fc_slot_table; - if (test_and_clear_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) { + if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) { spin_lock(&tbl->slot_tbl_lock); nfs41_wake_slot_table(tbl); spin_unlock(&tbl->slot_tbl_lock); @@ -251,15 +251,15 @@ static void nfs4_end_drain_session(struct nfs_client *clp) /* * Signal state manager thread if session fore channel is drained */ -void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl) +void nfs4_session_drain_complete(struct nfs4_session *session, + struct nfs4_slot_table *tbl) { - if (nfs4_slot_tbl_draining(tbl)) + if (nfs4_session_draining(session)) complete(&tbl->complete); } -static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl) +static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl) { - set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state); spin_lock(&tbl->slot_tbl_lock); if (tbl->highest_used_slotid != NFS4_NO_SLOT) { INIT_COMPLETION(tbl->complete); @@ -275,12 +275,13 @@ static int nfs4_begin_drain_session(struct nfs_client *clp) struct nfs4_session *ses = clp->cl_session; int ret = 0; + set_bit(NFS4_SESSION_DRAINING, &ses->session_state); /* back channel */ - ret = nfs4_drain_slot_tbl(&ses->bc_slot_table); + ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table); if (ret) return ret; /* fore channel */ - return nfs4_drain_slot_tbl(&ses->fc_slot_table); + 
return nfs4_wait_on_slot_tbl(&ses->fc_slot_table); } static void nfs41_finish_session_reset(struct nfs_client *clp) diff --git a/trunk/fs/nfs/super.c b/trunk/fs/nfs/super.c index 2d7525fbcf25..a366107a7331 100644 --- a/trunk/fs/nfs/super.c +++ b/trunk/fs/nfs/super.c @@ -1942,7 +1942,6 @@ static int nfs23_validate_mount_data(void *options, args->namlen = data->namlen; args->bsize = data->bsize; - args->auth_flavors[0] = RPC_AUTH_UNIX; if (data->flags & NFS_MOUNT_SECFLAVOUR) args->auth_flavors[0] = data->pseudoflavor; if (!args->nfs_server.hostname) @@ -2638,7 +2637,6 @@ static int nfs4_validate_mount_data(void *options, goto out_no_address; args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port); - args->auth_flavors[0] = RPC_AUTH_UNIX; if (data->auth_flavourlen) { if (data->auth_flavourlen > 1) goto out_inval_auth; diff --git a/trunk/fs/nfsd/nfs4recover.c b/trunk/fs/nfsd/nfs4recover.c index 105a3b080d12..4e9a21db867a 100644 --- a/trunk/fs/nfsd/nfs4recover.c +++ b/trunk/fs/nfsd/nfs4recover.c @@ -240,16 +240,11 @@ struct name_list { struct list_head list; }; -struct nfs4_dir_ctx { - struct dir_context ctx; - struct list_head names; -}; - static int nfsd4_build_namelist(void *arg, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { - struct nfs4_dir_ctx *ctx = arg; + struct list_head *names = arg; struct name_list *entry; if (namlen != HEXDIR_LEN - 1) @@ -259,7 +254,7 @@ nfsd4_build_namelist(void *arg, const char *name, int namlen, return -ENOMEM; memcpy(entry->name, name, HEXDIR_LEN - 1); entry->name[HEXDIR_LEN - 1] = '\0'; - list_add(&entry->list, &ctx->names); + list_add(&entry->list, names); return 0; } @@ -268,10 +263,7 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn) { const struct cred *original_cred; struct dentry *dir = nn->rec_file->f_path.dentry; - struct nfs4_dir_ctx ctx = { - .ctx.actor = nfsd4_build_namelist, - .names = LIST_HEAD_INIT(ctx.names) - }; + LIST_HEAD(names); int status; status = nfs4_save_creds(&original_cred); @@ -284,11 +276,11 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn) return status; } - status = iterate_dir(nn->rec_file, &ctx.ctx); + status = vfs_readdir(nn->rec_file, nfsd4_build_namelist, &names); mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); - while (!list_empty(&ctx.names)) { + while (!list_empty(&names)) { struct name_list *entry; - entry = list_entry(ctx.names.next, struct name_list, list); + entry = list_entry(names.next, struct name_list, list); if (!status) { struct dentry *dentry; dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1); diff --git a/trunk/fs/nfsd/vfs.c b/trunk/fs/nfsd/vfs.c index a6bc8a7423db..84ce601d8063 100644 --- a/trunk/fs/nfsd/vfs.c +++ b/trunk/fs/nfsd/vfs.c @@ -1912,7 +1912,6 @@ struct buffered_dirent { }; struct readdir_data { - struct dir_context ctx; char *dirent; size_t used; int full; @@ -1944,15 +1943,13 @@ static int nfsd_buffered_filldir(void *__buf, const char *name, int namlen, static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func, struct readdir_cd *cdp, loff_t *offsetp) { + struct readdir_data buf; struct buffered_dirent *de; int host_err; int size; loff_t offset; - struct readdir_data buf = { - .ctx.actor = nfsd_buffered_filldir, - .dirent = (void *)__get_free_page(GFP_KERNEL) - }; + buf.dirent = (void *)__get_free_page(GFP_KERNEL); if (!buf.dirent) return nfserrno(-ENOMEM); @@ -1966,7 +1963,7 @@ static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func, buf.used = 0; buf.full = 0; - host_err = iterate_dir(file, 
&buf.ctx); + host_err = vfs_readdir(file, nfsd_buffered_filldir, &buf); if (buf.full) host_err = 0; diff --git a/trunk/fs/nilfs2/dir.c b/trunk/fs/nilfs2/dir.c index 197a63e9d102..f30b017740a7 100644 --- a/trunk/fs/nilfs2/dir.c +++ b/trunk/fs/nilfs2/dir.c @@ -256,18 +256,22 @@ static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode) de->file_type = nilfs_type_by_mode[(mode & S_IFMT)>>S_SHIFT]; } -static int nilfs_readdir(struct file *file, struct dir_context *ctx) +static int nilfs_readdir(struct file *filp, void *dirent, filldir_t filldir) { - loff_t pos = ctx->pos; - struct inode *inode = file_inode(file); + loff_t pos = filp->f_pos; + struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; unsigned int offset = pos & ~PAGE_CACHE_MASK; unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned long npages = dir_pages(inode); /* unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */ + unsigned char *types = NULL; + int ret; if (pos > inode->i_size - NILFS_DIR_REC_LEN(1)) - return 0; + goto success; + + types = nilfs_filetype_table; for ( ; n < npages; n++, offset = 0) { char *kaddr, *limit; @@ -277,8 +281,9 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx) if (IS_ERR(page)) { nilfs_error(sb, __func__, "bad page in #%lu", inode->i_ino); - ctx->pos += PAGE_CACHE_SIZE - offset; - return -EIO; + filp->f_pos += PAGE_CACHE_SIZE - offset; + ret = -EIO; + goto done; } kaddr = page_address(page); de = (struct nilfs_dir_entry *)(kaddr + offset); @@ -288,28 +293,35 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx) if (de->rec_len == 0) { nilfs_error(sb, __func__, "zero-length directory entry"); + ret = -EIO; nilfs_put_page(page); - return -EIO; + goto done; } if (de->inode) { - unsigned char t; + int over; + unsigned char d_type = DT_UNKNOWN; - if (de->file_type < NILFS_FT_MAX) - t = nilfs_filetype_table[de->file_type]; - else - t = DT_UNKNOWN; + if (types && de->file_type < NILFS_FT_MAX) + d_type = types[de->file_type]; - if (!dir_emit(ctx, de->name, de->name_len, - le64_to_cpu(de->inode), t)) { + offset = (char *)de - kaddr; + over = filldir(dirent, de->name, de->name_len, + (n<<PAGE_CACHE_SHIFT) | offset, + le64_to_cpu(de->inode), d_type); + if (over) { nilfs_put_page(page); - return 0; + goto success; } } - ctx->pos += nilfs_rec_len_from_disk(de->rec_len); + filp->f_pos += nilfs_rec_len_from_disk(de->rec_len); } nilfs_put_page(page); } - return 0; + +success: + ret = 0; +done: + return ret; } /* @@ -666,7 +678,7 @@ int nilfs_empty_dir(struct inode *inode) const struct file_operations nilfs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = nilfs_readdir, + .readdir = nilfs_readdir, .unlocked_ioctl = nilfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = nilfs_compat_ioctl, diff --git a/trunk/fs/nilfs2/inode.c b/trunk/fs/nilfs2/inode.c index bccfec8343c5..689fb608648e 100644 --- a/trunk/fs/nilfs2/inode.c +++ b/trunk/fs/nilfs2/inode.c @@ -219,32 +219,13 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc) static int nilfs_set_page_dirty(struct page *page) { - int ret = __set_page_dirty_nobuffers(page); + int ret = __set_page_dirty_buffers(page); - if (page_has_buffers(page)) { + if (ret) { struct inode *inode = page->mapping->host; - unsigned nr_dirty = 0; - struct buffer_head *bh, *head; + unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits); - /* - * This page is locked by callers, and no other thread - * concurrently marks its buffers dirty since they are - * only dirtied through routines in 
fs/buffer.c in - * which call sites of mark_buffer_dirty are protected - * by page lock. - */ - bh = head = page_buffers(page); - do { - /* Do not mark hole blocks dirty */ - if (buffer_dirty(bh) || !buffer_mapped(bh)) - continue; - - set_buffer_dirty(bh); - nr_dirty++; - } while (bh = bh->b_this_page, bh != head); - - if (nr_dirty) - nilfs_set_file_dirty(inode, nr_dirty); + nilfs_set_file_dirty(inode, nr_dirty); } return ret; } diff --git a/trunk/fs/ntfs/aops.c b/trunk/fs/ntfs/aops.c index d267ea6aa1a0..fa9c05f97af4 100644 --- a/trunk/fs/ntfs/aops.c +++ b/trunk/fs/ntfs/aops.c @@ -1372,7 +1372,7 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc) * The page may have dirty, unmapped buffers. Make them * freeable here, so the page does not leak. */ - block_invalidatepage(page, 0, PAGE_CACHE_SIZE); + block_invalidatepage(page, 0); unlock_page(page); ntfs_debug("Write outside i_size - truncated?"); return 0; diff --git a/trunk/fs/ntfs/dir.c b/trunk/fs/ntfs/dir.c index 9e38dafa3bc7..aa411c3f20e9 100644 --- a/trunk/fs/ntfs/dir.c +++ b/trunk/fs/ntfs/dir.c @@ -1004,11 +1004,13 @@ u64 ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const ntfschar *uname, /** * ntfs_filldir - ntfs specific filldir method * @vol: current ntfs volume + * @fpos: position in the directory * @ndir: ntfs inode of current directory * @ia_page: page in which the index allocation buffer @ie is in resides * @ie: current index entry * @name: buffer to use for the converted name - * @actor: what to feed the entries to + * @dirent: vfs filldir callback context + * @filldir: vfs filldir callback * * Convert the Unicode @name to the loaded NLS and pass it to the @filldir * callback. @@ -1022,12 +1024,12 @@ u64 ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const ntfschar *uname, * retake the lock if we are returning a non-zero value as ntfs_readdir() * would need to drop the lock immediately anyway. */ -static inline int ntfs_filldir(ntfs_volume *vol, +static inline int ntfs_filldir(ntfs_volume *vol, loff_t fpos, ntfs_inode *ndir, struct page *ia_page, INDEX_ENTRY *ie, - u8 *name, struct dir_context *actor) + u8 *name, void *dirent, filldir_t filldir) { unsigned long mref; - int name_len; + int name_len, rc; unsigned dt_type; FILE_NAME_TYPE_FLAGS name_type; @@ -1066,14 +1068,13 @@ static inline int ntfs_filldir(ntfs_volume *vol, if (ia_page) unlock_page(ia_page); ntfs_debug("Calling filldir for %s with len %i, fpos 0x%llx, inode " - "0x%lx, DT_%s.", name, name_len, actor->pos, mref, + "0x%lx, DT_%s.", name, name_len, fpos, mref, dt_type == DT_DIR ? "DIR" : "REG"); - if (!dir_emit(actor, name, name_len, mref, dt_type)) - return 1; + rc = filldir(dirent, name, name_len, fpos, mref, dt_type); /* Relock the page but not if we are aborting ->readdir. */ - if (ia_page) + if (!rc && ia_page) lock_page(ia_page); - return 0; + return rc; } /* @@ -1096,11 +1097,11 @@ static inline int ntfs_filldir(ntfs_volume *vol, * removes them again after the write is complete after which it * unlocks the page. 
*/ -static int ntfs_readdir(struct file *file, struct dir_context *actor) +static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir) { s64 ia_pos, ia_start, prev_ia_pos, bmp_pos; - loff_t i_size; - struct inode *bmp_vi, *vdir = file_inode(file); + loff_t fpos, i_size; + struct inode *bmp_vi, *vdir = file_inode(filp); struct super_block *sb = vdir->i_sb; ntfs_inode *ndir = NTFS_I(vdir); ntfs_volume *vol = NTFS_SB(sb); @@ -1115,16 +1116,33 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor) u8 *kaddr, *bmp, *index_end; ntfs_attr_search_ctx *ctx; + fpos = filp->f_pos; ntfs_debug("Entering for inode 0x%lx, fpos 0x%llx.", - vdir->i_ino, actor->pos); + vdir->i_ino, fpos); rc = err = 0; /* Are we at end of dir yet? */ i_size = i_size_read(vdir); - if (actor->pos >= i_size + vol->mft_record_size) - return 0; + if (fpos >= i_size + vol->mft_record_size) + goto done; /* Emulate . and .. for all directories. */ - if (!dir_emit_dots(file, actor)) - return 0; + if (!fpos) { + ntfs_debug("Calling filldir for . with len 1, fpos 0x0, " + "inode 0x%lx, DT_DIR.", vdir->i_ino); + rc = filldir(dirent, ".", 1, fpos, vdir->i_ino, DT_DIR); + if (rc) + goto done; + fpos++; + } + if (fpos == 1) { + ntfs_debug("Calling filldir for .. with len 2, fpos 0x1, " + "inode 0x%lx, DT_DIR.", + (unsigned long)parent_ino(filp->f_path.dentry)); + rc = filldir(dirent, "..", 2, fpos, + parent_ino(filp->f_path.dentry), DT_DIR); + if (rc) + goto done; + fpos++; + } m = NULL; ctx = NULL; /* @@ -1137,7 +1155,7 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor) goto err_out; } /* Are we jumping straight into the index allocation attribute? */ - if (actor->pos >= vol->mft_record_size) + if (fpos >= vol->mft_record_size) goto skip_index_root; /* Get hold of the mft record for the directory. */ m = map_mft_record(ndir); @@ -1152,7 +1170,7 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor) goto err_out; } /* Get the offset into the index root attribute. */ - ir_pos = (s64)actor->pos; + ir_pos = (s64)fpos; /* Find the index root attribute in the mft record. */ err = ntfs_attr_lookup(AT_INDEX_ROOT, I30, 4, CASE_SENSITIVE, 0, NULL, 0, ctx); @@ -1208,9 +1226,10 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor) if (ir_pos > (u8*)ie - (u8*)ir) continue; /* Advance the position even if going to skip the entry. */ - actor->pos = (u8*)ie - (u8*)ir; + fpos = (u8*)ie - (u8*)ir; /* Submit the name to the filldir callback. */ - rc = ntfs_filldir(vol, ndir, NULL, ie, name, actor); + rc = ntfs_filldir(vol, fpos, ndir, NULL, ie, name, dirent, + filldir); if (rc) { kfree(ir); goto abort; @@ -1223,12 +1242,12 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor) if (!NInoIndexAllocPresent(ndir)) goto EOD; /* Advance fpos to the beginning of the index allocation. */ - actor->pos = vol->mft_record_size; + fpos = vol->mft_record_size; skip_index_root: kaddr = NULL; prev_ia_pos = -1LL; /* Get the offset into the index allocation attribute. */ - ia_pos = (s64)actor->pos - vol->mft_record_size; + ia_pos = (s64)fpos - vol->mft_record_size; ia_mapping = vdir->i_mapping; ntfs_debug("Inode 0x%lx, getting index bitmap.", vdir->i_ino); bmp_vi = ntfs_attr_iget(vdir, AT_BITMAP, I30, 4); @@ -1390,7 +1409,7 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor) if (ia_pos - ia_start > (u8*)ie - (u8*)ia) continue; /* Advance the position even if going to skip the entry. 
*/ - actor->pos = (u8*)ie - (u8*)ia + + fpos = (u8*)ie - (u8*)ia + (sle64_to_cpu(ia->index_block_vcn) << ndir->itype.index.vcn_size_bits) + vol->mft_record_size; @@ -1400,7 +1419,8 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor) * before returning, unless a non-zero value is returned in * which case the page is left unlocked. */ - rc = ntfs_filldir(vol, ndir, ia_page, ie, name, actor); + rc = ntfs_filldir(vol, fpos, ndir, ia_page, ie, name, dirent, + filldir); if (rc) { /* @ia_page is already unlocked in this case. */ ntfs_unmap_page(ia_page); @@ -1419,9 +1439,18 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor) iput(bmp_vi); EOD: /* We are finished, set fpos to EOD. */ - actor->pos = i_size + vol->mft_record_size; + fpos = i_size + vol->mft_record_size; abort: kfree(name); +done: +#ifdef DEBUG + if (!rc) + ntfs_debug("EOD, fpos 0x%llx, returning 0.", fpos); + else + ntfs_debug("filldir returned %i, fpos 0x%llx, returning 0.", + rc, fpos); +#endif + filp->f_pos = fpos; return 0; err_out: if (bmp_page) { @@ -1442,6 +1471,7 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor) if (!err) err = -EIO; ntfs_debug("Failed. Returning error code %i.", -err); + filp->f_pos = fpos; return err; } @@ -1541,7 +1571,7 @@ static int ntfs_dir_fsync(struct file *filp, loff_t start, loff_t end, const struct file_operations ntfs_dir_ops = { .llseek = generic_file_llseek, /* Seek inside directory. */ .read = generic_read_dir, /* Return -EISDIR. */ - .iterate = ntfs_readdir, /* Read directory contents. */ + .readdir = ntfs_readdir, /* Read directory contents. */ #ifdef NTFS_RW .fsync = ntfs_dir_fsync, /* Sync a directory to disk. */ /*.aio_fsync = ,*/ /* Sync all outstanding async diff --git a/trunk/fs/ocfs2/aops.c b/trunk/fs/ocfs2/aops.c index 79736a28d84f..20dfec72e903 100644 --- a/trunk/fs/ocfs2/aops.c +++ b/trunk/fs/ocfs2/aops.c @@ -603,12 +603,11 @@ static void ocfs2_dio_end_io(struct kiocb *iocb, * from ext3. PageChecked() bits have been removed as OCFS2 does not * do journalled data. */ -static void ocfs2_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +static void ocfs2_invalidatepage(struct page *page, unsigned long offset) { journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal; - jbd2_journal_invalidatepage(journal, page, offset, length); + jbd2_journal_invalidatepage(journal, page, offset); } static int ocfs2_releasepage(struct page *page, gfp_t wait) diff --git a/trunk/fs/ocfs2/dir.c b/trunk/fs/ocfs2/dir.c index eb760d8acd50..f1e1aed8f638 100644 --- a/trunk/fs/ocfs2/dir.c +++ b/trunk/fs/ocfs2/dir.c @@ -1761,10 +1761,11 @@ int __ocfs2_add_entry(handle_t *handle, static int ocfs2_dir_foreach_blk_id(struct inode *inode, u64 *f_version, - struct dir_context *ctx) + loff_t *f_pos, void *priv, + filldir_t filldir, int *filldir_err) { - int ret, i; - unsigned long offset = ctx->pos; + int ret, i, filldir_ret; + unsigned long offset = *f_pos; struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_inline_data *data; @@ -1780,7 +1781,8 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode, di = (struct ocfs2_dinode *)di_bh->b_data; data = &di->id2.i_data; - while (ctx->pos < i_size_read(inode)) { + while (*f_pos < i_size_read(inode)) { +revalidate: /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid * dirent right now. 
Scan from the start of the block @@ -1800,31 +1802,50 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode, break; i += le16_to_cpu(de->rec_len); } - ctx->pos = offset = i; + *f_pos = offset = i; *f_version = inode->i_version; } - de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos); - if (!ocfs2_check_dir_entry(inode, de, di_bh, ctx->pos)) { + de = (struct ocfs2_dir_entry *) (data->id_data + *f_pos); + if (!ocfs2_check_dir_entry(inode, de, di_bh, *f_pos)) { /* On error, skip the f_pos to the end. */ - ctx->pos = i_size_read(inode); - break; + *f_pos = i_size_read(inode); + goto out; } offset += le16_to_cpu(de->rec_len); if (le64_to_cpu(de->inode)) { + /* We might block in the next section + * if the data destination is + * currently swapped out. So, use a + * version stamp to detect whether or + * not the directory has been modified + * during the copy operation. + */ + u64 version = *f_version; unsigned char d_type = DT_UNKNOWN; if (de->file_type < OCFS2_FT_MAX) d_type = ocfs2_filetype_table[de->file_type]; - if (!dir_emit(ctx, de->name, de->name_len, - le64_to_cpu(de->inode), d_type)) - goto out; + filldir_ret = filldir(priv, de->name, + de->name_len, + *f_pos, + le64_to_cpu(de->inode), + d_type); + if (filldir_ret) { + if (filldir_err) + *filldir_err = filldir_ret; + break; + } + if (version != *f_version) + goto revalidate; } - ctx->pos += le16_to_cpu(de->rec_len); + *f_pos += le16_to_cpu(de->rec_len); } + out: brelse(di_bh); + return 0; } @@ -1834,26 +1855,27 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode, */ static int ocfs2_dir_foreach_blk_el(struct inode *inode, u64 *f_version, - struct dir_context *ctx, - bool persist) + loff_t *f_pos, void *priv, + filldir_t filldir, int *filldir_err) { + int error = 0; unsigned long offset, blk, last_ra_blk = 0; - int i; + int i, stored; struct buffer_head * bh, * tmp; struct ocfs2_dir_entry * de; struct super_block * sb = inode->i_sb; unsigned int ra_sectors = 16; - int stored = 0; + stored = 0; bh = NULL; - offset = ctx->pos & (sb->s_blocksize - 1); + offset = (*f_pos) & (sb->s_blocksize - 1); - while (ctx->pos < i_size_read(inode)) { - blk = ctx->pos >> sb->s_blocksize_bits; + while (!error && !stored && *f_pos < i_size_read(inode)) { + blk = (*f_pos) >> sb->s_blocksize_bits; if (ocfs2_read_dir_block(inode, blk, &bh, 0)) { /* Skip the corrupt dirblock and keep trying */ - ctx->pos += sb->s_blocksize - offset; + *f_pos += sb->s_blocksize - offset; continue; } @@ -1875,6 +1897,7 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode, ra_sectors = 8; } +revalidate: /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid * dirent right now. Scan from the start of the block @@ -1894,64 +1917,93 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode, i += le16_to_cpu(de->rec_len); } offset = i; - ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1)) + *f_pos = ((*f_pos) & ~(sb->s_blocksize - 1)) | offset; *f_version = inode->i_version; } - while (ctx->pos < i_size_read(inode) + while (!error && *f_pos < i_size_read(inode) && offset < sb->s_blocksize) { de = (struct ocfs2_dir_entry *) (bh->b_data + offset); if (!ocfs2_check_dir_entry(inode, de, bh, offset)) { /* On error, skip the f_pos to the next block. 
*/ - ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1; + *f_pos = ((*f_pos) | (sb->s_blocksize - 1)) + 1; brelse(bh); - continue; + goto out; } + offset += le16_to_cpu(de->rec_len); if (le64_to_cpu(de->inode)) { + /* We might block in the next section + * if the data destination is + * currently swapped out. So, use a + * version stamp to detect whether or + * not the directory has been modified + * during the copy operation. + */ + unsigned long version = *f_version; unsigned char d_type = DT_UNKNOWN; if (de->file_type < OCFS2_FT_MAX) d_type = ocfs2_filetype_table[de->file_type]; - if (!dir_emit(ctx, de->name, + error = filldir(priv, de->name, de->name_len, + *f_pos, le64_to_cpu(de->inode), - d_type)) { - brelse(bh); - return 0; + d_type); + if (error) { + if (filldir_err) + *filldir_err = error; + break; } - stored++; + if (version != *f_version) + goto revalidate; + stored ++; } - offset += le16_to_cpu(de->rec_len); - ctx->pos += le16_to_cpu(de->rec_len); + *f_pos += le16_to_cpu(de->rec_len); } offset = 0; brelse(bh); bh = NULL; - if (!persist && stored) - break; } - return 0; + + stored = 0; +out: + return stored; } static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version, - struct dir_context *ctx, - bool persist) + loff_t *f_pos, void *priv, filldir_t filldir, + int *filldir_err) { if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) - return ocfs2_dir_foreach_blk_id(inode, f_version, ctx); - return ocfs2_dir_foreach_blk_el(inode, f_version, ctx, persist); + return ocfs2_dir_foreach_blk_id(inode, f_version, f_pos, priv, + filldir, filldir_err); + + return ocfs2_dir_foreach_blk_el(inode, f_version, f_pos, priv, filldir, + filldir_err); } /* * This is intended to be called from inside other kernel functions, * so we fake some arguments. 
*/ -int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx) +int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv, + filldir_t filldir) { + int ret = 0, filldir_err = 0; u64 version = inode->i_version; - ocfs2_dir_foreach_blk(inode, &version, ctx, true); + + while (*f_pos < i_size_read(inode)) { + ret = ocfs2_dir_foreach_blk(inode, &version, f_pos, priv, + filldir, &filldir_err); + if (ret || filldir_err) + break; + } + + if (ret > 0) + ret = -EIO; + return 0; } @@ -1959,15 +2011,15 @@ int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx) * ocfs2_readdir() * */ -int ocfs2_readdir(struct file *file, struct dir_context *ctx) +int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir) { int error = 0; - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); int lock_level = 0; trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno); - error = ocfs2_inode_lock_atime(inode, file->f_path.mnt, &lock_level); + error = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level); if (lock_level && error >= 0) { /* We release EX lock which used to update atime * and get PR lock again to reduce contention @@ -1983,7 +2035,8 @@ int ocfs2_readdir(struct file *file, struct dir_context *ctx) goto bail_nolock; } - error = ocfs2_dir_foreach_blk(inode, &file->f_version, ctx, false); + error = ocfs2_dir_foreach_blk(inode, &filp->f_version, &filp->f_pos, + dirent, filldir, NULL); ocfs2_inode_unlock(inode, lock_level); if (error) @@ -2067,7 +2120,6 @@ int ocfs2_check_dir_for_entry(struct inode *dir, } struct ocfs2_empty_dir_priv { - struct dir_context ctx; unsigned seen_dot; unsigned seen_dot_dot; unsigned seen_other; @@ -2152,9 +2204,8 @@ static int ocfs2_empty_dir_dx(struct inode *inode, int ocfs2_empty_dir(struct inode *inode) { int ret; - struct ocfs2_empty_dir_priv priv = { - .ctx.actor = ocfs2_empty_dir_filldir - }; + loff_t start = 0; + struct ocfs2_empty_dir_priv priv; memset(&priv, 0, sizeof(priv)); @@ -2168,7 +2219,7 @@ int ocfs2_empty_dir(struct inode *inode) */ } - ret = ocfs2_dir_foreach(inode, &priv.ctx); + ret = ocfs2_dir_foreach(inode, &start, &priv, ocfs2_empty_dir_filldir); if (ret) mlog_errno(ret); diff --git a/trunk/fs/ocfs2/dir.h b/trunk/fs/ocfs2/dir.h index f0344b75b14d..e683f3deb645 100644 --- a/trunk/fs/ocfs2/dir.h +++ b/trunk/fs/ocfs2/dir.h @@ -92,8 +92,9 @@ int ocfs2_find_files_on_disk(const char *name, struct ocfs2_dir_lookup_result *res); int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name, int namelen, u64 *blkno); -int ocfs2_readdir(struct file *file, struct dir_context *ctx); -int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx); +int ocfs2_readdir(struct file *filp, void *dirent, filldir_t filldir); +int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv, + filldir_t filldir); int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb, struct inode *dir, struct buffer_head *parent_fe_bh, diff --git a/trunk/fs/ocfs2/dlm/dlmrecovery.c b/trunk/fs/ocfs2/dlm/dlmrecovery.c index e68588e6b1e8..b3fdd1a323d6 100644 --- a/trunk/fs/ocfs2/dlm/dlmrecovery.c +++ b/trunk/fs/ocfs2/dlm/dlmrecovery.c @@ -1408,7 +1408,6 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data, mres->lockname_len, mres->lockname); ret = -EFAULT; spin_unlock(&res->spinlock); - dlm_lockres_put(res); goto leave; } res->state |= DLM_LOCK_RES_MIGRATING; diff --git a/trunk/fs/ocfs2/extent_map.c b/trunk/fs/ocfs2/extent_map.c index 2487116d0d33..1c39efb71bab 100644 --- 
a/trunk/fs/ocfs2/extent_map.c +++ b/trunk/fs/ocfs2/extent_map.c @@ -790,7 +790,7 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, &hole_size, &rec, &is_last); if (ret) { mlog_errno(ret); - goto out_unlock; + goto out; } if (rec.e_blkno == 0ULL) { diff --git a/trunk/fs/ocfs2/file.c b/trunk/fs/ocfs2/file.c index 8a38714f1d92..8a7509f9e6f5 100644 --- a/trunk/fs/ocfs2/file.c +++ b/trunk/fs/ocfs2/file.c @@ -2288,7 +2288,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, ret = ocfs2_inode_lock(inode, NULL, 1); if (ret < 0) { mlog_errno(ret); - goto out; + goto out_sems; } ocfs2_inode_unlock(inode, 1); @@ -2712,7 +2712,7 @@ const struct file_operations ocfs2_fops = { const struct file_operations ocfs2_dops = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = ocfs2_readdir, + .readdir = ocfs2_readdir, .fsync = ocfs2_sync_file, .release = ocfs2_dir_release, .open = ocfs2_dir_open, @@ -2759,7 +2759,7 @@ const struct file_operations ocfs2_fops_no_plocks = { const struct file_operations ocfs2_dops_no_plocks = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = ocfs2_readdir, + .readdir = ocfs2_readdir, .fsync = ocfs2_sync_file, .release = ocfs2_dir_release, .open = ocfs2_dir_open, diff --git a/trunk/fs/ocfs2/journal.c b/trunk/fs/ocfs2/journal.c index 242170d83971..8eccfabcd12e 100644 --- a/trunk/fs/ocfs2/journal.c +++ b/trunk/fs/ocfs2/journal.c @@ -1941,7 +1941,6 @@ void ocfs2_orphan_scan_start(struct ocfs2_super *osb) } struct ocfs2_orphan_filldir_priv { - struct dir_context ctx; struct inode *head; struct ocfs2_super *osb; }; @@ -1978,11 +1977,11 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb, { int status; struct inode *orphan_dir_inode = NULL; - struct ocfs2_orphan_filldir_priv priv = { - .ctx.actor = ocfs2_orphan_filldir, - .osb = osb, - .head = *head - }; + struct ocfs2_orphan_filldir_priv priv; + loff_t pos = 0; + + priv.osb = osb; + priv.head = *head; orphan_dir_inode = ocfs2_get_system_file_inode(osb, ORPHAN_DIR_SYSTEM_INODE, @@ -2000,7 +1999,8 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb, goto out; } - status = ocfs2_dir_foreach(orphan_dir_inode, &priv.ctx); + status = ocfs2_dir_foreach(orphan_dir_inode, &pos, &priv, + ocfs2_orphan_filldir); if (status) { mlog_errno(status); goto out_cluster; diff --git a/trunk/fs/ocfs2/namei.c b/trunk/fs/ocfs2/namei.c index b4a5cdf9dbc5..04ee1b57c243 100644 --- a/trunk/fs/ocfs2/namei.c +++ b/trunk/fs/ocfs2/namei.c @@ -947,7 +947,7 @@ static int ocfs2_unlink(struct inode *dir, ocfs2_free_dir_lookup_result(&orphan_insert); ocfs2_free_dir_lookup_result(&lookup); - if (status && (status != -ENOTEMPTY)) + if (status) mlog_errno(status); return status; @@ -2216,7 +2216,7 @@ static int ocfs2_prep_new_orphaned_file(struct inode *dir, brelse(orphan_dir_bh); - return ret; + return 0; } int ocfs2_create_inode_in_orphan(struct inode *dir, diff --git a/trunk/fs/omfs/dir.c b/trunk/fs/omfs/dir.c index 1b8e9e8405b2..acbaebcad3a8 100644 --- a/trunk/fs/omfs/dir.c +++ b/trunk/fs/omfs/dir.c @@ -327,23 +327,26 @@ int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header, return is_bad; } -static bool omfs_fill_chain(struct inode *dir, struct dir_context *ctx, +static int omfs_fill_chain(struct file *filp, void *dirent, filldir_t filldir, u64 fsblock, int hindex) { + struct inode *dir = file_inode(filp); + struct buffer_head *bh; + struct omfs_inode *oi; + u64 self; + int res = 0; + unsigned char d_type; + /* follow chain in this bucket */ while (fsblock != ~0) { - 
struct buffer_head *bh = omfs_bread(dir->i_sb, fsblock); - struct omfs_inode *oi; - u64 self; - unsigned char d_type; - + bh = omfs_bread(dir->i_sb, fsblock); if (!bh) - return true; + goto out; oi = (struct omfs_inode *) bh->b_data; if (omfs_is_bad(OMFS_SB(dir->i_sb), &oi->i_head, fsblock)) { brelse(bh); - return true; + goto out; } self = fsblock; @@ -358,16 +361,15 @@ static bool omfs_fill_chain(struct inode *dir, struct dir_context *ctx, d_type = (oi->i_type == OMFS_DIR) ? DT_DIR : DT_REG; - if (!dir_emit(ctx, oi->i_name, - strnlen(oi->i_name, OMFS_NAMELEN), - self, d_type)) { - brelse(bh); - return false; - } + res = filldir(dirent, oi->i_name, strnlen(oi->i_name, + OMFS_NAMELEN), filp->f_pos, self, d_type); brelse(bh); - ctx->pos++; + if (res < 0) + break; + filp->f_pos++; } - return true; +out: + return res; } static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry, @@ -401,44 +403,60 @@ static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry, return err; } -static int omfs_readdir(struct file *file, struct dir_context *ctx) +static int omfs_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct inode *dir = file_inode(file); + struct inode *dir = file_inode(filp); struct buffer_head *bh; - __be64 *p; + loff_t offset, res; unsigned int hchain, hindex; int nbuckets; - - if (ctx->pos >> 32) - return -EINVAL; - - if (ctx->pos < 1 << 20) { - if (!dir_emit_dots(file, ctx)) - return 0; - ctx->pos = 1 << 20; + u64 fsblock; + int ret = -EINVAL; + + if (filp->f_pos >> 32) + goto success; + + switch ((unsigned long) filp->f_pos) { + case 0: + if (filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR) < 0) + goto success; + filp->f_pos++; + /* fall through */ + case 1: + if (filldir(dirent, "..", 2, 1, + parent_ino(filp->f_dentry), DT_DIR) < 0) + goto success; + filp->f_pos = 1 << 20; + /* fall through */ } nbuckets = (dir->i_size - OMFS_DIR_START) / 8; /* high 12 bits store bucket + 1 and low 20 bits store hash index */ - hchain = (ctx->pos >> 20) - 1; - hindex = ctx->pos & 0xfffff; + hchain = (filp->f_pos >> 20) - 1; + hindex = filp->f_pos & 0xfffff; bh = omfs_bread(dir->i_sb, dir->i_ino); if (!bh) - return -EINVAL; + goto out; - p = (__be64 *)(bh->b_data + OMFS_DIR_START) + hchain; + offset = OMFS_DIR_START + hchain * 8; - for (; hchain < nbuckets; hchain++) { - __u64 fsblock = be64_to_cpu(*p++); - if (!omfs_fill_chain(dir, ctx, fsblock, hindex)) - break; + for (; hchain < nbuckets; hchain++, offset += 8) { + fsblock = be64_to_cpu(*((__be64 *) &bh->b_data[offset])); + + res = omfs_fill_chain(filp, dirent, filldir, fsblock, hindex); hindex = 0; - ctx->pos = (hchain+2) << 20; + if (res < 0) + break; + + filp->f_pos = (hchain+2) << 20; } brelse(bh); - return 0; +success: + ret = 0; +out: + return ret; } const struct inode_operations omfs_dir_inops = { @@ -452,6 +470,6 @@ const struct inode_operations omfs_dir_inops = { const struct file_operations omfs_dir_operations = { .read = generic_read_dir, - .iterate = omfs_readdir, + .readdir = omfs_readdir, .llseek = generic_file_llseek, }; diff --git a/trunk/fs/openpromfs/inode.c b/trunk/fs/openpromfs/inode.c index 8c0ceb8dd1f7..75885ffde44e 100644 --- a/trunk/fs/openpromfs/inode.c +++ b/trunk/fs/openpromfs/inode.c @@ -162,11 +162,11 @@ static const struct file_operations openpromfs_prop_ops = { .release = seq_release, }; -static int openpromfs_readdir(struct file *, struct dir_context *); +static int openpromfs_readdir(struct file *, void *, filldir_t); static const struct file_operations openprom_operations = 
{ .read = generic_read_dir, - .iterate = openpromfs_readdir, + .readdir = openpromfs_readdir, .llseek = generic_file_llseek, }; @@ -260,64 +260,71 @@ static struct dentry *openpromfs_lookup(struct inode *dir, struct dentry *dentry return NULL; } -static int openpromfs_readdir(struct file *file, struct dir_context *ctx) +static int openpromfs_readdir(struct file * filp, void * dirent, filldir_t filldir) { - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); struct op_inode_info *oi = OP_I(inode); struct device_node *dp = oi->u.node; struct device_node *child; struct property *prop; + unsigned int ino; int i; mutex_lock(&op_mutex); - if (ctx->pos == 0) { - if (!dir_emit(ctx, ".", 1, inode->i_ino, DT_DIR)) + ino = inode->i_ino; + i = filp->f_pos; + switch (i) { + case 0: + if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) goto out; - ctx->pos = 1; - } - if (ctx->pos == 1) { - if (!dir_emit(ctx, "..", 2, + i++; + filp->f_pos++; + /* fall thru */ + case 1: + if (filldir(dirent, "..", 2, i, (dp->parent == NULL ? OPENPROM_ROOT_INO : - dp->parent->unique_id), DT_DIR)) - goto out; - ctx->pos = 2; - } - i = ctx->pos - 2; - - /* First, the children nodes as directories. */ - child = dp->child; - while (i && child) { - child = child->sibling; - i--; - } - while (child) { - if (!dir_emit(ctx, - child->path_component_name, - strlen(child->path_component_name), - child->unique_id, DT_DIR)) + dp->parent->unique_id), DT_DIR) < 0) goto out; + i++; + filp->f_pos++; + /* fall thru */ + default: + i -= 2; + + /* First, the children nodes as directories. */ + child = dp->child; + while (i && child) { + child = child->sibling; + i--; + } + while (child) { + if (filldir(dirent, + child->path_component_name, + strlen(child->path_component_name), + filp->f_pos, child->unique_id, DT_DIR) < 0) + goto out; + + filp->f_pos++; + child = child->sibling; + } - ctx->pos++; - child = child->sibling; - } - - /* Next, the properties as files. */ - prop = dp->properties; - while (i && prop) { - prop = prop->next; - i--; - } - while (prop) { - if (!dir_emit(ctx, prop->name, strlen(prop->name), - prop->unique_id, DT_REG)) - goto out; + /* Next, the properties as files. */ + prop = dp->properties; + while (i && prop) { + prop = prop->next; + i--; + } + while (prop) { + if (filldir(dirent, prop->name, strlen(prop->name), + filp->f_pos, prop->unique_id, DT_REG) < 0) + goto out; - ctx->pos++; - prop = prop->next; + filp->f_pos++; + prop = prop->next; + } } - out: mutex_unlock(&op_mutex); return 0; diff --git a/trunk/fs/pnode.c b/trunk/fs/pnode.c index 9af0df15256e..3d2a7141b87a 100644 --- a/trunk/fs/pnode.c +++ b/trunk/fs/pnode.c @@ -83,8 +83,7 @@ static int do_make_slave(struct mount *mnt) if (peer_mnt == mnt) peer_mnt = NULL; } - if (mnt->mnt_group_id && IS_MNT_SHARED(mnt) && - list_empty(&mnt->mnt_share)) + if (IS_MNT_SHARED(mnt) && list_empty(&mnt->mnt_share)) mnt_release_group_id(mnt); list_del_init(&mnt->mnt_share); diff --git a/trunk/fs/proc/base.c b/trunk/fs/proc/base.c index 0016350ad95e..dd51e50001fe 100644 --- a/trunk/fs/proc/base.c +++ b/trunk/fs/proc/base.c @@ -1681,11 +1681,11 @@ const struct dentry_operations pid_dentry_operations = * reported by readdir in sync with the inode numbers reported * by stat. 
*/ -bool proc_fill_cache(struct file *file, struct dir_context *ctx, +int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir, const char *name, int len, instantiate_t instantiate, struct task_struct *task, const void *ptr) { - struct dentry *child, *dir = file->f_path.dentry; + struct dentry *child, *dir = filp->f_path.dentry; struct inode *inode; struct qstr qname; ino_t ino = 0; @@ -1720,7 +1720,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx, ino = find_inode_number(dir, &qname); if (!ino) ino = 1; - return dir_emit(ctx, name, len, ino, type); + return filldir(dirent, name, len, filp->f_pos, ino, type); } #ifdef CONFIG_CHECKPOINT_RESTORE @@ -1931,15 +1931,14 @@ static const struct inode_operations proc_map_files_inode_operations = { }; static int -proc_map_files_readdir(struct file *file, struct dir_context *ctx) +proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir) { + struct dentry *dentry = filp->f_path.dentry; + struct inode *inode = dentry->d_inode; struct vm_area_struct *vma; struct task_struct *task; struct mm_struct *mm; - unsigned long nr_files, pos, i; - struct flex_array *fa = NULL; - struct map_files_info info; - struct map_files_info *p; + ino_t ino; int ret; ret = -EPERM; @@ -1947,7 +1946,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx) goto out; ret = -ENOENT; - task = get_proc_task(file_inode(file)); + task = get_proc_task(inode); if (!task) goto out; @@ -1956,73 +1955,91 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx) goto out_put_task; ret = 0; - if (!dir_emit_dots(file, ctx)) - goto out_put_task; + switch (filp->f_pos) { + case 0: + ino = inode->i_ino; + if (filldir(dirent, ".", 1, 0, ino, DT_DIR) < 0) + goto out_put_task; + filp->f_pos++; + case 1: + ino = parent_ino(dentry); + if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0) + goto out_put_task; + filp->f_pos++; + default: + { + unsigned long nr_files, pos, i; + struct flex_array *fa = NULL; + struct map_files_info info; + struct map_files_info *p; + + mm = get_task_mm(task); + if (!mm) + goto out_put_task; + down_read(&mm->mmap_sem); - mm = get_task_mm(task); - if (!mm) - goto out_put_task; - down_read(&mm->mmap_sem); + nr_files = 0; - nr_files = 0; + /* + * We need two passes here: + * + * 1) Collect vmas of mapped files with mmap_sem taken + * 2) Release mmap_sem and instantiate entries + * + * otherwise we get lockdep complained, since filldir() + * routine might require mmap_sem taken in might_fault(). + */ - /* - * We need two passes here: - * - * 1) Collect vmas of mapped files with mmap_sem taken - * 2) Release mmap_sem and instantiate entries - * - * otherwise we get lockdep complained, since filldir() - * routine might require mmap_sem taken in might_fault(). 
- */ + for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) { + if (vma->vm_file && ++pos > filp->f_pos) + nr_files++; + } - for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) { - if (vma->vm_file && ++pos > ctx->pos) - nr_files++; - } - - if (nr_files) { - fa = flex_array_alloc(sizeof(info), nr_files, - GFP_KERNEL); - if (!fa || flex_array_prealloc(fa, 0, nr_files, - GFP_KERNEL)) { - ret = -ENOMEM; - if (fa) - flex_array_free(fa); - up_read(&mm->mmap_sem); - mmput(mm); - goto out_put_task; + if (nr_files) { + fa = flex_array_alloc(sizeof(info), nr_files, + GFP_KERNEL); + if (!fa || flex_array_prealloc(fa, 0, nr_files, + GFP_KERNEL)) { + ret = -ENOMEM; + if (fa) + flex_array_free(fa); + up_read(&mm->mmap_sem); + mmput(mm); + goto out_put_task; + } + for (i = 0, vma = mm->mmap, pos = 2; vma; + vma = vma->vm_next) { + if (!vma->vm_file) + continue; + if (++pos <= filp->f_pos) + continue; + + info.mode = vma->vm_file->f_mode; + info.len = snprintf(info.name, + sizeof(info.name), "%lx-%lx", + vma->vm_start, vma->vm_end); + if (flex_array_put(fa, i++, &info, GFP_KERNEL)) + BUG(); + } } - for (i = 0, vma = mm->mmap, pos = 2; vma; - vma = vma->vm_next) { - if (!vma->vm_file) - continue; - if (++pos <= ctx->pos) - continue; - - info.mode = vma->vm_file->f_mode; - info.len = snprintf(info.name, - sizeof(info.name), "%lx-%lx", - vma->vm_start, vma->vm_end); - if (flex_array_put(fa, i++, &info, GFP_KERNEL)) - BUG(); + up_read(&mm->mmap_sem); + + for (i = 0; i < nr_files; i++) { + p = flex_array_get(fa, i); + ret = proc_fill_cache(filp, dirent, filldir, + p->name, p->len, + proc_map_files_instantiate, + task, + (void *)(unsigned long)p->mode); + if (ret) + break; + filp->f_pos++; } + if (fa) + flex_array_free(fa); + mmput(mm); } - up_read(&mm->mmap_sem); - - for (i = 0; i < nr_files; i++) { - p = flex_array_get(fa, i); - if (!proc_fill_cache(file, ctx, - p->name, p->len, - proc_map_files_instantiate, - task, - (void *)(unsigned long)p->mode)) - break; - ctx->pos++; } - if (fa) - flex_array_free(fa); - mmput(mm); out_put_task: put_task_struct(task); @@ -2032,7 +2049,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx) static const struct file_operations proc_map_files_operations = { .read = generic_read_dir, - .iterate = proc_map_files_readdir, + .readdir = proc_map_files_readdir, .llseek = default_llseek, }; @@ -2101,7 +2118,6 @@ static int show_timer(struct seq_file *m, void *v) nstr[notify & ~SIGEV_THREAD_ID], (notify & SIGEV_THREAD_ID) ? 
"tid" : "pid", pid_nr_ns(timer->it_pid, tp->ns)); - seq_printf(m, "ClockID: %d\n", timer->it_clock); return 0; } @@ -2200,30 +2216,67 @@ static struct dentry *proc_pident_lookup(struct inode *dir, return error; } -static int proc_pident_readdir(struct file *file, struct dir_context *ctx, +static int proc_pident_fill_cache(struct file *filp, void *dirent, + filldir_t filldir, struct task_struct *task, const struct pid_entry *p) +{ + return proc_fill_cache(filp, dirent, filldir, p->name, p->len, + proc_pident_instantiate, task, p); +} + +static int proc_pident_readdir(struct file *filp, + void *dirent, filldir_t filldir, const struct pid_entry *ents, unsigned int nents) { - struct task_struct *task = get_proc_task(file_inode(file)); - const struct pid_entry *p; + int i; + struct dentry *dentry = filp->f_path.dentry; + struct inode *inode = dentry->d_inode; + struct task_struct *task = get_proc_task(inode); + const struct pid_entry *p, *last; + ino_t ino; + int ret; + ret = -ENOENT; if (!task) - return -ENOENT; - - if (!dir_emit_dots(file, ctx)) - goto out; - - if (ctx->pos >= nents + 2) - goto out; + goto out_no_task; - for (p = ents + (ctx->pos - 2); p <= ents + nents - 1; p++) { - if (!proc_fill_cache(file, ctx, p->name, p->len, - proc_pident_instantiate, task, p)) - break; - ctx->pos++; + ret = 0; + i = filp->f_pos; + switch (i) { + case 0: + ino = inode->i_ino; + if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) + goto out; + i++; + filp->f_pos++; + /* fall through */ + case 1: + ino = parent_ino(dentry); + if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0) + goto out; + i++; + filp->f_pos++; + /* fall through */ + default: + i -= 2; + if (i >= nents) { + ret = 1; + goto out; + } + p = ents + i; + last = &ents[nents - 1]; + while (p <= last) { + if (proc_pident_fill_cache(filp, dirent, filldir, task, p) < 0) + goto out; + filp->f_pos++; + p++; + } } + + ret = 1; out: put_task_struct(task); - return 0; +out_no_task: + return ret; } #ifdef CONFIG_SECURITY @@ -2308,15 +2361,16 @@ static const struct pid_entry attr_dir_stuff[] = { REG("sockcreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations), }; -static int proc_attr_dir_readdir(struct file *file, struct dir_context *ctx) +static int proc_attr_dir_readdir(struct file * filp, + void * dirent, filldir_t filldir) { - return proc_pident_readdir(file, ctx, - attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff)); + return proc_pident_readdir(filp,dirent,filldir, + attr_dir_stuff,ARRAY_SIZE(attr_dir_stuff)); } static const struct file_operations proc_attr_dir_operations = { .read = generic_read_dir, - .iterate = proc_attr_dir_readdir, + .readdir = proc_attr_dir_readdir, .llseek = default_llseek, }; @@ -2670,15 +2724,16 @@ static const struct pid_entry tgid_base_stuff[] = { #endif }; -static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) +static int proc_tgid_base_readdir(struct file * filp, + void * dirent, filldir_t filldir) { - return proc_pident_readdir(file, ctx, - tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff)); + return proc_pident_readdir(filp,dirent,filldir, + tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff)); } static const struct file_operations proc_tgid_base_operations = { .read = generic_read_dir, - .iterate = proc_tgid_base_readdir, + .readdir = proc_tgid_base_readdir, .llseek = default_llseek, }; @@ -2880,42 +2935,58 @@ static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter ite #define TGID_OFFSET (FIRST_PROCESS_ENTRY + 1) +static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir, + 
struct tgid_iter iter) +{ + char name[PROC_NUMBUF]; + int len = snprintf(name, sizeof(name), "%d", iter.tgid); + return proc_fill_cache(filp, dirent, filldir, name, len, + proc_pid_instantiate, iter.task, NULL); +} + +static int fake_filldir(void *buf, const char *name, int namelen, + loff_t offset, u64 ino, unsigned d_type) +{ + return 0; +} + /* for the /proc/ directory itself, after non-process stuff has been done */ -int proc_pid_readdir(struct file *file, struct dir_context *ctx) +int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) { struct tgid_iter iter; struct pid_namespace *ns; - loff_t pos = ctx->pos; + filldir_t __filldir; + loff_t pos = filp->f_pos; if (pos >= PID_MAX_LIMIT + TGID_OFFSET) - return 0; + goto out; if (pos == TGID_OFFSET - 1) { - if (!proc_fill_cache(file, ctx, "self", 4, NULL, NULL, NULL)) - return 0; + if (proc_fill_cache(filp, dirent, filldir, "self", 4, + NULL, NULL, NULL) < 0) + goto out; iter.tgid = 0; } else { iter.tgid = pos - TGID_OFFSET; } iter.task = NULL; - ns = file->f_dentry->d_sb->s_fs_info; + ns = filp->f_dentry->d_sb->s_fs_info; for (iter = next_tgid(ns, iter); iter.task; iter.tgid += 1, iter = next_tgid(ns, iter)) { - char name[PROC_NUMBUF]; - int len; - if (!has_pid_permissions(ns, iter.task, 2)) - continue; + if (has_pid_permissions(ns, iter.task, 2)) + __filldir = filldir; + else + __filldir = fake_filldir; - len = snprintf(name, sizeof(name), "%d", iter.tgid); - ctx->pos = iter.tgid + TGID_OFFSET; - if (!proc_fill_cache(file, ctx, name, len, - proc_pid_instantiate, iter.task, NULL)) { + filp->f_pos = iter.tgid + TGID_OFFSET; + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) { put_task_struct(iter.task); - return 0; + goto out; } } - ctx->pos = PID_MAX_LIMIT + TGID_OFFSET; + filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET; +out: return 0; } @@ -3003,10 +3074,11 @@ static const struct pid_entry tid_base_stuff[] = { #endif }; -static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx) +static int proc_tid_base_readdir(struct file * filp, + void * dirent, filldir_t filldir) { - return proc_pident_readdir(file, ctx, - tid_base_stuff, ARRAY_SIZE(tid_base_stuff)); + return proc_pident_readdir(filp,dirent,filldir, + tid_base_stuff,ARRAY_SIZE(tid_base_stuff)); } static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) @@ -3017,7 +3089,7 @@ static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *den static const struct file_operations proc_tid_base_operations = { .read = generic_read_dir, - .iterate = proc_tid_base_readdir, + .readdir = proc_tid_base_readdir, .llseek = default_llseek, }; @@ -3158,16 +3230,30 @@ static struct task_struct *next_tid(struct task_struct *start) return pos; } +static int proc_task_fill_cache(struct file *filp, void *dirent, filldir_t filldir, + struct task_struct *task, int tid) +{ + char name[PROC_NUMBUF]; + int len = snprintf(name, sizeof(name), "%d", tid); + return proc_fill_cache(filp, dirent, filldir, name, len, + proc_task_instantiate, task, NULL); +} + /* for the /proc/TGID/task/ directories */ -static int proc_task_readdir(struct file *file, struct dir_context *ctx) +static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir) { + struct dentry *dentry = filp->f_path.dentry; + struct inode *inode = dentry->d_inode; struct task_struct *leader = NULL; - struct task_struct *task = get_proc_task(file_inode(file)); - struct pid_namespace *ns; + struct task_struct *task; + int retval = 
-ENOENT; + ino_t ino; int tid; + struct pid_namespace *ns; + task = get_proc_task(inode); if (!task) - return -ENOENT; + goto out_no_task; rcu_read_lock(); if (pid_alive(task)) { leader = task->group_leader; @@ -3176,36 +3262,46 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx) rcu_read_unlock(); put_task_struct(task); if (!leader) - return -ENOENT; + goto out_no_task; + retval = 0; - if (!dir_emit_dots(file, ctx)) - goto out; + switch ((unsigned long)filp->f_pos) { + case 0: + ino = inode->i_ino; + if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) < 0) + goto out; + filp->f_pos++; + /* fall through */ + case 1: + ino = parent_ino(dentry); + if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) < 0) + goto out; + filp->f_pos++; + /* fall through */ + } /* f_version caches the tgid value that the last readdir call couldn't * return. lseek aka telldir automagically resets f_version to 0. */ - ns = file->f_dentry->d_sb->s_fs_info; - tid = (int)file->f_version; - file->f_version = 0; - for (task = first_tid(leader, tid, ctx->pos - 2, ns); + ns = filp->f_dentry->d_sb->s_fs_info; + tid = (int)filp->f_version; + filp->f_version = 0; + for (task = first_tid(leader, tid, filp->f_pos - 2, ns); task; - task = next_tid(task), ctx->pos++) { - char name[PROC_NUMBUF]; - int len; + task = next_tid(task), filp->f_pos++) { tid = task_pid_nr_ns(task, ns); - len = snprintf(name, sizeof(name), "%d", tid); - if (!proc_fill_cache(file, ctx, name, len, - proc_task_instantiate, task, NULL)) { + if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) { /* returning this tgid failed, save it as the first * pid for the next readir call */ - file->f_version = (u64)tid; + filp->f_version = (u64)tid; put_task_struct(task); break; } } out: put_task_struct(leader); - return 0; +out_no_task: + return retval; } static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) @@ -3231,6 +3327,6 @@ static const struct inode_operations proc_task_inode_operations = { static const struct file_operations proc_task_operations = { .read = generic_read_dir, - .iterate = proc_task_readdir, + .readdir = proc_task_readdir, .llseek = default_llseek, }; diff --git a/trunk/fs/proc/fd.c b/trunk/fs/proc/fd.c index 1441f143c43b..d7a4a28ef630 100644 --- a/trunk/fs/proc/fd.c +++ b/trunk/fs/proc/fd.c @@ -219,58 +219,74 @@ static struct dentry *proc_lookupfd_common(struct inode *dir, return result; } -static int proc_readfd_common(struct file *file, struct dir_context *ctx, - instantiate_t instantiate) +static int proc_readfd_common(struct file * filp, void * dirent, + filldir_t filldir, instantiate_t instantiate) { - struct task_struct *p = get_proc_task(file_inode(file)); + struct dentry *dentry = filp->f_path.dentry; + struct inode *inode = dentry->d_inode; + struct task_struct *p = get_proc_task(inode); struct files_struct *files; - unsigned int fd; + unsigned int fd, ino; + int retval; + retval = -ENOENT; if (!p) - return -ENOENT; - - if (!dir_emit_dots(file, ctx)) - goto out; - if (!dir_emit_dots(file, ctx)) - goto out; - files = get_files_struct(p); - if (!files) - goto out; + goto out_no_task; + retval = 0; + + fd = filp->f_pos; + switch (fd) { + case 0: + if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) + goto out; + filp->f_pos++; + case 1: + ino = parent_ino(dentry); + if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0) + goto out; + filp->f_pos++; + default: + files = get_files_struct(p); + if (!files) + goto out; + rcu_read_lock(); + for (fd = filp->f_pos - 
2; + fd < files_fdtable(files)->max_fds; + fd++, filp->f_pos++) { + char name[PROC_NUMBUF]; + int len; + int rv; + + if (!fcheck_files(files, fd)) + continue; + rcu_read_unlock(); - rcu_read_lock(); - for (fd = ctx->pos - 2; - fd < files_fdtable(files)->max_fds; - fd++, ctx->pos++) { - char name[PROC_NUMBUF]; - int len; - - if (!fcheck_files(files, fd)) - continue; - rcu_read_unlock(); - - len = snprintf(name, sizeof(name), "%d", fd); - if (!proc_fill_cache(file, ctx, - name, len, instantiate, p, - (void *)(unsigned long)fd)) - goto out_fd_loop; - rcu_read_lock(); - } - rcu_read_unlock(); + len = snprintf(name, sizeof(name), "%d", fd); + rv = proc_fill_cache(filp, dirent, filldir, + name, len, instantiate, p, + (void *)(unsigned long)fd); + if (rv < 0) + goto out_fd_loop; + rcu_read_lock(); + } + rcu_read_unlock(); out_fd_loop: - put_files_struct(files); + put_files_struct(files); + } out: put_task_struct(p); - return 0; +out_no_task: + return retval; } -static int proc_readfd(struct file *file, struct dir_context *ctx) +static int proc_readfd(struct file *filp, void *dirent, filldir_t filldir) { - return proc_readfd_common(file, ctx, proc_fd_instantiate); + return proc_readfd_common(filp, dirent, filldir, proc_fd_instantiate); } const struct file_operations proc_fd_operations = { .read = generic_read_dir, - .iterate = proc_readfd, + .readdir = proc_readfd, .llseek = default_llseek, }; @@ -335,9 +351,9 @@ proc_lookupfdinfo(struct inode *dir, struct dentry *dentry, unsigned int flags) return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate); } -static int proc_readfdinfo(struct file *file, struct dir_context *ctx) +static int proc_readfdinfo(struct file *filp, void *dirent, filldir_t filldir) { - return proc_readfd_common(file, ctx, + return proc_readfd_common(filp, dirent, filldir, proc_fdinfo_instantiate); } @@ -348,6 +364,6 @@ const struct inode_operations proc_fdinfo_inode_operations = { const struct file_operations proc_fdinfo_operations = { .read = generic_read_dir, - .iterate = proc_readfdinfo, + .readdir = proc_readfdinfo, .llseek = default_llseek, }; diff --git a/trunk/fs/proc/generic.c b/trunk/fs/proc/generic.c index 94441a407337..a2596afffae6 100644 --- a/trunk/fs/proc/generic.c +++ b/trunk/fs/proc/generic.c @@ -233,52 +233,76 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry, * value of the readdir() call, as long as it's non-negative * for success.. 
*/ -int proc_readdir_de(struct proc_dir_entry *de, struct file *file, - struct dir_context *ctx) +int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent, + filldir_t filldir) { + unsigned int ino; int i; + struct inode *inode = file_inode(filp); + int ret = 0; + + ino = inode->i_ino; + i = filp->f_pos; + switch (i) { + case 0: + if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) + goto out; + i++; + filp->f_pos++; + /* fall through */ + case 1: + if (filldir(dirent, "..", 2, i, + parent_ino(filp->f_path.dentry), + DT_DIR) < 0) + goto out; + i++; + filp->f_pos++; + /* fall through */ + default: + spin_lock(&proc_subdir_lock); + de = de->subdir; + i -= 2; + for (;;) { + if (!de) { + ret = 1; + spin_unlock(&proc_subdir_lock); + goto out; + } + if (!i) + break; + de = de->next; + i--; + } - if (!dir_emit_dots(file, ctx)) - return 0; - - spin_lock(&proc_subdir_lock); - de = de->subdir; - i = ctx->pos - 2; - for (;;) { - if (!de) { + do { + struct proc_dir_entry *next; + + /* filldir passes info to user space */ + pde_get(de); + spin_unlock(&proc_subdir_lock); + if (filldir(dirent, de->name, de->namelen, filp->f_pos, + de->low_ino, de->mode >> 12) < 0) { + pde_put(de); + goto out; + } + spin_lock(&proc_subdir_lock); + filp->f_pos++; + next = de->next; + pde_put(de); + de = next; + } while (de); spin_unlock(&proc_subdir_lock); - return 0; - } - if (!i) - break; - de = de->next; - i--; } - - do { - struct proc_dir_entry *next; - pde_get(de); - spin_unlock(&proc_subdir_lock); - if (!dir_emit(ctx, de->name, de->namelen, - de->low_ino, de->mode >> 12)) { - pde_put(de); - return 0; - } - spin_lock(&proc_subdir_lock); - ctx->pos++; - next = de->next; - pde_put(de); - de = next; - } while (de); - spin_unlock(&proc_subdir_lock); - return 0; + ret = 1; +out: + return ret; } -int proc_readdir(struct file *file, struct dir_context *ctx) +int proc_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); - return proc_readdir_de(PDE(inode), file, ctx); + return proc_readdir_de(PDE(inode), filp, dirent, filldir); } /* @@ -289,7 +313,7 @@ int proc_readdir(struct file *file, struct dir_context *ctx) static const struct file_operations proc_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = proc_readdir, + .readdir = proc_readdir, }; /* diff --git a/trunk/fs/proc/internal.h b/trunk/fs/proc/internal.h index 4eae2e149f31..d600fb098b6a 100644 --- a/trunk/fs/proc/internal.h +++ b/trunk/fs/proc/internal.h @@ -165,14 +165,14 @@ extern int proc_setattr(struct dentry *, struct iattr *); extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *); extern int pid_revalidate(struct dentry *, unsigned int); extern int pid_delete_dentry(const struct dentry *); -extern int proc_pid_readdir(struct file *, struct dir_context *); +extern int proc_pid_readdir(struct file *, void *, filldir_t); extern struct dentry *proc_pid_lookup(struct inode *, struct dentry *, unsigned int); extern loff_t mem_lseek(struct file *, loff_t, int); /* Lookups */ typedef struct dentry *instantiate_t(struct inode *, struct dentry *, struct task_struct *, const void *); -extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, int, +extern int proc_fill_cache(struct file *, void *, filldir_t, const char *, int, instantiate_t, struct task_struct *, const void *); /* @@ -183,8 +183,8 @@ extern spinlock_t proc_subdir_lock; extern struct dentry *proc_lookup(struct 
inode *, struct dentry *, unsigned int); extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *, struct dentry *); -extern int proc_readdir(struct file *, struct dir_context *); -extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *); +extern int proc_readdir(struct file *, void *, filldir_t); +extern int proc_readdir_de(struct proc_dir_entry *, struct file *, void *, filldir_t); static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde) { diff --git a/trunk/fs/proc/kmsg.c b/trunk/fs/proc/kmsg.c index bdfabdaefdce..bd4b5a740ff1 100644 --- a/trunk/fs/proc/kmsg.c +++ b/trunk/fs/proc/kmsg.c @@ -21,12 +21,12 @@ extern wait_queue_head_t log_wait; static int kmsg_open(struct inode * inode, struct file * file) { - return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_PROC); + return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE); } static int kmsg_release(struct inode * inode, struct file * file) { - (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_PROC); + (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE); return 0; } @@ -34,15 +34,15 @@ static ssize_t kmsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { if ((file->f_flags & O_NONBLOCK) && - !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC)) + !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE)) return -EAGAIN; - return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_PROC); + return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE); } static unsigned int kmsg_poll(struct file *file, poll_table *wait) { poll_wait(file, &log_wait, wait); - if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC)) + if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE)) return POLLIN | POLLRDNORM; return 0; } diff --git a/trunk/fs/proc/namespaces.c b/trunk/fs/proc/namespaces.c index f6abbbbfad8a..54bdc6701e9f 100644 --- a/trunk/fs/proc/namespaces.c +++ b/trunk/fs/proc/namespaces.c @@ -213,36 +213,74 @@ static struct dentry *proc_ns_instantiate(struct inode *dir, return error; } -static int proc_ns_dir_readdir(struct file *file, struct dir_context *ctx) +static int proc_ns_fill_cache(struct file *filp, void *dirent, + filldir_t filldir, struct task_struct *task, + const struct proc_ns_operations *ops) { - struct task_struct *task = get_proc_task(file_inode(file)); + return proc_fill_cache(filp, dirent, filldir, + ops->name, strlen(ops->name), + proc_ns_instantiate, task, ops); +} + +static int proc_ns_dir_readdir(struct file *filp, void *dirent, + filldir_t filldir) +{ + int i; + struct dentry *dentry = filp->f_path.dentry; + struct inode *inode = dentry->d_inode; + struct task_struct *task = get_proc_task(inode); const struct proc_ns_operations **entry, **last; + ino_t ino; + int ret; + ret = -ENOENT; if (!task) - return -ENOENT; + goto out_no_task; - if (!dir_emit_dots(file, ctx)) - goto out; - if (ctx->pos >= 2 + ARRAY_SIZE(ns_entries)) - goto out; - entry = ns_entries + (ctx->pos - 2); - last = &ns_entries[ARRAY_SIZE(ns_entries) - 1]; - while (entry <= last) { - const struct proc_ns_operations *ops = *entry; - if (!proc_fill_cache(file, ctx, ops->name, strlen(ops->name), - proc_ns_instantiate, task, ops)) - break; - ctx->pos++; - entry++; + ret = 0; + i = filp->f_pos; + switch (i) { + case 0: + ino = inode->i_ino; + if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) + goto out; + i++; + filp->f_pos++; + /* fall through */ + case 1: + ino = parent_ino(dentry); + if 
(filldir(dirent, "..", 2, i, ino, DT_DIR) < 0) + goto out; + i++; + filp->f_pos++; + /* fall through */ + default: + i -= 2; + if (i >= ARRAY_SIZE(ns_entries)) { + ret = 1; + goto out; + } + entry = ns_entries + i; + last = &ns_entries[ARRAY_SIZE(ns_entries) - 1]; + while (entry <= last) { + if (proc_ns_fill_cache(filp, dirent, filldir, + task, *entry) < 0) + goto out; + filp->f_pos++; + entry++; + } } + + ret = 1; out: put_task_struct(task); - return 0; +out_no_task: + return ret; } const struct file_operations proc_ns_dir_operations = { .read = generic_read_dir, - .iterate = proc_ns_dir_readdir, + .readdir = proc_ns_dir_readdir, }; static struct dentry *proc_ns_dir_lookup(struct inode *dir, diff --git a/trunk/fs/proc/proc_net.c b/trunk/fs/proc/proc_net.c index 4677bb7dc7c2..986e83220d56 100644 --- a/trunk/fs/proc/proc_net.c +++ b/trunk/fs/proc/proc_net.c @@ -160,15 +160,16 @@ const struct inode_operations proc_net_inode_operations = { .getattr = proc_tgid_net_getattr, }; -static int proc_tgid_net_readdir(struct file *file, struct dir_context *ctx) +static int proc_tgid_net_readdir(struct file *filp, void *dirent, + filldir_t filldir) { int ret; struct net *net; ret = -EINVAL; - net = get_proc_task_net(file_inode(file)); + net = get_proc_task_net(file_inode(filp)); if (net != NULL) { - ret = proc_readdir_de(net->proc_net, file, ctx); + ret = proc_readdir_de(net->proc_net, filp, dirent, filldir); put_net(net); } return ret; @@ -177,7 +178,7 @@ static int proc_tgid_net_readdir(struct file *file, struct dir_context *ctx) const struct file_operations proc_net_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = proc_tgid_net_readdir, + .readdir = proc_tgid_net_readdir, }; static __net_init int proc_net_ns_init(struct net *net) diff --git a/trunk/fs/proc/proc_sysctl.c b/trunk/fs/proc/proc_sysctl.c index f3a570e7c257..ac05f33a0dde 100644 --- a/trunk/fs/proc/proc_sysctl.c +++ b/trunk/fs/proc/proc_sysctl.c @@ -573,12 +573,12 @@ static unsigned int proc_sys_poll(struct file *filp, poll_table *wait) return ret; } -static bool proc_sys_fill_cache(struct file *file, - struct dir_context *ctx, +static int proc_sys_fill_cache(struct file *filp, void *dirent, + filldir_t filldir, struct ctl_table_header *head, struct ctl_table *table) { - struct dentry *child, *dir = file->f_path.dentry; + struct dentry *child, *dir = filp->f_path.dentry; struct inode *inode; struct qstr qname; ino_t ino = 0; @@ -595,38 +595,38 @@ static bool proc_sys_fill_cache(struct file *file, inode = proc_sys_make_inode(dir->d_sb, head, table); if (!inode) { dput(child); - return false; + return -ENOMEM; } else { d_set_d_op(child, &proc_sys_dentry_operations); d_add(child, inode); } } else { - return false; + return -ENOMEM; } } inode = child->d_inode; ino = inode->i_ino; type = inode->i_mode >> 12; dput(child); - return dir_emit(ctx, qname.name, qname.len, ino, type); + return !!filldir(dirent, qname.name, qname.len, filp->f_pos, ino, type); } -static bool proc_sys_link_fill_cache(struct file *file, - struct dir_context *ctx, +static int proc_sys_link_fill_cache(struct file *filp, void *dirent, + filldir_t filldir, struct ctl_table_header *head, struct ctl_table *table) { - bool ret = true; + int err, ret = 0; head = sysctl_head_grab(head); if (S_ISLNK(table->mode)) { /* It is not an error if we can not follow the link ignore it */ - int err = sysctl_follow_link(&head, &table, current->nsproxy); + err = sysctl_follow_link(&head, &table, current->nsproxy); if (err) goto out; } - ret = 
proc_sys_fill_cache(file, ctx, head, table); + ret = proc_sys_fill_cache(filp, dirent, filldir, head, table); out: sysctl_head_finish(head); return ret; @@ -634,50 +634,67 @@ static bool proc_sys_link_fill_cache(struct file *file, static int scan(struct ctl_table_header *head, ctl_table *table, unsigned long *pos, struct file *file, - struct dir_context *ctx) + void *dirent, filldir_t filldir) { - bool res; + int res; - if ((*pos)++ < ctx->pos) - return true; + if ((*pos)++ < file->f_pos) + return 0; if (unlikely(S_ISLNK(table->mode))) - res = proc_sys_link_fill_cache(file, ctx, head, table); + res = proc_sys_link_fill_cache(file, dirent, filldir, head, table); else - res = proc_sys_fill_cache(file, ctx, head, table); + res = proc_sys_fill_cache(file, dirent, filldir, head, table); - if (res) - ctx->pos = *pos; + if (res == 0) + file->f_pos = *pos; return res; } -static int proc_sys_readdir(struct file *file, struct dir_context *ctx) +static int proc_sys_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct ctl_table_header *head = grab_header(file_inode(file)); + struct dentry *dentry = filp->f_path.dentry; + struct inode *inode = dentry->d_inode; + struct ctl_table_header *head = grab_header(inode); struct ctl_table_header *h = NULL; struct ctl_table *entry; struct ctl_dir *ctl_dir; unsigned long pos; + int ret = -EINVAL; if (IS_ERR(head)) return PTR_ERR(head); ctl_dir = container_of(head, struct ctl_dir, header); - if (!dir_emit_dots(file, ctx)) - return 0; - + ret = 0; + /* Avoid a switch here: arm builds fail with missing __cmpdi2 */ + if (filp->f_pos == 0) { + if (filldir(dirent, ".", 1, filp->f_pos, + inode->i_ino, DT_DIR) < 0) + goto out; + filp->f_pos++; + } + if (filp->f_pos == 1) { + if (filldir(dirent, "..", 2, filp->f_pos, + parent_ino(dentry), DT_DIR) < 0) + goto out; + filp->f_pos++; + } pos = 2; for (first_entry(ctl_dir, &h, &entry); h; next_entry(&h, &entry)) { - if (!scan(h, entry, &pos, file, ctx)) { + ret = scan(h, entry, &pos, filp, dirent, filldir); + if (ret) { sysctl_head_finish(h); break; } } + ret = 1; +out: sysctl_head_finish(head); - return 0; + return ret; } static int proc_sys_permission(struct inode *inode, int mask) @@ -752,7 +769,7 @@ static const struct file_operations proc_sys_file_operations = { static const struct file_operations proc_sys_dir_file_operations = { .read = generic_read_dir, - .iterate = proc_sys_readdir, + .readdir = proc_sys_readdir, .llseek = generic_file_llseek, }; diff --git a/trunk/fs/proc/root.c b/trunk/fs/proc/root.c index 229e366598da..41a6ea93f486 100644 --- a/trunk/fs/proc/root.c +++ b/trunk/fs/proc/root.c @@ -202,14 +202,21 @@ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentr return proc_pid_lookup(dir, dentry, flags); } -static int proc_root_readdir(struct file *file, struct dir_context *ctx) +static int proc_root_readdir(struct file * filp, + void * dirent, filldir_t filldir) { - if (ctx->pos < FIRST_PROCESS_ENTRY) { - proc_readdir(file, ctx); - ctx->pos = FIRST_PROCESS_ENTRY; + unsigned int nr = filp->f_pos; + int ret; + + if (nr < FIRST_PROCESS_ENTRY) { + int error = proc_readdir(filp, dirent, filldir); + if (error <= 0) + return error; + filp->f_pos = FIRST_PROCESS_ENTRY; } - return proc_pid_readdir(file, ctx); + ret = proc_pid_readdir(filp, dirent, filldir); + return ret; } /* @@ -219,7 +226,7 @@ static int proc_root_readdir(struct file *file, struct dir_context *ctx) */ static const struct file_operations proc_root_operations = { .read = generic_read_dir, - .iterate = 
proc_root_readdir, + .readdir = proc_root_readdir, .llseek = default_llseek, }; diff --git a/trunk/fs/qnx4/dir.c b/trunk/fs/qnx4/dir.c index b218f965817b..28ce014b3cef 100644 --- a/trunk/fs/qnx4/dir.c +++ b/trunk/fs/qnx4/dir.c @@ -14,9 +14,9 @@ #include #include "qnx4.h" -static int qnx4_readdir(struct file *file, struct dir_context *ctx) +static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); unsigned int offset; struct buffer_head *bh; struct qnx4_inode_entry *de; @@ -26,44 +26,48 @@ static int qnx4_readdir(struct file *file, struct dir_context *ctx) int size; QNX4DEBUG((KERN_INFO "qnx4_readdir:i_size = %ld\n", (long) inode->i_size)); - QNX4DEBUG((KERN_INFO "pos = %ld\n", (long) ctx->pos)); + QNX4DEBUG((KERN_INFO "filp->f_pos = %ld\n", (long) filp->f_pos)); - while (ctx->pos < inode->i_size) { - blknum = qnx4_block_map(inode, ctx->pos >> QNX4_BLOCK_SIZE_BITS); + while (filp->f_pos < inode->i_size) { + blknum = qnx4_block_map( inode, filp->f_pos >> QNX4_BLOCK_SIZE_BITS ); bh = sb_bread(inode->i_sb, blknum); - if (bh == NULL) { + if(bh==NULL) { printk(KERN_ERR "qnx4_readdir: bread failed (%ld)\n", blknum); - return 0; + break; } - ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK; - for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) { + ix = (int)(filp->f_pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK; + while (ix < QNX4_INODES_PER_BLOCK) { offset = ix * QNX4_DIR_ENTRY_SIZE; de = (struct qnx4_inode_entry *) (bh->b_data + offset); - if (!de->di_fname[0]) - continue; - if (!(de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK))) - continue; - if (!(de->di_status & QNX4_FILE_LINK)) - size = QNX4_SHORT_NAME_MAX; - else - size = QNX4_NAME_MAX; - size = strnlen(de->di_fname, size); - QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname)); - if (!(de->di_status & QNX4_FILE_LINK)) - ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1; - else { - le = (struct qnx4_link_info*)de; - ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) * - QNX4_INODES_PER_BLOCK + - le->dl_inode_ndx; - } - if (!dir_emit(ctx, de->di_fname, size, ino, DT_UNKNOWN)) { - brelse(bh); - return 0; + size = strlen(de->di_fname); + if (size) { + if ( !( de->di_status & QNX4_FILE_LINK ) && size > QNX4_SHORT_NAME_MAX ) + size = QNX4_SHORT_NAME_MAX; + else if ( size > QNX4_NAME_MAX ) + size = QNX4_NAME_MAX; + + if ( ( de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK) ) != 0 ) { + QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname)); + if ( ( de->di_status & QNX4_FILE_LINK ) == 0 ) + ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1; + else { + le = (struct qnx4_link_info*)de; + ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) * + QNX4_INODES_PER_BLOCK + + le->dl_inode_ndx; + } + if (filldir(dirent, de->di_fname, size, filp->f_pos, ino, DT_UNKNOWN) < 0) { + brelse(bh); + goto out; + } + } } + ix++; + filp->f_pos += QNX4_DIR_ENTRY_SIZE; } brelse(bh); } +out: return 0; } @@ -71,7 +75,7 @@ const struct file_operations qnx4_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = qnx4_readdir, + .readdir = qnx4_readdir, .fsync = generic_file_fsync, }; diff --git a/trunk/fs/qnx6/dir.c b/trunk/fs/qnx6/dir.c index 15b7d92ed60d..8798d065e400 100644 --- a/trunk/fs/qnx6/dir.c +++ b/trunk/fs/qnx6/dir.c @@ -65,8 +65,8 @@ static struct qnx6_long_filename *qnx6_longname(struct super_block *sb, static int qnx6_dir_longfilename(struct inode *inode, struct 
qnx6_long_dir_entry *de, - struct dir_context *ctx, - unsigned de_inode) + void *dirent, loff_t pos, + unsigned de_inode, filldir_t filldir) { struct qnx6_long_filename *lf; struct super_block *s = inode->i_sb; @@ -104,7 +104,8 @@ static int qnx6_dir_longfilename(struct inode *inode, QNX6DEBUG((KERN_INFO "qnx6_readdir:%.*s inode:%u\n", lf_size, lf->lf_fname, de_inode)); - if (!dir_emit(ctx, lf->lf_fname, lf_size, de_inode, DT_UNKNOWN)) { + if (filldir(dirent, lf->lf_fname, lf_size, pos, de_inode, + DT_UNKNOWN) < 0) { qnx6_put_page(page); return 0; } @@ -114,19 +115,18 @@ static int qnx6_dir_longfilename(struct inode *inode, return 1; } -static int qnx6_readdir(struct file *file, struct dir_context *ctx) +static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); struct super_block *s = inode->i_sb; struct qnx6_sb_info *sbi = QNX6_SB(s); - loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1); + loff_t pos = filp->f_pos & (QNX6_DIR_ENTRY_SIZE - 1); unsigned long npages = dir_pages(inode); unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE; bool done = false; - ctx->pos = pos; - if (ctx->pos >= inode->i_size) + if (filp->f_pos >= inode->i_size) return 0; for ( ; !done && n < npages; n++, start = 0) { @@ -137,11 +137,11 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx) if (IS_ERR(page)) { printk(KERN_ERR "qnx6_readdir: read failed\n"); - ctx->pos = (n + 1) << PAGE_CACHE_SHIFT; + filp->f_pos = (n + 1) << PAGE_CACHE_SHIFT; return PTR_ERR(page); } de = ((struct qnx6_dir_entry *)page_address(page)) + start; - for (; i < limit; i++, de++, ctx->pos += QNX6_DIR_ENTRY_SIZE) { + for (; i < limit; i++, de++, pos += QNX6_DIR_ENTRY_SIZE) { int size = de->de_size; u32 no_inode = fs32_to_cpu(sbi, de->de_inode); @@ -154,7 +154,8 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx) structure / block */ if (!qnx6_dir_longfilename(inode, (struct qnx6_long_dir_entry *)de, - ctx, no_inode)) { + dirent, pos, no_inode, + filldir)) { done = true; break; } @@ -162,8 +163,9 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx) QNX6DEBUG((KERN_INFO "qnx6_readdir:%.*s" " inode:%u\n", size, de->de_fname, no_inode)); - if (!dir_emit(ctx, de->de_fname, size, - no_inode, DT_UNKNOWN)) { + if (filldir(dirent, de->de_fname, size, + pos, no_inode, DT_UNKNOWN) + < 0) { done = true; break; } @@ -171,6 +173,7 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx) } qnx6_put_page(page); } + filp->f_pos = pos; return 0; } @@ -279,7 +282,7 @@ unsigned qnx6_find_entry(int len, struct inode *dir, const char *name, const struct file_operations qnx6_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = qnx6_readdir, + .readdir = qnx6_readdir, .fsync = generic_file_fsync, }; diff --git a/trunk/fs/read_write.c b/trunk/fs/read_write.c index 2cefa417be34..03430008704e 100644 --- a/trunk/fs/read_write.c +++ b/trunk/fs/read_write.c @@ -1064,7 +1064,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, struct fd in, out; struct inode *in_inode, *out_inode; loff_t pos; - loff_t out_pos; ssize_t retval; int fl; @@ -1078,14 +1077,12 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, if (!(in.file->f_mode & FMODE_READ)) goto fput_in; retval = -ESPIPE; - if (!ppos) { - pos = in.file->f_pos; - } else { - pos = *ppos; + if (!ppos) + ppos = &in.file->f_pos; + 
else if (!(in.file->f_mode & FMODE_PREAD)) goto fput_in; - } - retval = rw_verify_area(READ, in.file, &pos, count); + retval = rw_verify_area(READ, in.file, ppos, count); if (retval < 0) goto fput_in; count = retval; @@ -1102,8 +1099,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, retval = -EINVAL; in_inode = file_inode(in.file); out_inode = file_inode(out.file); - out_pos = out.file->f_pos; - retval = rw_verify_area(WRITE, out.file, &out_pos, count); + retval = rw_verify_area(WRITE, out.file, &out.file->f_pos, count); if (retval < 0) goto fput_out; count = retval; @@ -1111,6 +1107,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, if (!max) max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes); + pos = *ppos; if (unlikely(pos + count > max)) { retval = -EOVERFLOW; if (pos >= max) @@ -1129,23 +1126,18 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, if (in.file->f_flags & O_NONBLOCK) fl = SPLICE_F_NONBLOCK; #endif - retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl); + retval = do_splice_direct(in.file, ppos, out.file, count, fl); if (retval > 0) { add_rchar(current, retval); add_wchar(current, retval); fsnotify_access(in.file); fsnotify_modify(out.file); - out.file->f_pos = out_pos; - if (ppos) - *ppos = pos; - else - in.file->f_pos = pos; } inc_syscr(current); inc_syscw(current); - if (pos > max) + if (*ppos > max) retval = -EOVERFLOW; fput_out: diff --git a/trunk/fs/readdir.c b/trunk/fs/readdir.c index 93d71e574310..fee38e04fae4 100644 --- a/trunk/fs/readdir.c +++ b/trunk/fs/readdir.c @@ -20,11 +20,11 @@ #include -int iterate_dir(struct file *file, struct dir_context *ctx) +int vfs_readdir(struct file *file, filldir_t filler, void *buf) { struct inode *inode = file_inode(file); int res = -ENOTDIR; - if (!file->f_op || !file->f_op->iterate) + if (!file->f_op || !file->f_op->readdir) goto out; res = security_file_permission(file, MAY_READ); @@ -37,16 +37,15 @@ int iterate_dir(struct file *file, struct dir_context *ctx) res = -ENOENT; if (!IS_DEADDIR(inode)) { - ctx->pos = file->f_pos; - res = file->f_op->iterate(file, ctx); - file->f_pos = ctx->pos; + res = file->f_op->readdir(file, buf, filler); file_accessed(file); } mutex_unlock(&inode->i_mutex); out: return res; } -EXPORT_SYMBOL(iterate_dir); + +EXPORT_SYMBOL(vfs_readdir); /* * Traditional linux readdir() handling.. 
@@ -67,7 +66,6 @@ struct old_linux_dirent { }; struct readdir_callback { - struct dir_context ctx; struct old_linux_dirent __user * dirent; int result; }; @@ -75,7 +73,7 @@ struct readdir_callback { static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { - struct readdir_callback *buf = (struct readdir_callback *) __buf; + struct readdir_callback * buf = (struct readdir_callback *) __buf; struct old_linux_dirent __user * dirent; unsigned long d_ino; @@ -109,15 +107,15 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd, { int error; struct fd f = fdget(fd); - struct readdir_callback buf = { - .ctx.actor = fillonedir, - .dirent = dirent - }; + struct readdir_callback buf; if (!f.file) return -EBADF; - error = iterate_dir(f.file, &buf.ctx); + buf.result = 0; + buf.dirent = dirent; + + error = vfs_readdir(f.file, fillonedir, &buf); if (buf.result) error = buf.result; @@ -139,7 +137,6 @@ struct linux_dirent { }; struct getdents_callback { - struct dir_context ctx; struct linux_dirent __user * current_dir; struct linux_dirent __user * previous; int count; @@ -194,11 +191,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, { struct fd f; struct linux_dirent __user * lastdirent; - struct getdents_callback buf = { - .ctx.actor = filldir, - .count = count, - .current_dir = dirent - }; + struct getdents_callback buf; int error; if (!access_ok(VERIFY_WRITE, dirent, count)) @@ -208,12 +201,17 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, if (!f.file) return -EBADF; - error = iterate_dir(f.file, &buf.ctx); + buf.current_dir = dirent; + buf.previous = NULL; + buf.count = count; + buf.error = 0; + + error = vfs_readdir(f.file, filldir, &buf); if (error >= 0) error = buf.error; lastdirent = buf.previous; if (lastdirent) { - if (put_user(buf.ctx.pos, &lastdirent->d_off)) + if (put_user(f.file->f_pos, &lastdirent->d_off)) error = -EFAULT; else error = count - buf.count; @@ -223,7 +221,6 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, } struct getdents_callback64 { - struct dir_context ctx; struct linux_dirent64 __user * current_dir; struct linux_dirent64 __user * previous; int count; @@ -274,11 +271,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, { struct fd f; struct linux_dirent64 __user * lastdirent; - struct getdents_callback64 buf = { - .ctx.actor = filldir64, - .count = count, - .current_dir = dirent - }; + struct getdents_callback64 buf; int error; if (!access_ok(VERIFY_WRITE, dirent, count)) @@ -288,12 +281,17 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, if (!f.file) return -EBADF; - error = iterate_dir(f.file, &buf.ctx); + buf.current_dir = dirent; + buf.previous = NULL; + buf.count = count; + buf.error = 0; + + error = vfs_readdir(f.file, filldir64, &buf); if (error >= 0) error = buf.error; lastdirent = buf.previous; if (lastdirent) { - typeof(lastdirent->d_off) d_off = buf.ctx.pos; + typeof(lastdirent->d_off) d_off = f.file->f_pos; if (__put_user(d_off, &lastdirent->d_off)) error = -EFAULT; else diff --git a/trunk/fs/reiserfs/dir.c b/trunk/fs/reiserfs/dir.c index 03e4ca5624d6..66c53b642a88 100644 --- a/trunk/fs/reiserfs/dir.c +++ b/trunk/fs/reiserfs/dir.c @@ -13,14 +13,14 @@ extern const struct reiserfs_key MIN_KEY; -static int reiserfs_readdir(struct file *, struct dir_context *); +static int reiserfs_readdir(struct file *, void *, filldir_t); static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end, int datasync); const struct file_operations reiserfs_dir_operations = { .llseek = generic_file_llseek, .read = 
generic_read_dir, - .iterate = reiserfs_readdir, + .readdir = reiserfs_readdir, .fsync = reiserfs_dir_fsync, .unlocked_ioctl = reiserfs_ioctl, #ifdef CONFIG_COMPAT @@ -50,15 +50,18 @@ static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end, #define store_ih(where,what) copy_item_head (where, what) -static inline bool is_privroot_deh(struct inode *dir, struct reiserfs_de_head *deh) +static inline bool is_privroot_deh(struct dentry *dir, + struct reiserfs_de_head *deh) { - struct dentry *privroot = REISERFS_SB(dir->i_sb)->priv_root; - return (privroot->d_inode && + struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root; + return (dir == dir->d_parent && privroot->d_inode && deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid); } -int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx) +int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent, + filldir_t filldir, loff_t *pos) { + struct inode *inode = dentry->d_inode; struct cpu_key pos_key; /* key of current position in the directory (key of directory entry) */ INITIALIZE_PATH(path_to_entry); struct buffer_head *bh; @@ -78,7 +81,7 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx) /* form key for search the next directory entry using f_pos field of file structure */ - make_cpu_key(&pos_key, inode, ctx->pos ?: DOT_OFFSET, TYPE_DIRENTRY, 3); + make_cpu_key(&pos_key, inode, *pos ?: DOT_OFFSET, TYPE_DIRENTRY, 3); next_pos = cpu_key_k_offset(&pos_key); path_to_entry.reada = PATH_READA; @@ -123,6 +126,7 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx) entry_num++, deh++) { int d_reclen; char *d_name; + off_t d_off; ino_t d_ino; if (!de_visible(deh)) @@ -151,10 +155,11 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx) } /* Ignore the .reiserfs_priv entry */ - if (is_privroot_deh(inode, deh)) + if (is_privroot_deh(dentry, deh)) continue; - ctx->pos = deh_offset(deh); + d_off = deh_offset(deh); + *pos = d_off; d_ino = deh_objectid(deh); if (d_reclen <= 32) { local_buf = small_buf; @@ -182,9 +187,9 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx) * the write lock here for other waiters */ reiserfs_write_unlock(inode->i_sb); - if (!dir_emit - (ctx, local_buf, d_reclen, d_ino, - DT_UNKNOWN)) { + if (filldir + (dirent, local_buf, d_reclen, d_off, d_ino, + DT_UNKNOWN) < 0) { reiserfs_write_lock(inode->i_sb); if (local_buf != small_buf) { kfree(local_buf); @@ -199,8 +204,6 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx) next_pos = deh_offset(deh) + 1; if (item_moved(&tmp_ih, &path_to_entry)) { - set_cpu_key_k_offset(&pos_key, - next_pos); goto research; } } /* for */ @@ -232,7 +235,7 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx) } /* while */ end: - ctx->pos = next_pos; + *pos = next_pos; pathrelse(&path_to_entry); reiserfs_check_path(&path_to_entry); out: @@ -240,9 +243,10 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx) return ret; } -static int reiserfs_readdir(struct file *file, struct dir_context *ctx) +static int reiserfs_readdir(struct file *file, void *dirent, filldir_t filldir) { - return reiserfs_readdir_inode(file_inode(file), ctx); + struct dentry *dentry = file->f_path.dentry; + return reiserfs_readdir_dentry(dentry, dirent, filldir, &file->f_pos); } /* compose directory item containing "." and ".." 
entries (entries are diff --git a/trunk/fs/reiserfs/inode.c b/trunk/fs/reiserfs/inode.c index 0048cc16a6a8..77d6d47abc83 100644 --- a/trunk/fs/reiserfs/inode.c +++ b/trunk/fs/reiserfs/inode.c @@ -1811,16 +1811,11 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, TYPE_STAT_DATA, SD_SIZE, MAX_US_INT); memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE); args.dirid = le32_to_cpu(ih.ih_key.k_dir_id); - - reiserfs_write_unlock(inode->i_sb); - err = insert_inode_locked4(inode, args.objectid, - reiserfs_find_actor, &args); - reiserfs_write_lock(inode->i_sb); - if (err) { + if (insert_inode_locked4(inode, args.objectid, + reiserfs_find_actor, &args) < 0) { err = -EINVAL; goto out_bad_inode; } - if (old_format_only(sb)) /* not a perfect generation count, as object ids can be reused, but ** this is as good as reiserfs can do right now. @@ -2975,19 +2970,16 @@ static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh) } /* clm -- taken from fs/buffer.c:block_invalidate_page */ -static void reiserfs_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +static void reiserfs_invalidatepage(struct page *page, unsigned long offset) { struct buffer_head *head, *bh, *next; struct inode *inode = page->mapping->host; unsigned int curr_off = 0; - unsigned int stop = offset + length; - int partial_page = (offset || length < PAGE_CACHE_SIZE); int ret = 1; BUG_ON(!PageLocked(page)); - if (!partial_page) + if (offset == 0) ClearPageChecked(page); if (!page_has_buffers(page)) @@ -2999,9 +2991,6 @@ static void reiserfs_invalidatepage(struct page *page, unsigned int offset, unsigned int next_off = curr_off + bh->b_size; next = bh->b_this_page; - if (next_off > stop) - goto out; - /* * is this block fully invalidated? */ @@ -3020,7 +3009,7 @@ static void reiserfs_invalidatepage(struct page *page, unsigned int offset, * The get_block cached value has been unconditionally invalidated, * so real IO is not possible anymore. */ - if (!partial_page && ret) { + if (!offset && ret) { ret = try_to_release_page(page, 0); /* maybe should BUG_ON(!ret); - neilb */ } diff --git a/trunk/fs/reiserfs/reiserfs.h b/trunk/fs/reiserfs/reiserfs.h index 3df5ce6c724d..157e474ab303 100644 --- a/trunk/fs/reiserfs/reiserfs.h +++ b/trunk/fs/reiserfs/reiserfs.h @@ -2709,7 +2709,7 @@ extern const struct inode_operations reiserfs_dir_inode_operations; extern const struct inode_operations reiserfs_symlink_inode_operations; extern const struct inode_operations reiserfs_special_inode_operations; extern const struct file_operations reiserfs_dir_operations; -int reiserfs_readdir_inode(struct inode *, struct dir_context *); +int reiserfs_readdir_dentry(struct dentry *, void *, filldir_t, loff_t *); /* tail_conversion.c */ int direct2indirect(struct reiserfs_transaction_handle *, struct inode *, diff --git a/trunk/fs/reiserfs/xattr.c b/trunk/fs/reiserfs/xattr.c index c69cdd749f09..4cce1d9552fb 100644 --- a/trunk/fs/reiserfs/xattr.c +++ b/trunk/fs/reiserfs/xattr.c @@ -171,7 +171,6 @@ static struct dentry *open_xa_dir(const struct inode *inode, int flags) * modifying extended attributes. This includes operations such as permissions * or ownership changes, object deletions, etc. 
*/ struct reiserfs_dentry_buf { - struct dir_context ctx; struct dentry *xadir; int count; struct dentry *dentries[8]; @@ -224,8 +223,9 @@ static int reiserfs_for_each_xattr(struct inode *inode, { struct dentry *dir; int i, err = 0; + loff_t pos = 0; struct reiserfs_dentry_buf buf = { - .ctx.actor = fill_with_dentries, + .count = 0, }; /* Skip out, an xattr has no xattrs associated with it */ @@ -249,27 +249,29 @@ static int reiserfs_for_each_xattr(struct inode *inode, reiserfs_write_lock(inode->i_sb); buf.xadir = dir; - while (1) { - err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx); - if (err) - break; - if (!buf.count) - break; - for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) { + err = reiserfs_readdir_dentry(dir, &buf, fill_with_dentries, &pos); + while ((err == 0 || err == -ENOSPC) && buf.count) { + err = 0; + + for (i = 0; i < buf.count && buf.dentries[i]; i++) { + int lerr = 0; struct dentry *dentry = buf.dentries[i]; - if (!S_ISDIR(dentry->d_inode->i_mode)) - err = action(dentry, data); + if (err == 0 && !S_ISDIR(dentry->d_inode->i_mode)) + lerr = action(dentry, data); dput(dentry); buf.dentries[i] = NULL; + err = lerr ?: err; } - if (err) - break; buf.count = 0; + if (!err) + err = reiserfs_readdir_dentry(dir, &buf, + fill_with_dentries, &pos); } mutex_unlock(&dir->d_inode->i_mutex); + /* Clean up after a failed readdir */ cleanup_dentry_buf(&buf); if (!err) { @@ -316,19 +318,7 @@ static int delete_one_xattr(struct dentry *dentry, void *data) static int chown_one_xattr(struct dentry *dentry, void *data) { struct iattr *attrs = data; - int ia_valid = attrs->ia_valid; - int err; - - /* - * We only want the ownership bits. Otherwise, we'll do - * things like change a directory to a regular file if - * ATTR_MODE is set. - */ - attrs->ia_valid &= (ATTR_UID|ATTR_GID); - err = reiserfs_setattr(dentry, attrs); - attrs->ia_valid = ia_valid; - - return err; + return reiserfs_setattr(dentry, attrs); } /* No i_mutex, but the inode is unconnected. */ @@ -798,7 +788,6 @@ int reiserfs_removexattr(struct dentry *dentry, const char *name) } struct listxattr_buf { - struct dir_context ctx; size_t size; size_t pos; char *buf; @@ -844,8 +833,8 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size) { struct dentry *dir; int err = 0; + loff_t pos = 0; struct listxattr_buf buf = { - .ctx.actor = listxattr_filler, .dentry = dentry, .buf = buffer, .size = buffer ? 
size : 0, @@ -867,7 +856,7 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size) } mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR); - err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx); + err = reiserfs_readdir_dentry(dir, &buf, listxattr_filler, &pos); mutex_unlock(&dir->d_inode->i_mutex); if (!err) diff --git a/trunk/fs/reiserfs/xattr_acl.c b/trunk/fs/reiserfs/xattr_acl.c index 6c8767fdfc6a..d7c01ef64eda 100644 --- a/trunk/fs/reiserfs/xattr_acl.c +++ b/trunk/fs/reiserfs/xattr_acl.c @@ -443,9 +443,6 @@ int reiserfs_acl_chmod(struct inode *inode) int depth; int error; - if (IS_PRIVATE(inode)) - return 0; - if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; diff --git a/trunk/fs/romfs/super.c b/trunk/fs/romfs/super.c index ff1d3d42e72a..15cbc41ee365 100644 --- a/trunk/fs/romfs/super.c +++ b/trunk/fs/romfs/super.c @@ -145,18 +145,19 @@ static const struct address_space_operations romfs_aops = { /* * read the entries from a directory */ -static int romfs_readdir(struct file *file, struct dir_context *ctx) +static int romfs_readdir(struct file *filp, void *dirent, filldir_t filldir) { - struct inode *i = file_inode(file); + struct inode *i = file_inode(filp); struct romfs_inode ri; unsigned long offset, maxoff; int j, ino, nextfh; + int stored = 0; char fsname[ROMFS_MAXFN]; /* XXX dynamic? */ int ret; maxoff = romfs_maxsize(i->i_sb); - offset = ctx->pos; + offset = filp->f_pos; if (!offset) { offset = i->i_ino & ROMFH_MASK; ret = romfs_dev_read(i->i_sb, offset, &ri, ROMFH_SIZE); @@ -169,10 +170,10 @@ static int romfs_readdir(struct file *file, struct dir_context *ctx) for (;;) { if (!offset || offset >= maxoff) { offset = maxoff; - ctx->pos = offset; + filp->f_pos = offset; goto out; } - ctx->pos = offset; + filp->f_pos = offset; /* Fetch inode info */ ret = romfs_dev_read(i->i_sb, offset, &ri, ROMFH_SIZE); @@ -193,14 +194,16 @@ static int romfs_readdir(struct file *file, struct dir_context *ctx) nextfh = be32_to_cpu(ri.next); if ((nextfh & ROMFH_TYPE) == ROMFH_HRD) ino = be32_to_cpu(ri.spec); - if (!dir_emit(ctx, fsname, j, ino, - romfs_dtype_table[nextfh & ROMFH_TYPE])) + if (filldir(dirent, fsname, j, offset, ino, + romfs_dtype_table[nextfh & ROMFH_TYPE]) < 0) goto out; + stored++; offset = nextfh & ROMFH_MASK; } + out: - return 0; + return stored; } /* @@ -278,7 +281,7 @@ static struct dentry *romfs_lookup(struct inode *dir, struct dentry *dentry, static const struct file_operations romfs_dir_operations = { .read = generic_read_dir, - .iterate = romfs_readdir, + .readdir = romfs_readdir, .llseek = default_llseek, }; diff --git a/trunk/fs/splice.c b/trunk/fs/splice.c index d37431dd60a1..e6b25598c8c4 100644 --- a/trunk/fs/splice.c +++ b/trunk/fs/splice.c @@ -1274,7 +1274,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe, { struct file *file = sd->u.file; - return do_splice_from(pipe, file, sd->opos, sd->total_len, + return do_splice_from(pipe, file, &file->f_pos, sd->total_len, sd->flags); } @@ -1283,7 +1283,6 @@ static int direct_splice_actor(struct pipe_inode_info *pipe, * @in: file to splice from * @ppos: input file offset * @out: file to splice to - * @opos: output file offset * @len: number of bytes to splice * @flags: splice modifier flags * @@ -1295,7 +1294,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe, * */ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, - loff_t *opos, size_t len, unsigned int flags) + size_t len, unsigned int flags) { struct splice_desc sd = { .len = len, @@ -1303,7 +1302,6 
@@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, .flags = flags, .pos = *ppos, .u.file = out, - .opos = opos, }; long ret; @@ -1327,7 +1325,7 @@ static long do_splice(struct file *in, loff_t __user *off_in, { struct pipe_inode_info *ipipe; struct pipe_inode_info *opipe; - loff_t offset; + loff_t offset, *off; long ret; ipipe = get_pipe_info(in); @@ -1358,15 +1356,13 @@ static long do_splice(struct file *in, loff_t __user *off_in, return -EINVAL; if (copy_from_user(&offset, off_out, sizeof(loff_t))) return -EFAULT; - } else { - offset = out->f_pos; - } + off = &offset; + } else + off = &out->f_pos; - ret = do_splice_from(ipipe, out, &offset, len, flags); + ret = do_splice_from(ipipe, out, off, len, flags); - if (!off_out) - out->f_pos = offset; - else if (copy_to_user(off_out, &offset, sizeof(loff_t))) + if (off_out && copy_to_user(off_out, off, sizeof(loff_t))) ret = -EFAULT; return ret; @@ -1380,15 +1376,13 @@ static long do_splice(struct file *in, loff_t __user *off_in, return -EINVAL; if (copy_from_user(&offset, off_in, sizeof(loff_t))) return -EFAULT; - } else { - offset = in->f_pos; - } + off = &offset; + } else + off = &in->f_pos; - ret = do_splice_to(in, &offset, opipe, len, flags); + ret = do_splice_to(in, off, opipe, len, flags); - if (!off_in) - in->f_pos = offset; - else if (copy_to_user(off_in, &offset, sizeof(loff_t))) + if (off_in && copy_to_user(off_in, off, sizeof(loff_t))) ret = -EFAULT; return ret; diff --git a/trunk/fs/squashfs/dir.c b/trunk/fs/squashfs/dir.c index f7f527bf8c10..57dc70ebbb19 100644 --- a/trunk/fs/squashfs/dir.c +++ b/trunk/fs/squashfs/dir.c @@ -100,7 +100,7 @@ static int get_dir_index_using_offset(struct super_block *sb, } -static int squashfs_readdir(struct file *file, struct dir_context *ctx) +static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir) { struct inode *inode = file_inode(file); struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; @@ -127,11 +127,11 @@ static int squashfs_readdir(struct file *file, struct dir_context *ctx) * It also means that the external f_pos is offset by 3 from the * on-disk directory f_pos. 
*/ - while (ctx->pos < 3) { + while (file->f_pos < 3) { char *name; int i_ino; - if (ctx->pos == 0) { + if (file->f_pos == 0) { name = "."; size = 1; i_ino = inode->i_ino; @@ -141,18 +141,24 @@ static int squashfs_readdir(struct file *file, struct dir_context *ctx) i_ino = squashfs_i(inode)->parent; } - if (!dir_emit(ctx, name, size, i_ino, - squashfs_filetype_table[1])) + TRACE("Calling filldir(%p, %s, %d, %lld, %d, %d)\n", + dirent, name, size, file->f_pos, i_ino, + squashfs_filetype_table[1]); + + if (filldir(dirent, name, size, file->f_pos, i_ino, + squashfs_filetype_table[1]) < 0) { + TRACE("Filldir returned less than 0\n"); goto finish; + } - ctx->pos += size; + file->f_pos += size; } length = get_dir_index_using_offset(inode->i_sb, &block, &offset, squashfs_i(inode)->dir_idx_start, squashfs_i(inode)->dir_idx_offset, squashfs_i(inode)->dir_idx_cnt, - ctx->pos); + file->f_pos); while (length < i_size_read(inode)) { /* @@ -192,7 +198,7 @@ static int squashfs_readdir(struct file *file, struct dir_context *ctx) length += sizeof(*dire) + size; - if (ctx->pos >= length) + if (file->f_pos >= length) continue; dire->name[size] = '\0'; @@ -200,12 +206,22 @@ static int squashfs_readdir(struct file *file, struct dir_context *ctx) ((short) le16_to_cpu(dire->inode_number)); type = le16_to_cpu(dire->type); - if (!dir_emit(ctx, dire->name, size, + TRACE("Calling filldir(%p, %s, %d, %lld, %x:%x, %d, %d)" + "\n", dirent, dire->name, size, + file->f_pos, + le32_to_cpu(dirh.start_block), + le16_to_cpu(dire->offset), + inode_number, + squashfs_filetype_table[type]); + + if (filldir(dirent, dire->name, size, file->f_pos, inode_number, - squashfs_filetype_table[type])) + squashfs_filetype_table[type]) < 0) { + TRACE("Filldir returned less than 0\n"); goto finish; + } - ctx->pos = length; + file->f_pos = length; } } @@ -222,6 +238,6 @@ static int squashfs_readdir(struct file *file, struct dir_context *ctx) const struct file_operations squashfs_dir_ops = { .read = generic_read_dir, - .iterate = squashfs_readdir, + .readdir = squashfs_readdir, .llseek = default_llseek, }; diff --git a/trunk/fs/sysfs/dir.c b/trunk/fs/sysfs/dir.c index 4cfd742d260d..e8e0e71b29d5 100644 --- a/trunk/fs/sysfs/dir.c +++ b/trunk/fs/sysfs/dir.c @@ -998,38 +998,68 @@ static struct sysfs_dirent *sysfs_dir_next_pos(const void *ns, return pos; } -static int sysfs_readdir(struct file *file, struct dir_context *ctx) +static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir) { - struct dentry *dentry = file->f_path.dentry; + struct dentry *dentry = filp->f_path.dentry; struct sysfs_dirent * parent_sd = dentry->d_fsdata; - struct sysfs_dirent *pos = file->private_data; + struct sysfs_dirent *pos = filp->private_data; enum kobj_ns_type type; const void *ns; + ino_t ino; + loff_t off; type = sysfs_ns_type(parent_sd); ns = sysfs_info(dentry->d_sb)->ns[type]; - if (!dir_emit_dots(file, ctx)) - return 0; + if (filp->f_pos == 0) { + ino = parent_sd->s_ino; + if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0) + filp->f_pos++; + else + return 0; + } + if (filp->f_pos == 1) { + if (parent_sd->s_parent) + ino = parent_sd->s_parent->s_ino; + else + ino = parent_sd->s_ino; + if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0) + filp->f_pos++; + else + return 0; + } mutex_lock(&sysfs_mutex); - for (pos = sysfs_dir_pos(ns, parent_sd, ctx->pos, pos); + off = filp->f_pos; + for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos); pos; - pos = sysfs_dir_next_pos(ns, parent_sd, ctx->pos, pos)) { - const char *name = 
pos->s_name; - unsigned int type = dt_type(pos); - int len = strlen(name); - ino_t ino = pos->s_ino; - ctx->pos = pos->s_hash; - file->private_data = sysfs_get(pos); + pos = sysfs_dir_next_pos(ns, parent_sd, filp->f_pos, pos)) { + const char * name; + unsigned int type; + int len, ret; + + name = pos->s_name; + len = strlen(name); + ino = pos->s_ino; + type = dt_type(pos); + off = filp->f_pos = pos->s_hash; + filp->private_data = sysfs_get(pos); mutex_unlock(&sysfs_mutex); - if (!dir_emit(ctx, name, len, ino, type)) - return 0; + ret = filldir(dirent, name, len, off, ino, type); mutex_lock(&sysfs_mutex); + if (ret < 0) + break; } mutex_unlock(&sysfs_mutex); - file->private_data = NULL; - ctx->pos = INT_MAX; + + /* don't reference last entry if its refcount is dropped */ + if (!pos) { + filp->private_data = NULL; + + /* EOF and not changed as 0 or 1 in read/write path */ + if (off == filp->f_pos && off > 1) + filp->f_pos = INT_MAX; + } return 0; } @@ -1047,7 +1077,7 @@ static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence) const struct file_operations sysfs_dir_operations = { .read = generic_read_dir, - .iterate = sysfs_readdir, + .readdir = sysfs_readdir, .release = sysfs_dir_release, .llseek = sysfs_dir_llseek, }; diff --git a/trunk/fs/sysv/dir.c b/trunk/fs/sysv/dir.c index d42291d08215..3799e8dac3eb 100644 --- a/trunk/fs/sysv/dir.c +++ b/trunk/fs/sysv/dir.c @@ -18,12 +18,12 @@ #include #include "sysv.h" -static int sysv_readdir(struct file *, struct dir_context *); +static int sysv_readdir(struct file *, void *, filldir_t); const struct file_operations sysv_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = sysv_readdir, + .readdir = sysv_readdir, .fsync = generic_file_fsync, }; @@ -65,21 +65,18 @@ static struct page * dir_get_page(struct inode *dir, unsigned long n) return page; } -static int sysv_readdir(struct file *file, struct dir_context *ctx) +static int sysv_readdir(struct file * filp, void * dirent, filldir_t filldir) { - unsigned long pos = ctx->pos; - struct inode *inode = file_inode(file); + unsigned long pos = filp->f_pos; + struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; + unsigned offset = pos & ~PAGE_CACHE_MASK; + unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned long npages = dir_pages(inode); - unsigned offset; - unsigned long n; - ctx->pos = pos = (pos + SYSV_DIRSIZE-1) & ~(SYSV_DIRSIZE-1); + pos = (pos + SYSV_DIRSIZE-1) & ~(SYSV_DIRSIZE-1); if (pos >= inode->i_size) - return 0; - - offset = pos & ~PAGE_CACHE_MASK; - n = pos >> PAGE_CACHE_SHIFT; + goto done; for ( ; n < npages; n++, offset = 0) { char *kaddr, *limit; @@ -91,21 +88,29 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx) kaddr = (char *)page_address(page); de = (struct sysv_dir_entry *)(kaddr+offset); limit = kaddr + PAGE_CACHE_SIZE - SYSV_DIRSIZE; - for ( ;(char*)de <= limit; de++, ctx->pos += sizeof(*de)) { + for ( ;(char*)de <= limit; de++) { char *name = de->name; + int over; if (!de->inode) continue; - if (!dir_emit(ctx, name, strnlen(name,SYSV_NAMELEN), + offset = (char *)de - kaddr; + + over = filldir(dirent, name, strnlen(name,SYSV_NAMELEN), + ((loff_t)n<inode), - DT_UNKNOWN)) { + DT_UNKNOWN); + if (over) { dir_put_page(page); - return 0; + goto done; } } dir_put_page(page); } + +done: + filp->f_pos = ((loff_t)n << PAGE_CACHE_SHIFT) | offset; return 0; } diff --git a/trunk/fs/ubifs/dir.c b/trunk/fs/ubifs/dir.c index 6b4947f75af7..de08c92f2e23 100644 --- a/trunk/fs/ubifs/dir.c +++ 
b/trunk/fs/ubifs/dir.c @@ -346,46 +346,38 @@ static unsigned int vfs_dent_type(uint8_t type) * This means that UBIFS cannot support NFS which requires full * 'seekdir()'/'telldir()' support. */ -static int ubifs_readdir(struct file *file, struct dir_context *ctx) +static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) { - int err; + int err, over = 0; struct qstr nm; union ubifs_key key; struct ubifs_dent_node *dent; struct inode *dir = file_inode(file); struct ubifs_info *c = dir->i_sb->s_fs_info; - dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, ctx->pos); + dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos); - if (ctx->pos > UBIFS_S_KEY_HASH_MASK || ctx->pos == 2) + if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2) /* * The directory was seek'ed to a senseless position or there * are no more entries. */ return 0; - if (file->f_version == 0) { - /* - * The file was seek'ed, which means that @file->private_data - * is now invalid. This may also be just the first - * 'ubifs_readdir()' invocation, in which case - * @file->private_data is NULL, and the below code is - * basically a no-op. - */ - kfree(file->private_data); - file->private_data = NULL; + /* File positions 0 and 1 correspond to "." and ".." */ + if (file->f_pos == 0) { + ubifs_assert(!file->private_data); + over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR); + if (over) + return 0; + file->f_pos = 1; } - /* - * 'generic_file_llseek()' unconditionally sets @file->f_version to - * zero, and we use this for detecting whether the file was seek'ed. - */ - file->f_version = 1; - - /* File positions 0 and 1 correspond to "." and ".." */ - if (ctx->pos < 2) { + if (file->f_pos == 1) { ubifs_assert(!file->private_data); - if (!dir_emit_dots(file, ctx)) + over = filldir(dirent, "..", 2, 1, + parent_ino(file->f_path.dentry), DT_DIR); + if (over) return 0; /* Find the first entry in TNC and save it */ @@ -397,7 +389,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx) goto out; } - ctx->pos = key_hash_flash(c, &dent->key); + file->f_pos = key_hash_flash(c, &dent->key); file->private_data = dent; } @@ -405,16 +397,17 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx) if (!dent) { /* * The directory was seek'ed to and is now readdir'ed. - * Find the entry corresponding to @ctx->pos or the closest one. + * Find the entry corresponding to @file->f_pos or the + * closest one. 
*/ - dent_key_init_hash(c, &key, dir->i_ino, ctx->pos); + dent_key_init_hash(c, &key, dir->i_ino, file->f_pos); nm.name = NULL; dent = ubifs_tnc_next_ent(c, &key, &nm); if (IS_ERR(dent)) { err = PTR_ERR(dent); goto out; } - ctx->pos = key_hash_flash(c, &dent->key); + file->f_pos = key_hash_flash(c, &dent->key); file->private_data = dent; } @@ -426,9 +419,10 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx) ubifs_inode(dir)->creat_sqnum); nm.len = le16_to_cpu(dent->nlen); - if (!dir_emit(ctx, dent->name, nm.len, + over = filldir(dirent, dent->name, nm.len, file->f_pos, le64_to_cpu(dent->inum), - vfs_dent_type(dent->type))) + vfs_dent_type(dent->type)); + if (over) return 0; /* Switch to the next entry */ @@ -441,7 +435,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx) } kfree(file->private_data); - ctx->pos = key_hash_flash(c, &dent->key); + file->f_pos = key_hash_flash(c, &dent->key); file->private_data = dent; cond_resched(); } @@ -454,11 +448,18 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx) kfree(file->private_data); file->private_data = NULL; - /* 2 is a special value indicating that there are no more direntries */ - ctx->pos = 2; + file->f_pos = 2; return 0; } +/* If a directory is seeked, we have to free saved readdir() state */ +static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int whence) +{ + kfree(file->private_data); + file->private_data = NULL; + return generic_file_llseek(file, offset, whence); +} + /* Free saved readdir() state when the directory is closed */ static int ubifs_dir_release(struct inode *dir, struct file *file) { @@ -1176,10 +1177,10 @@ const struct inode_operations ubifs_dir_inode_operations = { }; const struct file_operations ubifs_dir_operations = { - .llseek = generic_file_llseek, + .llseek = ubifs_dir_llseek, .release = ubifs_dir_release, .read = generic_read_dir, - .iterate = ubifs_readdir, + .readdir = ubifs_readdir, .fsync = ubifs_fsync, .unlocked_ioctl = ubifs_ioctl, #ifdef CONFIG_COMPAT diff --git a/trunk/fs/ubifs/file.c b/trunk/fs/ubifs/file.c index 123c79b7261e..14374530784c 100644 --- a/trunk/fs/ubifs/file.c +++ b/trunk/fs/ubifs/file.c @@ -1277,14 +1277,13 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr) return err; } -static void ubifs_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +static void ubifs_invalidatepage(struct page *page, unsigned long offset) { struct inode *inode = page->mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; ubifs_assert(PagePrivate(page)); - if (offset || length < PAGE_CACHE_SIZE) + if (offset) /* Partial page remains dirty */ return; diff --git a/trunk/fs/udf/dir.c b/trunk/fs/udf/dir.c index a012c51caffd..b3e93f5e17c3 100644 --- a/trunk/fs/udf/dir.c +++ b/trunk/fs/udf/dir.c @@ -35,16 +35,14 @@ #include "udf_i.h" #include "udf_sb.h" - -static int udf_readdir(struct file *file, struct dir_context *ctx) +static int do_udf_readdir(struct inode *dir, struct file *filp, + filldir_t filldir, void *dirent) { - struct inode *dir = file_inode(file); - struct udf_inode_info *iinfo = UDF_I(dir); struct udf_fileident_bh fibh = { .sbh = NULL, .ebh = NULL}; struct fileIdentDesc *fi = NULL; struct fileIdentDesc cfi; int block, iblock; - loff_t nf_pos; + loff_t nf_pos = (filp->f_pos - 1) << 2; int flen; unsigned char *fname = NULL; unsigned char *nameptr; @@ -56,14 +54,10 @@ static int udf_readdir(struct file *file, struct dir_context *ctx) uint32_t elen; sector_t offset; int i, num, ret = 0; + 
unsigned int dt_type; struct extent_position epos = { NULL, 0, {0, 0} }; + struct udf_inode_info *iinfo; - if (ctx->pos == 0) { - if (!dir_emit_dot(file, ctx)) - return 0; - ctx->pos = 1; - } - nf_pos = (ctx->pos - 1) << 2; if (nf_pos >= size) goto out; @@ -77,6 +71,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx) nf_pos = udf_ext0_offset(dir); fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1); + iinfo = UDF_I(dir); if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits, &epos, &eloc, &elen, &offset) @@ -121,9 +116,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx) } while (nf_pos < size) { - struct kernel_lb_addr tloc; - - ctx->pos = (nf_pos >> 2) + 1; + filp->f_pos = (nf_pos >> 2) + 1; fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc, &elen, &offset); @@ -162,22 +155,24 @@ static int udf_readdir(struct file *file, struct dir_context *ctx) } if (cfi.fileCharacteristics & FID_FILE_CHAR_PARENT) { - if (!dir_emit_dotdot(file, ctx)) - goto out; - continue; - } + iblock = parent_ino(filp->f_path.dentry); + flen = 2; + memcpy(fname, "..", flen); + dt_type = DT_DIR; + } else { + struct kernel_lb_addr tloc = lelb_to_cpu(cfi.icb.extLocation); - flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi); - if (!flen) - continue; + iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0); + flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi); + dt_type = DT_UNKNOWN; + } - tloc = lelb_to_cpu(cfi.icb.extLocation); - iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0); - if (!dir_emit(ctx, fname, flen, iblock, DT_UNKNOWN)) + if (flen && filldir(dirent, fname, flen, filp->f_pos, + iblock, dt_type) < 0) goto out; } /* end while */ - ctx->pos = (nf_pos >> 2) + 1; + filp->f_pos = (nf_pos >> 2) + 1; out: if (fibh.sbh != fibh.ebh) @@ -189,11 +184,27 @@ static int udf_readdir(struct file *file, struct dir_context *ctx) return ret; } +static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir) +{ + struct inode *dir = file_inode(filp); + int result; + + if (filp->f_pos == 0) { + if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0) { + return 0; + } + filp->f_pos++; + } + + result = do_udf_readdir(dir, filp, filldir, dirent); + return result; +} + /* readdir and lookup functions */ const struct file_operations udf_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = udf_readdir, + .readdir = udf_readdir, .unlocked_ioctl = udf_ioctl, .fsync = generic_file_fsync, }; diff --git a/trunk/fs/ufs/dir.c b/trunk/fs/ufs/dir.c index 0ecc2cebed8f..3a75ca09c506 100644 --- a/trunk/fs/ufs/dir.c +++ b/trunk/fs/ufs/dir.c @@ -430,16 +430,16 @@ ufs_validate_entry(struct super_block *sb, char *base, * This is blatantly stolen from ext2fs */ static int -ufs_readdir(struct file *file, struct dir_context *ctx) +ufs_readdir(struct file *filp, void *dirent, filldir_t filldir) { - loff_t pos = ctx->pos; - struct inode *inode = file_inode(file); + loff_t pos = filp->f_pos; + struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; unsigned int offset = pos & ~PAGE_CACHE_MASK; unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned long npages = ufs_dir_pages(inode); unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1); - int need_revalidate = file->f_version != inode->i_version; + int need_revalidate = filp->f_version != inode->i_version; unsigned flags = UFS_SB(sb)->s_flags; UFSD("BEGIN\n"); @@ -457,16 +457,16 @@ 
ufs_readdir(struct file *file, struct dir_context *ctx) ufs_error(sb, __func__, "bad page in #%lu", inode->i_ino); - ctx->pos += PAGE_CACHE_SIZE - offset; + filp->f_pos += PAGE_CACHE_SIZE - offset; return -EIO; } kaddr = page_address(page); if (unlikely(need_revalidate)) { if (offset) { offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask); - ctx->pos = (n<f_pos = (n<f_version = inode->i_version; + filp->f_version = inode->i_version; need_revalidate = 0; } de = (struct ufs_dir_entry *)(kaddr+offset); @@ -479,8 +479,11 @@ ufs_readdir(struct file *file, struct dir_context *ctx) return -EIO; } if (de->d_ino) { + int over; unsigned char d_type = DT_UNKNOWN; + offset = (char *)de - kaddr; + UFSD("filldir(%s,%u)\n", de->d_name, fs32_to_cpu(sb, de->d_ino)); UFSD("namlen %u\n", ufs_get_de_namlen(sb, de)); @@ -488,15 +491,16 @@ ufs_readdir(struct file *file, struct dir_context *ctx) if ((flags & UFS_DE_MASK) == UFS_DE_44BSD) d_type = de->d_u.d_44.d_type; - if (!dir_emit(ctx, de->d_name, + over = filldir(dirent, de->d_name, ufs_get_de_namlen(sb, de), - fs32_to_cpu(sb, de->d_ino), - d_type)) { + (n<d_ino), d_type); + if (over) { ufs_put_page(page); return 0; } } - ctx->pos += fs16_to_cpu(sb, de->d_reclen); + filp->f_pos += fs16_to_cpu(sb, de->d_reclen); } ufs_put_page(page); } @@ -656,7 +660,7 @@ int ufs_empty_dir(struct inode * inode) const struct file_operations ufs_dir_operations = { .read = generic_read_dir, - .iterate = ufs_readdir, + .readdir = ufs_readdir, .fsync = generic_file_fsync, .llseek = generic_file_llseek, }; diff --git a/trunk/fs/xfs/xfs_acl.c b/trunk/fs/xfs/xfs_acl.c index 306d883d89bc..1d32f1d52763 100644 --- a/trunk/fs/xfs/xfs_acl.c +++ b/trunk/fs/xfs/xfs_acl.c @@ -21,8 +21,6 @@ #include "xfs_bmap_btree.h" #include "xfs_inode.h" #include "xfs_vnodeops.h" -#include "xfs_sb.h" -#include "xfs_mount.h" #include "xfs_trace.h" #include #include @@ -36,9 +34,7 @@ */ STATIC struct posix_acl * -xfs_acl_from_disk( - struct xfs_acl *aclp, - int max_entries) +xfs_acl_from_disk(struct xfs_acl *aclp) { struct posix_acl_entry *acl_e; struct posix_acl *acl; @@ -46,7 +42,7 @@ xfs_acl_from_disk( unsigned int count, i; count = be32_to_cpu(aclp->acl_cnt); - if (count > max_entries) + if (count > XFS_ACL_MAX_ENTRIES) return ERR_PTR(-EFSCORRUPTED); acl = posix_acl_alloc(count, GFP_KERNEL); @@ -112,9 +108,9 @@ xfs_get_acl(struct inode *inode, int type) struct xfs_inode *ip = XFS_I(inode); struct posix_acl *acl; struct xfs_acl *xfs_acl; + int len = sizeof(struct xfs_acl); unsigned char *ea_name; int error; - int len; acl = get_cached_acl(inode, type); if (acl != ACL_NOT_CACHED) @@ -137,8 +133,8 @@ xfs_get_acl(struct inode *inode, int type) * If we have a cached ACLs value just return it, not need to * go out to the disk. 
*/ - len = XFS_ACL_MAX_SIZE(ip->i_mount); - xfs_acl = kzalloc(len, GFP_KERNEL); + + xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL); if (!xfs_acl) return ERR_PTR(-ENOMEM); @@ -157,7 +153,7 @@ xfs_get_acl(struct inode *inode, int type) goto out; } - acl = xfs_acl_from_disk(xfs_acl, XFS_ACL_MAX_ENTRIES(ip->i_mount)); + acl = xfs_acl_from_disk(xfs_acl); if (IS_ERR(acl)) goto out; @@ -193,17 +189,16 @@ xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl) if (acl) { struct xfs_acl *xfs_acl; - int len = XFS_ACL_MAX_SIZE(ip->i_mount); + int len; - xfs_acl = kzalloc(len, GFP_KERNEL); + xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL); if (!xfs_acl) return -ENOMEM; xfs_acl_to_disk(xfs_acl, acl); - - /* subtract away the unused acl entries */ - len -= sizeof(struct xfs_acl_entry) * - (XFS_ACL_MAX_ENTRIES(ip->i_mount) - acl->a_count); + len = sizeof(struct xfs_acl) - + (sizeof(struct xfs_acl_entry) * + (XFS_ACL_MAX_ENTRIES - acl->a_count)); error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl, len, ATTR_ROOT); @@ -248,7 +243,7 @@ xfs_set_mode(struct inode *inode, umode_t mode) static int xfs_acl_exists(struct inode *inode, unsigned char *name) { - int len = XFS_ACL_MAX_SIZE(XFS_M(inode->i_sb)); + int len = sizeof(struct xfs_acl); return (xfs_attr_get(XFS_I(inode), name, NULL, &len, ATTR_ROOT|ATTR_KERNOVAL) == 0); @@ -384,7 +379,7 @@ xfs_xattr_acl_set(struct dentry *dentry, const char *name, goto out_release; error = -EINVAL; - if (acl->a_count > XFS_ACL_MAX_ENTRIES(XFS_M(inode->i_sb))) + if (acl->a_count > XFS_ACL_MAX_ENTRIES) goto out_release; if (type == ACL_TYPE_ACCESS) { diff --git a/trunk/fs/xfs/xfs_acl.h b/trunk/fs/xfs/xfs_acl.h index 4016a567b83c..39632d941354 100644 --- a/trunk/fs/xfs/xfs_acl.h +++ b/trunk/fs/xfs/xfs_acl.h @@ -22,36 +22,19 @@ struct inode; struct posix_acl; struct xfs_inode; +#define XFS_ACL_MAX_ENTRIES 25 #define XFS_ACL_NOT_PRESENT (-1) /* On-disk XFS access control list structure */ -struct xfs_acl_entry { - __be32 ae_tag; - __be32 ae_id; - __be16 ae_perm; - __be16 ae_pad; /* fill the implicit hole in the structure */ -}; - struct xfs_acl { - __be32 acl_cnt; - struct xfs_acl_entry acl_entry[0]; + __be32 acl_cnt; + struct xfs_acl_entry { + __be32 ae_tag; + __be32 ae_id; + __be16 ae_perm; + } acl_entry[XFS_ACL_MAX_ENTRIES]; }; -/* - * The number of ACL entries allowed is defined by the on-disk format. - * For v4 superblocks, that is limited to 25 entries. For v5 superblocks, it is - * limited only by the maximum size of the xattr that stores the information. - */ -#define XFS_ACL_MAX_ENTRIES(mp) \ - (xfs_sb_version_hascrc(&mp->m_sb) \ - ? (XATTR_SIZE_MAX - sizeof(struct xfs_acl)) / \ - sizeof(struct xfs_acl_entry) \ - : 25) - -#define XFS_ACL_MAX_SIZE(mp) \ - (sizeof(struct xfs_acl) + \ - sizeof(struct xfs_acl_entry) * XFS_ACL_MAX_ENTRIES((mp))) - /* On-disk XFS extended attribute names */ #define SGI_ACL_FILE (unsigned char *)"SGI_ACL_FILE" #define SGI_ACL_DEFAULT (unsigned char *)"SGI_ACL_DEFAULT" diff --git a/trunk/fs/xfs/xfs_aops.c b/trunk/fs/xfs/xfs_aops.c index 596ec71da00e..2b2691b73428 100644 --- a/trunk/fs/xfs/xfs_aops.c +++ b/trunk/fs/xfs/xfs_aops.c @@ -725,25 +725,6 @@ xfs_convert_page( (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, i_size_read(inode)); - /* - * If the current map does not span the entire page we are about to try - * to write, then give up. The only way we can write a page that spans - * multiple mappings in a single writeback iteration is via the - * xfs_vm_writepage() function. 
Data integrity writeback requires the - * entire page to be written in a single attempt, otherwise the part of - * the page we don't write here doesn't get written as part of the data - * integrity sync. - * - * For normal writeback, we also don't attempt to write partial pages - * here as it simply means that write_cache_pages() will see it under - * writeback and ignore the page until some point in the future, at - * which time this will be the only page in the file that needs - * writeback. Hence for more optimal IO patterns, we should always - * avoid partial page writeback due to multiple mappings on a page here. - */ - if (!xfs_imap_valid(inode, imap, end_offset)) - goto fail_unlock_page; - len = 1 << inode->i_blkbits; p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1), PAGE_CACHE_SIZE); @@ -843,12 +824,10 @@ xfs_cluster_write( STATIC void xfs_vm_invalidatepage( struct page *page, - unsigned int offset, - unsigned int length) + unsigned long offset) { - trace_xfs_invalidatepage(page->mapping->host, page, offset, - length); - block_invalidatepage(page, offset, length); + trace_xfs_invalidatepage(page->mapping->host, page, offset); + block_invalidatepage(page, offset); } /* @@ -912,7 +891,7 @@ xfs_aops_discard_page( xfs_iunlock(ip, XFS_ILOCK_EXCL); out_invalidate: - xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE); + xfs_vm_invalidatepage(page, 0); return; } @@ -942,7 +921,7 @@ xfs_vm_writepage( int count = 0; int nonblocking = 0; - trace_xfs_writepage(inode, page, 0, 0); + trace_xfs_writepage(inode, page, 0); ASSERT(page_has_buffers(page)); @@ -1173,7 +1152,7 @@ xfs_vm_releasepage( { int delalloc, unwritten; - trace_xfs_releasepage(page->mapping->host, page, 0, 0); + trace_xfs_releasepage(page->mapping->host, page, 0); xfs_count_page_state(page, &delalloc, &unwritten); diff --git a/trunk/fs/xfs/xfs_attr_leaf.c b/trunk/fs/xfs/xfs_attr_leaf.c index 31d3cd129269..08d5457c948e 100644 --- a/trunk/fs/xfs/xfs_attr_leaf.c +++ b/trunk/fs/xfs/xfs_attr_leaf.c @@ -931,22 +931,20 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) */ int xfs_attr_shortform_allfit( - struct xfs_buf *bp, - struct xfs_inode *dp) + struct xfs_buf *bp, + struct xfs_inode *dp) { - struct xfs_attr_leafblock *leaf; - struct xfs_attr_leaf_entry *entry; + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; xfs_attr_leaf_name_local_t *name_loc; - struct xfs_attr3_icleaf_hdr leafhdr; - int bytes; - int i; + int bytes, i; leaf = bp->b_addr; - xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf); - entry = xfs_attr3_leaf_entryp(leaf); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); + entry = &leaf->entries[0]; bytes = sizeof(struct xfs_attr_sf_hdr); - for (i = 0; i < leafhdr.count; entry++, i++) { + for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { if (entry->flags & XFS_ATTR_INCOMPLETE) continue; /* don't copy partial entries */ if (!(entry->flags & XFS_ATTR_LOCAL)) @@ -956,15 +954,15 @@ xfs_attr_shortform_allfit( return(0); if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX) return(0); - bytes += sizeof(struct xfs_attr_sf_entry) - 1 + bytes += sizeof(struct xfs_attr_sf_entry)-1 + name_loc->namelen + be16_to_cpu(name_loc->valuelen); } if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) && (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) && (bytes == sizeof(struct xfs_attr_sf_hdr))) - return -1; - return xfs_attr_shortform_bytesfit(dp, bytes); + return(-1); + return(xfs_attr_shortform_bytesfit(dp, bytes)); } /* @@ -1412,7 +1410,7 @@ xfs_attr3_leaf_add_work( 
name_rmt->valuelen = 0; name_rmt->valueblk = 0; args->rmtblkno = 1; - args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen); + args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen); } xfs_trans_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index), @@ -1445,12 +1443,11 @@ xfs_attr3_leaf_add_work( STATIC void xfs_attr3_leaf_compact( struct xfs_da_args *args, - struct xfs_attr3_icleaf_hdr *ichdr_dst, + struct xfs_attr3_icleaf_hdr *ichdr_d, struct xfs_buf *bp) { - struct xfs_attr_leafblock *leaf_src; - struct xfs_attr_leafblock *leaf_dst; - struct xfs_attr3_icleaf_hdr ichdr_src; + xfs_attr_leafblock_t *leaf_s, *leaf_d; + struct xfs_attr3_icleaf_hdr ichdr_s; struct xfs_trans *trans = args->trans; struct xfs_mount *mp = trans->t_mountp; char *tmpbuffer; @@ -1458,38 +1455,29 @@ xfs_attr3_leaf_compact( trace_xfs_attr_leaf_compact(args); tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP); + ASSERT(tmpbuffer != NULL); memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(mp)); memset(bp->b_addr, 0, XFS_LBSIZE(mp)); - leaf_src = (xfs_attr_leafblock_t *)tmpbuffer; - leaf_dst = bp->b_addr; /* - * Copy the on-disk header back into the destination buffer to ensure - * all the information in the header that is not part of the incore - * header structure is preserved. + * Copy basic information */ - memcpy(bp->b_addr, tmpbuffer, xfs_attr3_leaf_hdr_size(leaf_src)); - - /* Initialise the incore headers */ - ichdr_src = *ichdr_dst; /* struct copy */ - ichdr_dst->firstused = XFS_LBSIZE(mp); - ichdr_dst->usedbytes = 0; - ichdr_dst->count = 0; - ichdr_dst->holes = 0; - ichdr_dst->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_src); - ichdr_dst->freemap[0].size = ichdr_dst->firstused - - ichdr_dst->freemap[0].base; - - - /* write the header back to initialise the underlying buffer */ - xfs_attr3_leaf_hdr_to_disk(leaf_dst, ichdr_dst); + leaf_s = (xfs_attr_leafblock_t *)tmpbuffer; + leaf_d = bp->b_addr; + ichdr_s = *ichdr_d; /* struct copy */ + ichdr_d->firstused = XFS_LBSIZE(mp); + ichdr_d->usedbytes = 0; + ichdr_d->count = 0; + ichdr_d->holes = 0; + ichdr_d->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_s); + ichdr_d->freemap[0].size = ichdr_d->firstused - ichdr_d->freemap[0].base; /* * Copy all entry's in the same (sorted) order, * but allocate name/value pairs packed and in sequence. */ - xfs_attr3_leaf_moveents(leaf_src, &ichdr_src, 0, leaf_dst, ichdr_dst, 0, - ichdr_src.count, mp); + xfs_attr3_leaf_moveents(leaf_s, &ichdr_s, 0, leaf_d, ichdr_d, 0, + ichdr_s.count, mp); /* * this logs the entire buffer, but the caller must write the header * back to the buffer when it is finished modifying it. @@ -2191,24 +2179,14 @@ xfs_attr3_leaf_unbalance( struct xfs_attr_leafblock *tmp_leaf; struct xfs_attr3_icleaf_hdr tmphdr; - tmp_leaf = kmem_zalloc(state->blocksize, KM_SLEEP); - - /* - * Copy the header into the temp leaf so that all the stuff - * not in the incore header is present and gets copied back in - * once we've moved all the entries. 
- */ - memcpy(tmp_leaf, save_leaf, xfs_attr3_leaf_hdr_size(save_leaf)); - + tmp_leaf = kmem_alloc(state->blocksize, KM_SLEEP); + memset(tmp_leaf, 0, state->blocksize); memset(&tmphdr, 0, sizeof(tmphdr)); + tmphdr.magic = savehdr.magic; tmphdr.forw = savehdr.forw; tmphdr.back = savehdr.back; tmphdr.firstused = state->blocksize; - - /* write the header to the temp buffer to initialise it */ - xfs_attr3_leaf_hdr_to_disk(tmp_leaf, &tmphdr); - if (xfs_attr3_leaf_order(save_blk->bp, &savehdr, drop_blk->bp, &drophdr)) { xfs_attr3_leaf_moveents(drop_leaf, &drophdr, 0, @@ -2352,11 +2330,9 @@ xfs_attr3_leaf_lookup_int( if (!xfs_attr_namesp_match(args->flags, entry->flags)) continue; args->index = probe; - args->valuelen = be32_to_cpu(name_rmt->valuelen); args->rmtblkno = be32_to_cpu(name_rmt->valueblk); - args->rmtblkcnt = xfs_attr3_rmt_blocks( - args->dp->i_mount, - args->valuelen); + args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, + be32_to_cpu(name_rmt->valuelen)); return XFS_ERROR(EEXIST); } } @@ -2407,8 +2383,7 @@ xfs_attr3_leaf_getvalue( ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0); valuelen = be32_to_cpu(name_rmt->valuelen); args->rmtblkno = be32_to_cpu(name_rmt->valueblk); - args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount, - valuelen); + args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen); if (args->flags & ATTR_KERNOVAL) { args->valuelen = valuelen; return 0; @@ -2734,8 +2709,7 @@ xfs_attr3_leaf_list_int( args.valuelen = valuelen; args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS); args.rmtblkno = be32_to_cpu(name_rmt->valueblk); - args.rmtblkcnt = xfs_attr3_rmt_blocks( - args.dp->i_mount, valuelen); + args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen); retval = xfs_attr_rmtval_get(&args); if (retval) return retval; @@ -3258,7 +3232,7 @@ xfs_attr3_leaf_inactive( name_rmt = xfs_attr3_leaf_name_remote(leaf, i); if (name_rmt->valueblk) { lp->valueblk = be32_to_cpu(name_rmt->valueblk); - lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount, + lp->valuelen = XFS_B_TO_FSB(dp->i_mount, be32_to_cpu(name_rmt->valuelen)); lp++; } diff --git a/trunk/fs/xfs/xfs_attr_leaf.h b/trunk/fs/xfs/xfs_attr_leaf.h index 444a7704596c..f9d7846097e2 100644 --- a/trunk/fs/xfs/xfs_attr_leaf.h +++ b/trunk/fs/xfs/xfs_attr_leaf.h @@ -128,7 +128,6 @@ struct xfs_attr3_leaf_hdr { __u8 holes; __u8 pad1; struct xfs_attr_leaf_map freemap[XFS_ATTR_LEAF_MAPSIZE]; - __be32 pad2; /* 64 bit alignment */ }; #define XFS_ATTR3_LEAF_CRC_OFF (offsetof(struct xfs_attr3_leaf_hdr, info.crc)) diff --git a/trunk/fs/xfs/xfs_attr_remote.c b/trunk/fs/xfs/xfs_attr_remote.c index ef6b0c124528..dee84466dcc9 100644 --- a/trunk/fs/xfs/xfs_attr_remote.c +++ b/trunk/fs/xfs/xfs_attr_remote.c @@ -47,55 +47,22 @@ * Each contiguous block has a header, so it is not just a simple attribute * length to FSB conversion. */ -int +static int xfs_attr3_rmt_blocks( struct xfs_mount *mp, int attrlen) { - if (xfs_sb_version_hascrc(&mp->m_sb)) { - int buflen = XFS_ATTR3_RMT_BUF_SPACE(mp, mp->m_sb.sb_blocksize); - return (attrlen + buflen - 1) / buflen; - } - return XFS_B_TO_FSB(mp, attrlen); -} - -/* - * Checking of the remote attribute header is split into two parts. The verifier - * does CRC, location and bounds checking, the unpacking function checks the - * attribute parameters and owner. 
- */ -static bool -xfs_attr3_rmt_hdr_ok( - struct xfs_mount *mp, - void *ptr, - xfs_ino_t ino, - uint32_t offset, - uint32_t size, - xfs_daddr_t bno) -{ - struct xfs_attr3_rmt_hdr *rmt = ptr; - - if (bno != be64_to_cpu(rmt->rm_blkno)) - return false; - if (offset != be32_to_cpu(rmt->rm_offset)) - return false; - if (size != be32_to_cpu(rmt->rm_bytes)) - return false; - if (ino != be64_to_cpu(rmt->rm_owner)) - return false; - - /* ok */ - return true; + int buflen = XFS_ATTR3_RMT_BUF_SPACE(mp, + mp->m_sb.sb_blocksize); + return (attrlen + buflen - 1) / buflen; } static bool xfs_attr3_rmt_verify( - struct xfs_mount *mp, - void *ptr, - int fsbsize, - xfs_daddr_t bno) + struct xfs_buf *bp) { - struct xfs_attr3_rmt_hdr *rmt = ptr; + struct xfs_mount *mp = bp->b_target->bt_mount; + struct xfs_attr3_rmt_hdr *rmt = bp->b_addr; if (!xfs_sb_version_hascrc(&mp->m_sb)) return false; @@ -103,9 +70,7 @@ xfs_attr3_rmt_verify( return false; if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_uuid)) return false; - if (be64_to_cpu(rmt->rm_blkno) != bno) - return false; - if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt)) + if (bp->b_bn != be64_to_cpu(rmt->rm_blkno)) return false; if (be32_to_cpu(rmt->rm_offset) + be32_to_cpu(rmt->rm_bytes) >= XATTR_SIZE_MAX) @@ -121,40 +86,17 @@ xfs_attr3_rmt_read_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; - char *ptr; - int len; - bool corrupt = false; - xfs_daddr_t bno; /* no verification of non-crc buffers */ if (!xfs_sb_version_hascrc(&mp->m_sb)) return; - ptr = bp->b_addr; - bno = bp->b_bn; - len = BBTOB(bp->b_length); - ASSERT(len >= XFS_LBSIZE(mp)); - - while (len > 0) { - if (!xfs_verify_cksum(ptr, XFS_LBSIZE(mp), - XFS_ATTR3_RMT_CRC_OFF)) { - corrupt = true; - break; - } - if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) { - corrupt = true; - break; - } - len -= XFS_LBSIZE(mp); - ptr += XFS_LBSIZE(mp); - bno += mp->m_bsize; - } - - if (corrupt) { + if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length), + XFS_ATTR3_RMT_CRC_OFF) || + !xfs_attr3_rmt_verify(bp)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr); xfs_buf_ioerror(bp, EFSCORRUPTED); - } else - ASSERT(len == 0); + } } static void @@ -163,39 +105,23 @@ xfs_attr3_rmt_write_verify( { struct xfs_mount *mp = bp->b_target->bt_mount; struct xfs_buf_log_item *bip = bp->b_fspriv; - char *ptr; - int len; - xfs_daddr_t bno; /* no verification of non-crc buffers */ if (!xfs_sb_version_hascrc(&mp->m_sb)) return; - ptr = bp->b_addr; - bno = bp->b_bn; - len = BBTOB(bp->b_length); - ASSERT(len >= XFS_LBSIZE(mp)); - - while (len > 0) { - if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) { - XFS_CORRUPTION_ERROR(__func__, - XFS_ERRLEVEL_LOW, mp, bp->b_addr); - xfs_buf_ioerror(bp, EFSCORRUPTED); - return; - } - if (bip) { - struct xfs_attr3_rmt_hdr *rmt; - - rmt = (struct xfs_attr3_rmt_hdr *)ptr; - rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn); - } - xfs_update_cksum(ptr, XFS_LBSIZE(mp), XFS_ATTR3_RMT_CRC_OFF); + if (!xfs_attr3_rmt_verify(bp)) { + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr); + xfs_buf_ioerror(bp, EFSCORRUPTED); + return; + } - len -= XFS_LBSIZE(mp); - ptr += XFS_LBSIZE(mp); - bno += mp->m_bsize; + if (bip) { + struct xfs_attr3_rmt_hdr *rmt = bp->b_addr; + rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn); } - ASSERT(len == 0); + xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length), + XFS_ATTR3_RMT_CRC_OFF); } const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = { @@ -203,16 +129,15 @@ const struct xfs_buf_ops xfs_attr3_rmt_buf_ops 
= { .verify_write = xfs_attr3_rmt_write_verify, }; -STATIC int +static int xfs_attr3_rmt_hdr_set( struct xfs_mount *mp, - void *ptr, xfs_ino_t ino, uint32_t offset, uint32_t size, - xfs_daddr_t bno) + struct xfs_buf *bp) { - struct xfs_attr3_rmt_hdr *rmt = ptr; + struct xfs_attr3_rmt_hdr *rmt = bp->b_addr; if (!xfs_sb_version_hascrc(&mp->m_sb)) return 0; @@ -222,107 +147,36 @@ xfs_attr3_rmt_hdr_set( rmt->rm_bytes = cpu_to_be32(size); uuid_copy(&rmt->rm_uuid, &mp->m_sb.sb_uuid); rmt->rm_owner = cpu_to_be64(ino); - rmt->rm_blkno = cpu_to_be64(bno); + rmt->rm_blkno = cpu_to_be64(bp->b_bn); + bp->b_ops = &xfs_attr3_rmt_buf_ops; return sizeof(struct xfs_attr3_rmt_hdr); } /* - * Helper functions to copy attribute data in and out of the one disk extents + * Checking of the remote attribute header is split into two parts. the verifier + * does CRC, location and bounds checking, the unpacking function checks the + * attribute parameters and owner. */ -STATIC int -xfs_attr_rmtval_copyout( - struct xfs_mount *mp, - struct xfs_buf *bp, - xfs_ino_t ino, - int *offset, - int *valuelen, - char **dst) -{ - char *src = bp->b_addr; - xfs_daddr_t bno = bp->b_bn; - int len = BBTOB(bp->b_length); - - ASSERT(len >= XFS_LBSIZE(mp)); - - while (len > 0 && *valuelen > 0) { - int hdr_size = 0; - int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp)); - - byte_cnt = min_t(int, *valuelen, byte_cnt); - - if (xfs_sb_version_hascrc(&mp->m_sb)) { - if (!xfs_attr3_rmt_hdr_ok(mp, src, ino, *offset, - byte_cnt, bno)) { - xfs_alert(mp, -"remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)", - bno, *offset, byte_cnt, ino); - return EFSCORRUPTED; - } - hdr_size = sizeof(struct xfs_attr3_rmt_hdr); - } - - memcpy(*dst, src + hdr_size, byte_cnt); - - /* roll buffer forwards */ - len -= XFS_LBSIZE(mp); - src += XFS_LBSIZE(mp); - bno += mp->m_bsize; - - /* roll attribute data forwards */ - *valuelen -= byte_cnt; - *dst += byte_cnt; - *offset += byte_cnt; - } - return 0; -} - -STATIC void -xfs_attr_rmtval_copyin( - struct xfs_mount *mp, - struct xfs_buf *bp, - xfs_ino_t ino, - int *offset, - int *valuelen, - char **src) +static bool +xfs_attr3_rmt_hdr_ok( + struct xfs_mount *mp, + xfs_ino_t ino, + uint32_t offset, + uint32_t size, + struct xfs_buf *bp) { - char *dst = bp->b_addr; - xfs_daddr_t bno = bp->b_bn; - int len = BBTOB(bp->b_length); - - ASSERT(len >= XFS_LBSIZE(mp)); - - while (len > 0 && *valuelen > 0) { - int hdr_size; - int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp)); - - byte_cnt = min(*valuelen, byte_cnt); - hdr_size = xfs_attr3_rmt_hdr_set(mp, dst, ino, *offset, - byte_cnt, bno); + struct xfs_attr3_rmt_hdr *rmt = bp->b_addr; - memcpy(dst + hdr_size, *src, byte_cnt); - - /* - * If this is the last block, zero the remainder of it. - * Check that we are actually the last block, too. 
- */ - if (byte_cnt + hdr_size < XFS_LBSIZE(mp)) { - ASSERT(*valuelen - byte_cnt == 0); - ASSERT(len == XFS_LBSIZE(mp)); - memset(dst + hdr_size + byte_cnt, 0, - XFS_LBSIZE(mp) - hdr_size - byte_cnt); - } - - /* roll buffer forwards */ - len -= XFS_LBSIZE(mp); - dst += XFS_LBSIZE(mp); - bno += mp->m_bsize; + if (offset != be32_to_cpu(rmt->rm_offset)) + return false; + if (size != be32_to_cpu(rmt->rm_bytes)) + return false; + if (ino != be64_to_cpu(rmt->rm_owner)) + return false; - /* roll attribute data forwards */ - *valuelen -= byte_cnt; - *src += byte_cnt; - *offset += byte_cnt; - } + /* ok */ + return true; } /* @@ -336,12 +190,13 @@ xfs_attr_rmtval_get( struct xfs_bmbt_irec map[ATTR_RMTVALUE_MAPSIZE]; struct xfs_mount *mp = args->dp->i_mount; struct xfs_buf *bp; + xfs_daddr_t dblkno; xfs_dablk_t lblkno = args->rmtblkno; - char *dst = args->value; + void *dst = args->value; int valuelen = args->valuelen; int nmap; int error; - int blkcnt = args->rmtblkcnt; + int blkcnt; int i; int offset = 0; @@ -352,36 +207,52 @@ xfs_attr_rmtval_get( while (valuelen > 0) { nmap = ATTR_RMTVALUE_MAPSIZE; error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno, - blkcnt, map, &nmap, + args->rmtblkcnt, map, &nmap, XFS_BMAPI_ATTRFORK); if (error) return error; ASSERT(nmap >= 1); for (i = 0; (i < nmap) && (valuelen > 0); i++) { - xfs_daddr_t dblkno; - int dblkcnt; + int byte_cnt; + char *src; ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) && (map[i].br_startblock != HOLESTARTBLOCK)); dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock); - dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount); + blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount); error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, - dblkno, dblkcnt, 0, &bp, + dblkno, blkcnt, 0, &bp, &xfs_attr3_rmt_buf_ops); if (error) return error; - error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino, - &offset, &valuelen, - &dst); + byte_cnt = min_t(int, valuelen, BBTOB(bp->b_length)); + byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, byte_cnt); + + src = bp->b_addr; + if (xfs_sb_version_hascrc(&mp->m_sb)) { + if (!xfs_attr3_rmt_hdr_ok(mp, args->dp->i_ino, + offset, byte_cnt, bp)) { + xfs_alert(mp, +"remote attribute header does not match required off/len/owner (0x%x/Ox%x,0x%llx)", + offset, byte_cnt, args->dp->i_ino); + xfs_buf_relse(bp); + return EFSCORRUPTED; + + } + + src += sizeof(struct xfs_attr3_rmt_hdr); + } + + memcpy(dst, src, byte_cnt); xfs_buf_relse(bp); - if (error) - return error; - /* roll attribute extent map forwards */ + offset += byte_cnt; + dst += byte_cnt; + valuelen -= byte_cnt; + lblkno += map[i].br_blockcount; - blkcnt -= map[i].br_blockcount; } } ASSERT(valuelen == 0); @@ -399,13 +270,17 @@ xfs_attr_rmtval_set( struct xfs_inode *dp = args->dp; struct xfs_mount *mp = dp->i_mount; struct xfs_bmbt_irec map; + struct xfs_buf *bp; + xfs_daddr_t dblkno; xfs_dablk_t lblkno; xfs_fileoff_t lfileoff = 0; - char *src = args->value; + void *src = args->value; int blkcnt; int valuelen; int nmap; int error; + int hdrcnt = 0; + bool crcs = xfs_sb_version_hascrc(&mp->m_sb); int offset = 0; trace_xfs_attr_rmtval_set(args); @@ -414,14 +289,24 @@ xfs_attr_rmtval_set( * Find a "hole" in the attribute address space large enough for * us to drop the new attribute's value into. Because CRC enable * attributes have headers, we can't just do a straight byte to FSB - * conversion and have to take the header space into account. + * conversion. 
We calculate the worst case block count in this case + * and we may not need that many, so we have to handle this when + * allocating the blocks below. */ - blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen); + if (!crcs) + blkcnt = XFS_B_TO_FSB(mp, args->valuelen); + else + blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen); + error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff, XFS_ATTR_FORK); if (error) return error; + /* Start with the attribute data. We'll allocate the rest afterwards. */ + if (crcs) + blkcnt = XFS_B_TO_FSB(mp, args->valuelen); + args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff; args->rmtblkcnt = blkcnt; @@ -464,6 +349,26 @@ xfs_attr_rmtval_set( (map.br_startblock != HOLESTARTBLOCK)); lblkno += map.br_blockcount; blkcnt -= map.br_blockcount; + hdrcnt++; + + /* + * If we have enough blocks for the attribute data, calculate + * how many extra blocks we need for headers. We might run + * through this multiple times in the case that the additional + * headers in the blocks needed for the data fragments spills + * into requiring more blocks. e.g. for 512 byte blocks, we'll + * spill for another block every 9 headers we require in this + * loop. + */ + if (crcs && blkcnt == 0) { + int total_len; + + total_len = args->valuelen + + hdrcnt * sizeof(struct xfs_attr3_rmt_hdr); + blkcnt = XFS_B_TO_FSB(mp, total_len); + blkcnt -= args->rmtblkcnt; + args->rmtblkcnt += blkcnt; + } /* * Start the next trans in the chain. @@ -480,19 +385,18 @@ xfs_attr_rmtval_set( * the INCOMPLETE flag. */ lblkno = args->rmtblkno; - blkcnt = args->rmtblkcnt; valuelen = args->valuelen; while (valuelen > 0) { - struct xfs_buf *bp; - xfs_daddr_t dblkno; - int dblkcnt; - - ASSERT(blkcnt > 0); + int byte_cnt; + char *buf; + /* + * Try to remember where we decided to put the value. + */ xfs_bmap_init(args->flist, args->firstblock); nmap = 1; error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno, - blkcnt, &map, &nmap, + args->rmtblkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK); if (error) return(error); @@ -501,27 +405,41 @@ xfs_attr_rmtval_set( (map.br_startblock != HOLESTARTBLOCK)); dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), - dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); + blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); - bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, 0); + bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt, 0); if (!bp) return ENOMEM; bp->b_ops = &xfs_attr3_rmt_buf_ops; - xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset, - &valuelen, &src); + byte_cnt = BBTOB(bp->b_length); + byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, byte_cnt); + if (valuelen < byte_cnt) + byte_cnt = valuelen; + + buf = bp->b_addr; + buf += xfs_attr3_rmt_hdr_set(mp, dp->i_ino, offset, + byte_cnt, bp); + memcpy(buf, src, byte_cnt); + + if (byte_cnt < BBTOB(bp->b_length)) + xfs_buf_zero(bp, byte_cnt, + BBTOB(bp->b_length) - byte_cnt); error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */ xfs_buf_relse(bp); if (error) return error; + src += byte_cnt; + valuelen -= byte_cnt; + offset += byte_cnt; + hdrcnt--; - /* roll attribute extent map forwards */ lblkno += map.br_blockcount; - blkcnt -= map.br_blockcount; } ASSERT(valuelen == 0); + ASSERT(hdrcnt == 0); return 0; } @@ -530,40 +448,33 @@ xfs_attr_rmtval_set( * out-of-line buffer that it is stored on. 
*/ int -xfs_attr_rmtval_remove( - struct xfs_da_args *args) +xfs_attr_rmtval_remove(xfs_da_args_t *args) { - struct xfs_mount *mp = args->dp->i_mount; - xfs_dablk_t lblkno; - int blkcnt; - int error; - int done; + xfs_mount_t *mp; + xfs_bmbt_irec_t map; + xfs_buf_t *bp; + xfs_daddr_t dblkno; + xfs_dablk_t lblkno; + int valuelen, blkcnt, nmap, error, done, committed; trace_xfs_attr_rmtval_remove(args); + mp = args->dp->i_mount; + /* - * Roll through the "value", invalidating the attribute value's blocks. - * Note that args->rmtblkcnt is the minimum number of data blocks we'll - * see for a CRC enabled remote attribute. Each extent will have a - * header, and so we may have more blocks than we realise here. If we - * fail to map the blocks correctly, we'll have problems with the buffer - * lookups. + * Roll through the "value", invalidating the attribute value's + * blocks. */ lblkno = args->rmtblkno; - blkcnt = args->rmtblkcnt; - while (blkcnt > 0) { - struct xfs_bmbt_irec map; - struct xfs_buf *bp; - xfs_daddr_t dblkno; - int dblkcnt; - int nmap; - + valuelen = args->rmtblkcnt; + while (valuelen > 0) { /* * Try to remember where we decided to put the value. */ nmap = 1; error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno, - blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK); + args->rmtblkcnt, &map, &nmap, + XFS_BMAPI_ATTRFORK); if (error) return(error); ASSERT(nmap == 1); @@ -571,20 +482,21 @@ xfs_attr_rmtval_remove( (map.br_startblock != HOLESTARTBLOCK)); dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), - dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); + blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); /* * If the "remote" value is in the cache, remove it. */ - bp = xfs_incore(mp->m_ddev_targp, dblkno, dblkcnt, XBF_TRYLOCK); + bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt, XBF_TRYLOCK); if (bp) { xfs_buf_stale(bp); xfs_buf_relse(bp); bp = NULL; } + valuelen -= map.br_blockcount; + lblkno += map.br_blockcount; - blkcnt -= map.br_blockcount; } /* @@ -594,8 +506,6 @@ xfs_attr_rmtval_remove( blkcnt = args->rmtblkcnt; done = 0; while (!done) { - int committed; - xfs_bmap_init(args->flist, args->firstblock); error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt, XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, diff --git a/trunk/fs/xfs/xfs_attr_remote.h b/trunk/fs/xfs/xfs_attr_remote.h index 92a8fd7977cc..c7cca60a062a 100644 --- a/trunk/fs/xfs/xfs_attr_remote.h +++ b/trunk/fs/xfs/xfs_attr_remote.h @@ -20,14 +20,6 @@ #define XFS_ATTR3_RMT_MAGIC 0x5841524d /* XARM */ -/* - * There is one of these headers per filesystem block in a remote attribute. - * This is done to ensure there is a 1:1 mapping between the attribute value - * length and the number of blocks needed to store the attribute. This makes the - * verification of a buffer a little more complex, but greatly simplifies the - * allocation, reading and writing of these attributes as we don't have to guess - * the number of blocks needed to store the attribute data. 
- */ struct xfs_attr3_rmt_hdr { __be32 rm_magic; __be32 rm_offset; @@ -47,8 +39,6 @@ struct xfs_attr3_rmt_hdr { extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops; -int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen); - int xfs_attr_rmtval_get(struct xfs_da_args *args); int xfs_attr_rmtval_set(struct xfs_da_args *args); int xfs_attr_rmtval_remove(struct xfs_da_args *args); diff --git a/trunk/fs/xfs/xfs_btree.c b/trunk/fs/xfs/xfs_btree.c index 0903960410a2..8804b8a3c310 100644 --- a/trunk/fs/xfs/xfs_btree.c +++ b/trunk/fs/xfs/xfs_btree.c @@ -2544,17 +2544,7 @@ xfs_btree_new_iroot( if (error) goto error0; - /* - * we can't just memcpy() the root in for CRC enabled btree blocks. - * In that case have to also ensure the blkno remains correct - */ memcpy(cblock, block, xfs_btree_block_len(cur)); - if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) { - if (cur->bc_flags & XFS_BTREE_LONG_PTRS) - cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn); - else - cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn); - } be16_add_cpu(&block->bb_level, 1); xfs_btree_set_numrecs(block, 1); diff --git a/trunk/fs/xfs/xfs_buf.c b/trunk/fs/xfs/xfs_buf.c index 1b2472a46e46..82b70bda9f47 100644 --- a/trunk/fs/xfs/xfs_buf.c +++ b/trunk/fs/xfs/xfs_buf.c @@ -513,7 +513,6 @@ _xfs_buf_find( xfs_alert(btp->bt_mount, "%s: Block out of range: block 0x%llx, EOFS 0x%llx ", __func__, blkno, eofs); - WARN_ON(1); return NULL; } @@ -1650,7 +1649,7 @@ xfs_alloc_buftarg( { xfs_buftarg_t *btp; - btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS); + btp = kmem_zalloc(sizeof(*btp), KM_SLEEP); btp->bt_mount = mp; btp->bt_dev = bdev->bd_dev; diff --git a/trunk/fs/xfs/xfs_buf_item.c b/trunk/fs/xfs/xfs_buf_item.c index 4ec431777048..cf263476d6b4 100644 --- a/trunk/fs/xfs/xfs_buf_item.c +++ b/trunk/fs/xfs/xfs_buf_item.c @@ -262,7 +262,12 @@ xfs_buf_item_format_segment( vecp->i_addr = xfs_buf_offset(bp, buffer_offset); vecp->i_len = nbits * XFS_BLF_CHUNK; vecp->i_type = XLOG_REG_TYPE_BCHUNK; - nvecs++; +/* + * You would think we need to bump the nvecs here too, but we do not + * this number is used by recovery, and it gets confused by the boundary + * split here + * nvecs++; + */ vecp++; first_bit = next_bit; last_bit = next_bit; diff --git a/trunk/fs/xfs/xfs_da_btree.c b/trunk/fs/xfs/xfs_da_btree.c index 0b8b2a13cd24..9b26a99ebfe9 100644 --- a/trunk/fs/xfs/xfs_da_btree.c +++ b/trunk/fs/xfs/xfs_da_btree.c @@ -270,7 +270,6 @@ xfs_da3_node_read_verify( break; return; case XFS_ATTR_LEAF_MAGIC: - case XFS_ATTR3_LEAF_MAGIC: bp->b_ops = &xfs_attr3_leaf_buf_ops; bp->b_ops->verify_read(bp); return; @@ -2465,8 +2464,7 @@ xfs_buf_map_from_irec( ASSERT(nirecs >= 1); if (nirecs > 1) { - map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), - KM_SLEEP | KM_NOFS); + map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_SLEEP); if (!map) return ENOMEM; *mapp = map; @@ -2522,8 +2520,7 @@ xfs_dabuf_map( * Optimize the one-block case. 
*/ if (nfsb != 1) - irecs = kmem_zalloc(sizeof(irec) * nfsb, - KM_SLEEP | KM_NOFS); + irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_SLEEP); nirecs = nfsb; error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs, diff --git a/trunk/fs/xfs/xfs_dfrag.c b/trunk/fs/xfs/xfs_dfrag.c index c407e1ccff43..f852b082a084 100644 --- a/trunk/fs/xfs/xfs_dfrag.c +++ b/trunk/fs/xfs/xfs_dfrag.c @@ -219,14 +219,6 @@ xfs_swap_extents( int taforkblks = 0; __uint64_t tmp; - /* - * We have no way of updating owner information in the BMBT blocks for - * each inode on CRC enabled filesystems, so to avoid corrupting the - * this metadata we simply don't allow extent swaps to occur. - */ - if (xfs_sb_version_hascrc(&mp->m_sb)) - return XFS_ERROR(EINVAL); - tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL); if (!tempifp) { error = XFS_ERROR(ENOMEM); diff --git a/trunk/fs/xfs/xfs_dir2.c b/trunk/fs/xfs/xfs_dir2.c index 8f023dee404d..b26a50f9921d 100644 --- a/trunk/fs/xfs/xfs_dir2.c +++ b/trunk/fs/xfs/xfs_dir2.c @@ -368,8 +368,10 @@ xfs_dir_removename( int xfs_readdir( xfs_inode_t *dp, - struct dir_context *ctx, - size_t bufsize) + void *dirent, + size_t bufsize, + xfs_off_t *offset, + filldir_t filldir) { int rval; /* return value */ int v; /* type-checking value */ @@ -383,13 +385,14 @@ xfs_readdir( XFS_STATS_INC(xs_dir_getdents); if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) - rval = xfs_dir2_sf_getdents(dp, ctx); + rval = xfs_dir2_sf_getdents(dp, dirent, offset, filldir); else if ((rval = xfs_dir2_isblock(NULL, dp, &v))) ; else if (v) - rval = xfs_dir2_block_getdents(dp, ctx); + rval = xfs_dir2_block_getdents(dp, dirent, offset, filldir); else - rval = xfs_dir2_leaf_getdents(dp, ctx, bufsize); + rval = xfs_dir2_leaf_getdents(dp, dirent, bufsize, offset, + filldir); return rval; } diff --git a/trunk/fs/xfs/xfs_dir2_block.c b/trunk/fs/xfs/xfs_dir2_block.c index 09aea0247d96..e59f5fc816fe 100644 --- a/trunk/fs/xfs/xfs_dir2_block.c +++ b/trunk/fs/xfs/xfs_dir2_block.c @@ -569,7 +569,9 @@ xfs_dir2_block_addname( int /* error */ xfs_dir2_block_getdents( xfs_inode_t *dp, /* incore inode */ - struct dir_context *ctx) + void *dirent, + xfs_off_t *offset, + filldir_t filldir) { xfs_dir2_data_hdr_t *hdr; /* block header */ struct xfs_buf *bp; /* buffer for block */ @@ -587,7 +589,7 @@ xfs_dir2_block_getdents( /* * If the block number in the offset is out of range, we're done. */ - if (xfs_dir2_dataptr_to_db(mp, ctx->pos) > mp->m_dirdatablk) + if (xfs_dir2_dataptr_to_db(mp, *offset) > mp->m_dirdatablk) return 0; error = xfs_dir3_block_read(NULL, dp, &bp); @@ -598,7 +600,7 @@ xfs_dir2_block_getdents( * Extract the byte offset we start at from the seek pointer. * We'll skip entries before this. */ - wantoff = xfs_dir2_dataptr_to_off(mp, ctx->pos); + wantoff = xfs_dir2_dataptr_to_off(mp, *offset); hdr = bp->b_addr; xfs_dir3_data_check(dp, bp); /* @@ -637,12 +639,13 @@ xfs_dir2_block_getdents( cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, (char *)dep - (char *)hdr); - ctx->pos = cook & 0x7fffffff; /* * If it didn't fit, set the final offset to here & return. */ - if (!dir_emit(ctx, (char *)dep->name, dep->namelen, - be64_to_cpu(dep->inumber), DT_UNKNOWN)) { + if (filldir(dirent, (char *)dep->name, dep->namelen, + cook & 0x7fffffff, be64_to_cpu(dep->inumber), + DT_UNKNOWN)) { + *offset = cook & 0x7fffffff; xfs_trans_brelse(NULL, bp); return 0; } @@ -652,7 +655,7 @@ xfs_dir2_block_getdents( * Reached the end of the block. * Set the offset to a non-existent block 1 and return. 
*/ - ctx->pos = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) & + *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) & 0x7fffffff; xfs_trans_brelse(NULL, bp); return 0; diff --git a/trunk/fs/xfs/xfs_dir2_format.h b/trunk/fs/xfs/xfs_dir2_format.h index 7826782b8d78..a3b1bd841a80 100644 --- a/trunk/fs/xfs/xfs_dir2_format.h +++ b/trunk/fs/xfs/xfs_dir2_format.h @@ -266,7 +266,6 @@ struct xfs_dir3_blk_hdr { struct xfs_dir3_data_hdr { struct xfs_dir3_blk_hdr hdr; xfs_dir2_data_free_t best_free[XFS_DIR2_DATA_FD_COUNT]; - __be32 pad; /* 64 bit alignment */ }; #define XFS_DIR3_DATA_CRC_OFF offsetof(struct xfs_dir3_data_hdr, hdr.crc) @@ -478,7 +477,7 @@ struct xfs_dir3_leaf_hdr { struct xfs_da3_blkinfo info; /* header for da routines */ __be16 count; /* count of entries */ __be16 stale; /* count of stale entries */ - __be32 pad; /* 64 bit alignment */ + __be32 pad; }; struct xfs_dir3_icleaf_hdr { @@ -716,7 +715,6 @@ struct xfs_dir3_free_hdr { __be32 firstdb; /* db of first entry */ __be32 nvalid; /* count of valid entries */ __be32 nused; /* count of used entries */ - __be32 pad; /* 64 bit alignment */ }; struct xfs_dir3_free { diff --git a/trunk/fs/xfs/xfs_dir2_leaf.c b/trunk/fs/xfs/xfs_dir2_leaf.c index e0cc1243a8aa..721ba2fe8e54 100644 --- a/trunk/fs/xfs/xfs_dir2_leaf.c +++ b/trunk/fs/xfs/xfs_dir2_leaf.c @@ -1300,8 +1300,10 @@ xfs_dir2_leaf_readbuf( int /* error */ xfs_dir2_leaf_getdents( xfs_inode_t *dp, /* incore directory inode */ - struct dir_context *ctx, - size_t bufsize) + void *dirent, + size_t bufsize, + xfs_off_t *offset, + filldir_t filldir) { struct xfs_buf *bp = NULL; /* data block buffer */ xfs_dir2_data_hdr_t *hdr; /* data block header */ @@ -1320,7 +1322,7 @@ xfs_dir2_leaf_getdents( * If the offset is at or past the largest allowed value, * give up right away. */ - if (ctx->pos >= XFS_DIR2_MAX_DATAPTR) + if (*offset >= XFS_DIR2_MAX_DATAPTR) return 0; mp = dp->i_mount; @@ -1334,14 +1336,14 @@ xfs_dir2_leaf_getdents( mp->m_sb.sb_blocksize); map_info = kmem_zalloc(offsetof(struct xfs_dir2_leaf_map_info, map) + (length * sizeof(struct xfs_bmbt_irec)), - KM_SLEEP | KM_NOFS); + KM_SLEEP); map_info->map_size = length; /* * Inside the loop we keep the main offset value as a byte offset * in the directory file. */ - curoff = xfs_dir2_dataptr_to_byte(mp, ctx->pos); + curoff = xfs_dir2_dataptr_to_byte(mp, *offset); /* * Force this conversion through db so we truncate the offset @@ -1442,8 +1444,8 @@ xfs_dir2_leaf_getdents( dep = (xfs_dir2_data_entry_t *)ptr; length = xfs_dir2_data_entsize(dep->namelen); - ctx->pos = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff; - if (!dir_emit(ctx, (char *)dep->name, dep->namelen, + if (filldir(dirent, (char *)dep->name, dep->namelen, + xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff, be64_to_cpu(dep->inumber), DT_UNKNOWN)) break; @@ -1460,9 +1462,9 @@ xfs_dir2_leaf_getdents( * All done. Set output offset value to current offset. 
*/ if (curoff > xfs_dir2_dataptr_to_byte(mp, XFS_DIR2_MAX_DATAPTR)) - ctx->pos = XFS_DIR2_MAX_DATAPTR & 0x7fffffff; + *offset = XFS_DIR2_MAX_DATAPTR & 0x7fffffff; else - ctx->pos = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff; + *offset = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff; kmem_free(map_info); if (bp) xfs_trans_brelse(NULL, bp); diff --git a/trunk/fs/xfs/xfs_dir2_node.c b/trunk/fs/xfs/xfs_dir2_node.c index 2226a00acd15..5246de4912d4 100644 --- a/trunk/fs/xfs/xfs_dir2_node.c +++ b/trunk/fs/xfs/xfs_dir2_node.c @@ -263,19 +263,18 @@ xfs_dir3_free_get_buf( * Initialize the new block to be empty, and remember * its first slot as our empty slot. */ - memset(bp->b_addr, 0, sizeof(struct xfs_dir3_free_hdr)); - memset(&hdr, 0, sizeof(hdr)); - + hdr.magic = XFS_DIR2_FREE_MAGIC; + hdr.firstdb = 0; + hdr.nused = 0; + hdr.nvalid = 0; if (xfs_sb_version_hascrc(&mp->m_sb)) { struct xfs_dir3_free_hdr *hdr3 = bp->b_addr; hdr.magic = XFS_DIR3_FREE_MAGIC; - hdr3->hdr.blkno = cpu_to_be64(bp->b_bn); hdr3->hdr.owner = cpu_to_be64(dp->i_ino); uuid_copy(&hdr3->hdr.uuid, &mp->m_sb.sb_uuid); - } else - hdr.magic = XFS_DIR2_FREE_MAGIC; + } xfs_dir3_free_hdr_to_disk(bp->b_addr, &hdr); *bpp = bp; return 0; @@ -1922,6 +1921,8 @@ xfs_dir2_node_addname_int( */ freehdr.firstdb = (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) * xfs_dir3_free_max_bests(mp); + free->hdr.nvalid = 0; + free->hdr.nused = 0; } else { free = fbp->b_addr; bests = xfs_dir3_free_bests_p(mp, free); diff --git a/trunk/fs/xfs/xfs_dir2_priv.h b/trunk/fs/xfs/xfs_dir2_priv.h index 0511cda4a712..7cf573c88aad 100644 --- a/trunk/fs/xfs/xfs_dir2_priv.h +++ b/trunk/fs/xfs/xfs_dir2_priv.h @@ -33,8 +33,8 @@ extern int xfs_dir_cilookup_result(struct xfs_da_args *args, extern const struct xfs_buf_ops xfs_dir3_block_buf_ops; extern int xfs_dir2_block_addname(struct xfs_da_args *args); -extern int xfs_dir2_block_getdents(struct xfs_inode *dp, - struct dir_context *ctx); +extern int xfs_dir2_block_getdents(struct xfs_inode *dp, void *dirent, + xfs_off_t *offset, filldir_t filldir); extern int xfs_dir2_block_lookup(struct xfs_da_args *args); extern int xfs_dir2_block_removename(struct xfs_da_args *args); extern int xfs_dir2_block_replace(struct xfs_da_args *args); @@ -91,8 +91,8 @@ extern void xfs_dir3_leaf_compact(struct xfs_da_args *args, extern void xfs_dir3_leaf_compact_x1(struct xfs_dir3_icleaf_hdr *leafhdr, struct xfs_dir2_leaf_entry *ents, int *indexp, int *lowstalep, int *highstalep, int *lowlogp, int *highlogp); -extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, struct dir_context *ctx, - size_t bufsize); +extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, void *dirent, + size_t bufsize, xfs_off_t *offset, filldir_t filldir); extern int xfs_dir3_leaf_get_buf(struct xfs_da_args *args, xfs_dir2_db_t bno, struct xfs_buf **bpp, __uint16_t magic); extern void xfs_dir3_leaf_log_ents(struct xfs_trans *tp, struct xfs_buf *bp, @@ -153,7 +153,8 @@ extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_buf *bp, int size, xfs_dir2_sf_hdr_t *sfhp); extern int xfs_dir2_sf_addname(struct xfs_da_args *args); extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino); -extern int xfs_dir2_sf_getdents(struct xfs_inode *dp, struct dir_context *ctx); +extern int xfs_dir2_sf_getdents(struct xfs_inode *dp, void *dirent, + xfs_off_t *offset, filldir_t filldir); extern int xfs_dir2_sf_lookup(struct xfs_da_args *args); extern int xfs_dir2_sf_removename(struct xfs_da_args *args); extern int xfs_dir2_sf_replace(struct xfs_da_args *args); 
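The directory hunks above switch XFS (like the squashfs, sysfs, sysv, ubifs, udf and ufs hunks earlier in this diff) between the two VFS directory-iteration conventions: the iterate()/dir_emit() style, where the VFS keeps the position in struct dir_context and dir_emit() returns true while the caller can still accept entries, and the readdir()/filldir() style, where the filesystem passes an opaque dirent cookie plus an explicit offset to filldir(), treats a non-zero return as "stop", and maintains file->f_pos itself. The sketch below is a minimal userspace model of that difference only, not kernel code; emit_ctx, demo_emit, demo_filldir and the slot counter are invented names standing in for the real VFS types.

#include <stdio.h>
#include <string.h>

/* Illustrative model of the two readdir callback conventions; it does not
 * reproduce the real kernel API. */

/* Old style: opaque buffer plus explicit offset; a non-zero return means
 * the consumer cannot take more entries, and the filesystem advances its
 * own f_pos only after a successful call. */
typedef int (*demo_filldir_t)(void *buf, const char *name, int namelen,
                              long long offset, unsigned long ino);

/* New style: the position lives in a context object and the emit helper
 * reports whether the entry was accepted, so the filesystem only bumps
 * ctx->pos after a successful emit. */
struct emit_ctx {
    long long pos;
    int slots_left;          /* stands in for remaining user buffer space */
};

static int demo_emit(struct emit_ctx *ctx, const char *name, int namelen,
                     unsigned long ino)
{
    if (ctx->slots_left <= 0)
        return 0;            /* buffer full: stop, pos stays unchanged */
    printf("pos=%lld ino=%lu name=%.*s\n", ctx->pos, ino, namelen, name);
    ctx->slots_left--;
    return 1;
}

/* Example consumer for the old convention. */
static int demo_filldir(void *buf, const char *name, int namelen,
                        long long offset, unsigned long ino)
{
    int *slots_left = buf;

    if (*slots_left <= 0)
        return -1;           /* non-zero tells the filesystem to stop */
    printf("off=%lld ino=%lu name=%.*s\n", offset, ino, namelen, name);
    (*slots_left)--;
    return 0;
}

int main(void)
{
    const char *names[] = { ".", "..", "foo", "bar" };
    long long f_pos = 0;     /* old style: filesystem tracks f_pos itself */
    int slots = 3;
    struct emit_ctx ctx = { .pos = 0, .slots_left = 3 };
    size_t i;

    /* Old-style loop: stop when filldir() returns non-zero, and advance
     * f_pos only after the entry was accepted. */
    for (i = (size_t)f_pos; i < 4; i++) {
        if (demo_filldir(&slots, names[i], (int)strlen(names[i]),
                         f_pos, (unsigned long)(i + 1)))
            break;
        f_pos = (long long)(i + 1);
    }

    /* New-style loop: stop when the emit helper reports a full buffer. */
    for (i = (size_t)ctx.pos; i < 4; i++) {
        if (!demo_emit(&ctx, names[i], (int)strlen(names[i]),
                       (unsigned long)(i + 1)))
            break;
        ctx.pos = (long long)(i + 1);
    }
    return 0;
}

The practical difference visible in the hunks is where the resume position lives: with filldir the filesystem writes file->f_pos directly and must take care to do so only after a successful callback so a partially filled getdents buffer resumes at the right entry, whereas dir_emit centralizes that bookkeeping in ctx->pos.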
diff --git a/trunk/fs/xfs/xfs_dir2_sf.c b/trunk/fs/xfs/xfs_dir2_sf.c index 97676a347da1..6157424dbf8f 100644 --- a/trunk/fs/xfs/xfs_dir2_sf.c +++ b/trunk/fs/xfs/xfs_dir2_sf.c @@ -768,7 +768,9 @@ xfs_dir2_sf_create( int /* error */ xfs_dir2_sf_getdents( xfs_inode_t *dp, /* incore directory inode */ - struct dir_context *ctx) + void *dirent, + xfs_off_t *offset, + filldir_t filldir) { int i; /* shortform entry number */ xfs_mount_t *mp; /* filesystem mount point */ @@ -800,7 +802,7 @@ xfs_dir2_sf_getdents( /* * If the block number in the offset is out of range, we're done. */ - if (xfs_dir2_dataptr_to_db(mp, ctx->pos) > mp->m_dirdatablk) + if (xfs_dir2_dataptr_to_db(mp, *offset) > mp->m_dirdatablk) return 0; /* @@ -817,20 +819,22 @@ xfs_dir2_sf_getdents( /* * Put . entry unless we're starting past it. */ - if (ctx->pos <= dot_offset) { - ctx->pos = dot_offset & 0x7fffffff; - if (!dir_emit(ctx, ".", 1, dp->i_ino, DT_DIR)) + if (*offset <= dot_offset) { + if (filldir(dirent, ".", 1, dot_offset & 0x7fffffff, dp->i_ino, DT_DIR)) { + *offset = dot_offset & 0x7fffffff; return 0; + } } /* * Put .. entry unless we're starting past it. */ - if (ctx->pos <= dotdot_offset) { + if (*offset <= dotdot_offset) { ino = xfs_dir2_sf_get_parent_ino(sfp); - ctx->pos = dotdot_offset & 0x7fffffff; - if (!dir_emit(ctx, "..", 2, ino, DT_DIR)) + if (filldir(dirent, "..", 2, dotdot_offset & 0x7fffffff, ino, DT_DIR)) { + *offset = dotdot_offset & 0x7fffffff; return 0; + } } /* @@ -841,20 +845,21 @@ xfs_dir2_sf_getdents( off = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, xfs_dir2_sf_get_offset(sfep)); - if (ctx->pos > off) { + if (*offset > off) { sfep = xfs_dir2_sf_nextentry(sfp, sfep); continue; } ino = xfs_dir2_sfe_get_ino(sfp, sfep); - ctx->pos = off & 0x7fffffff; - if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, - ino, DT_UNKNOWN)) + if (filldir(dirent, (char *)sfep->name, sfep->namelen, + off & 0x7fffffff, ino, DT_UNKNOWN)) { + *offset = off & 0x7fffffff; return 0; + } sfep = xfs_dir2_sf_nextentry(sfp, sfep); } - ctx->pos = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) & + *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) & 0x7fffffff; return 0; } diff --git a/trunk/fs/xfs/xfs_dquot.c b/trunk/fs/xfs/xfs_dquot.c index 044e97a33c8d..a41f8bf1da37 100644 --- a/trunk/fs/xfs/xfs_dquot.c +++ b/trunk/fs/xfs/xfs_dquot.c @@ -249,11 +249,8 @@ xfs_qm_init_dquot_blk( d->dd_diskdq.d_version = XFS_DQUOT_VERSION; d->dd_diskdq.d_id = cpu_to_be32(curid); d->dd_diskdq.d_flags = type; - if (xfs_sb_version_hascrc(&mp->m_sb)) { + if (xfs_sb_version_hascrc(&mp->m_sb)) uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid); - xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk), - XFS_DQUOT_CRC_OFF); - } } xfs_trans_dquot_buf(tp, bp, @@ -289,6 +286,23 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp) dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5; } +STATIC void +xfs_dquot_buf_calc_crc( + struct xfs_mount *mp, + struct xfs_buf *bp) +{ + struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr; + int i; + + if (!xfs_sb_version_hascrc(&mp->m_sb)) + return; + + for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++, d++) { + xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk), + offsetof(struct xfs_dqblk, dd_crc)); + } +} + STATIC bool xfs_dquot_buf_verify_crc( struct xfs_mount *mp, @@ -314,11 +328,12 @@ xfs_dquot_buf_verify_crc( for (i = 0; i < ndquots; i++, d++) { if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk), - XFS_DQUOT_CRC_OFF)) + offsetof(struct xfs_dqblk, dd_crc))) return false; if 
(!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid)) return false; } + return true; } @@ -378,11 +393,6 @@ xfs_dquot_buf_read_verify( } } -/* - * we don't calculate the CRC here as that is done when the dquot is flushed to - * the buffer after the update is done. This ensures that the dquot in the - * buffer always has an up-to-date CRC value. - */ void xfs_dquot_buf_write_verify( struct xfs_buf *bp) @@ -394,6 +404,7 @@ xfs_dquot_buf_write_verify( xfs_buf_ioerror(bp, EFSCORRUPTED); return; } + xfs_dquot_buf_calc_crc(mp, bp); } const struct xfs_buf_ops xfs_dquot_buf_ops = { @@ -1140,17 +1151,11 @@ xfs_qm_dqflush( * copy the lsn into the on-disk dquot now while we have the in memory * dquot here. This can't be done later in the write verifier as we * can't get access to the log item at that point in time. - * - * We also calculate the CRC here so that the on-disk dquot in the - * buffer always has a valid CRC. This ensures there is no possibility - * of a dquot without an up-to-date CRC getting to disk. */ if (xfs_sb_version_hascrc(&mp->m_sb)) { struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp; dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn); - xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk), - XFS_DQUOT_CRC_OFF); } /* diff --git a/trunk/fs/xfs/xfs_extfree_item.c b/trunk/fs/xfs/xfs_extfree_item.c index 452920a3f03f..c0f375087efc 100644 --- a/trunk/fs/xfs/xfs_extfree_item.c +++ b/trunk/fs/xfs/xfs_extfree_item.c @@ -305,12 +305,11 @@ xfs_efi_release(xfs_efi_log_item_t *efip, { ASSERT(atomic_read(&efip->efi_next_extent) >= nextents); if (atomic_sub_and_test(nextents, &efip->efi_next_extent)) { + __xfs_efi_release(efip); + /* recovery needs us to drop the EFI reference, too */ if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) __xfs_efi_release(efip); - - __xfs_efi_release(efip); - /* efip may now have been freed, do not reference it again. 
*/ } } diff --git a/trunk/fs/xfs/xfs_file.c b/trunk/fs/xfs/xfs_file.c index 0ad2b95fca12..a5f2042aec8b 100644 --- a/trunk/fs/xfs/xfs_file.c +++ b/trunk/fs/xfs/xfs_file.c @@ -906,10 +906,11 @@ xfs_file_release( STATIC int xfs_file_readdir( - struct file *file, - struct dir_context *ctx) + struct file *filp, + void *dirent, + filldir_t filldir) { - struct inode *inode = file_inode(file); + struct inode *inode = file_inode(filp); xfs_inode_t *ip = XFS_I(inode); int error; size_t bufsize; @@ -928,7 +929,8 @@ xfs_file_readdir( */ bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size); - error = xfs_readdir(ip, ctx, bufsize); + error = xfs_readdir(ip, dirent, bufsize, + (xfs_off_t *)&filp->f_pos, filldir); if (error) return -error; return 0; @@ -1430,7 +1432,7 @@ const struct file_operations xfs_file_operations = { const struct file_operations xfs_dir_file_operations = { .open = xfs_dir_open, .read = generic_read_dir, - .iterate = xfs_file_readdir, + .readdir = xfs_file_readdir, .llseek = generic_file_llseek, .unlocked_ioctl = xfs_file_ioctl, #ifdef CONFIG_COMPAT diff --git a/trunk/fs/xfs/xfs_fs.h b/trunk/fs/xfs/xfs_fs.h index d04695545397..6dda3f949b04 100644 --- a/trunk/fs/xfs/xfs_fs.h +++ b/trunk/fs/xfs/xfs_fs.h @@ -236,7 +236,6 @@ typedef struct xfs_fsop_resblks { #define XFS_FSOP_GEOM_FLAGS_PROJID32 0x0800 /* 32-bit project IDs */ #define XFS_FSOP_GEOM_FLAGS_DIRV2CI 0x1000 /* ASCII only CI names */ #define XFS_FSOP_GEOM_FLAGS_LAZYSB 0x4000 /* lazy superblock counters */ -#define XFS_FSOP_GEOM_FLAGS_V5SB 0x8000 /* version 5 superblock */ /* diff --git a/trunk/fs/xfs/xfs_fsops.c b/trunk/fs/xfs/xfs_fsops.c index 3c3644ea825b..87595b211da1 100644 --- a/trunk/fs/xfs/xfs_fsops.c +++ b/trunk/fs/xfs/xfs_fsops.c @@ -99,9 +99,7 @@ xfs_fs_geometry( (xfs_sb_version_hasattr2(&mp->m_sb) ? XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) | (xfs_sb_version_hasprojid32bit(&mp->m_sb) ? - XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) | - (xfs_sb_version_hascrc(&mp->m_sb) ? - XFS_FSOP_GEOM_FLAGS_V5SB : 0); + XFS_FSOP_GEOM_FLAGS_PROJID32 : 0); geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ? 
mp->m_sb.sb_logsectsize : BBSIZE; geo->rtsectsize = mp->m_sb.sb_blocksize; diff --git a/trunk/fs/xfs/xfs_inode.c b/trunk/fs/xfs/xfs_inode.c index 7f7be5f98f52..efbe1accb6ca 100644 --- a/trunk/fs/xfs/xfs_inode.c +++ b/trunk/fs/xfs/xfs_inode.c @@ -1638,10 +1638,6 @@ xfs_iunlink( dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; offset = ip->i_imap.im_boffset + offsetof(xfs_dinode_t, di_next_unlinked); - - /* need to recalc the inode CRC if appropriate */ - xfs_dinode_calc_crc(mp, dip); - xfs_trans_inode_buf(tp, ibp); xfs_trans_log_buf(tp, ibp, offset, (offset + sizeof(xfs_agino_t) - 1)); @@ -1727,10 +1723,6 @@ xfs_iunlink_remove( dip->di_next_unlinked = cpu_to_be32(NULLAGINO); offset = ip->i_imap.im_boffset + offsetof(xfs_dinode_t, di_next_unlinked); - - /* need to recalc the inode CRC if appropriate */ - xfs_dinode_calc_crc(mp, dip); - xfs_trans_inode_buf(tp, ibp); xfs_trans_log_buf(tp, ibp, offset, (offset + sizeof(xfs_agino_t) - 1)); @@ -1804,10 +1796,6 @@ xfs_iunlink_remove( dip->di_next_unlinked = cpu_to_be32(NULLAGINO); offset = ip->i_imap.im_boffset + offsetof(xfs_dinode_t, di_next_unlinked); - - /* need to recalc the inode CRC if appropriate */ - xfs_dinode_calc_crc(mp, dip); - xfs_trans_inode_buf(tp, ibp); xfs_trans_log_buf(tp, ibp, offset, (offset + sizeof(xfs_agino_t) - 1)); @@ -1821,10 +1809,6 @@ xfs_iunlink_remove( last_dip->di_next_unlinked = cpu_to_be32(next_agino); ASSERT(next_agino != 0); offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked); - - /* need to recalc the inode CRC if appropriate */ - xfs_dinode_calc_crc(mp, last_dip); - xfs_trans_inode_buf(tp, last_ibp); xfs_trans_log_buf(tp, last_ibp, offset, (offset + sizeof(xfs_agino_t) - 1)); diff --git a/trunk/fs/xfs/xfs_iops.c b/trunk/fs/xfs/xfs_iops.c index ca9ecaa81112..d82efaa2ac73 100644 --- a/trunk/fs/xfs/xfs_iops.c +++ b/trunk/fs/xfs/xfs_iops.c @@ -455,28 +455,6 @@ xfs_vn_getattr( return 0; } -static void -xfs_setattr_mode( - struct xfs_trans *tp, - struct xfs_inode *ip, - struct iattr *iattr) -{ - struct inode *inode = VFS_I(ip); - umode_t mode = iattr->ia_mode; - - ASSERT(tp); - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - - if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) - mode &= ~S_ISGID; - - ip->i_d.di_mode &= S_IFMT; - ip->i_d.di_mode |= mode & ~S_IFMT; - - inode->i_mode &= S_IFMT; - inode->i_mode |= mode & ~S_IFMT; -} - int xfs_setattr_nonsize( struct xfs_inode *ip, @@ -628,8 +606,18 @@ xfs_setattr_nonsize( /* * Change file access modes. */ - if (mask & ATTR_MODE) - xfs_setattr_mode(tp, ip, iattr); + if (mask & ATTR_MODE) { + umode_t mode = iattr->ia_mode; + + if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) + mode &= ~S_ISGID; + + ip->i_d.di_mode &= S_IFMT; + ip->i_d.di_mode |= mode & ~S_IFMT; + + inode->i_mode &= S_IFMT; + inode->i_mode |= mode & ~S_IFMT; + } /* * Change file access or modified times. @@ -726,8 +714,9 @@ xfs_setattr_size( return XFS_ERROR(error); ASSERT(S_ISREG(ip->i_d.di_mode)); - ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| - ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); + ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| + ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID| + ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); if (!(flags & XFS_ATTR_NOLOCK)) { lock_flags |= XFS_IOLOCK_EXCL; @@ -871,12 +860,6 @@ xfs_setattr_size( xfs_inode_clear_eofblocks_tag(ip); } - /* - * Change file access modes. 
- */ - if (mask & ATTR_MODE) - xfs_setattr_mode(tp, ip, iattr); - if (mask & ATTR_CTIME) { inode->i_ctime = iattr->ia_ctime; ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; diff --git a/trunk/fs/xfs/xfs_log_cil.c b/trunk/fs/xfs/xfs_log_cil.c index d0833b54e55d..e3d0b85d852b 100644 --- a/trunk/fs/xfs/xfs_log_cil.c +++ b/trunk/fs/xfs/xfs_log_cil.c @@ -139,7 +139,7 @@ xlog_cil_prepare_log_vecs( new_lv = kmem_zalloc(sizeof(*new_lv) + niovecs * sizeof(struct xfs_log_iovec), - KM_SLEEP|KM_NOFS); + KM_SLEEP); /* The allocated iovec region lies beyond the log vector. */ new_lv->lv_iovecp = (struct xfs_log_iovec *)&new_lv[1]; diff --git a/trunk/fs/xfs/xfs_log_recover.c b/trunk/fs/xfs/xfs_log_recover.c index 7cf5e4eafe28..93f03ec17eec 100644 --- a/trunk/fs/xfs/xfs_log_recover.c +++ b/trunk/fs/xfs/xfs_log_recover.c @@ -1599,43 +1599,10 @@ xlog_recover_add_to_trans( } /* - * Sort the log items in the transaction. - * - * The ordering constraints are defined by the inode allocation and unlink - * behaviour. The rules are: - * - * 1. Every item is only logged once in a given transaction. Hence it - * represents the last logged state of the item. Hence ordering is - * dependent on the order in which operations need to be performed so - * required initial conditions are always met. - * - * 2. Cancelled buffers are recorded in pass 1 in a separate table and - * there's nothing to replay from them so we can simply cull them - * from the transaction. However, we can't do that until after we've - * replayed all the other items because they may be dependent on the - * cancelled buffer and replaying the cancelled buffer can remove it - * form the cancelled buffer table. Hence they have tobe done last. - * - * 3. Inode allocation buffers must be replayed before inode items that - * read the buffer and replay changes into it. - * - * 4. Inode unlink buffers must be replayed after inode items are replayed. - * This ensures that inodes are completely flushed to the inode buffer - * in a "free" state before we remove the unlinked inode list pointer. - * - * Hence the ordering needs to be inode allocation buffers first, inode items - * second, inode unlink buffers third and cancelled buffers last. - * - * But there's a problem with that - we can't tell an inode allocation buffer - * apart from a regular buffer, so we can't separate them. We can, however, - * tell an inode unlink buffer from the others, and so we can separate them out - * from all the other buffers and move them to last. - * - * Hence, 4 lists, in order from head to tail: - * - buffer_list for all buffers except cancelled/inode unlink buffers - * - item_list for all non-buffer items - * - inode_buffer_list for inode unlink buffers - * - cancel_list for the cancelled buffers + * Sort the log items in the transaction. Cancelled buffers need + * to be put first so they are processed before any items that might + * modify the buffers. If they are cancelled, then the modifications + * don't need to be replayed. 
*/ STATIC int xlog_recover_reorder_trans( @@ -1645,10 +1612,6 @@ xlog_recover_reorder_trans( { xlog_recover_item_t *item, *n; LIST_HEAD(sort_list); - LIST_HEAD(cancel_list); - LIST_HEAD(buffer_list); - LIST_HEAD(inode_buffer_list); - LIST_HEAD(inode_list); list_splice_init(&trans->r_itemq, &sort_list); list_for_each_entry_safe(item, n, &sort_list, ri_list) { @@ -1656,18 +1619,12 @@ xlog_recover_reorder_trans( switch (ITEM_TYPE(item)) { case XFS_LI_BUF: - if (buf_f->blf_flags & XFS_BLF_CANCEL) { + if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) { trace_xfs_log_recover_item_reorder_head(log, trans, item, pass); - list_move(&item->ri_list, &cancel_list); - break; - } - if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { - list_move(&item->ri_list, &inode_buffer_list); + list_move(&item->ri_list, &trans->r_itemq); break; } - list_move_tail(&item->ri_list, &buffer_list); - break; case XFS_LI_INODE: case XFS_LI_DQUOT: case XFS_LI_QUOTAOFF: @@ -1675,7 +1632,7 @@ xlog_recover_reorder_trans( case XFS_LI_EFI: trace_xfs_log_recover_item_reorder_tail(log, trans, item, pass); - list_move_tail(&item->ri_list, &inode_list); + list_move_tail(&item->ri_list, &trans->r_itemq); break; default: xfs_warn(log->l_mp, @@ -1686,14 +1643,6 @@ xlog_recover_reorder_trans( } } ASSERT(list_empty(&sort_list)); - if (!list_empty(&buffer_list)) - list_splice(&buffer_list, &trans->r_itemq); - if (!list_empty(&inode_list)) - list_splice_tail(&inode_list, &trans->r_itemq); - if (!list_empty(&inode_buffer_list)) - list_splice_tail(&inode_buffer_list, &trans->r_itemq); - if (!list_empty(&cancel_list)) - list_splice_tail(&cancel_list, &trans->r_itemq); return 0; } @@ -1845,13 +1794,7 @@ xlog_recover_do_inode_buffer( xfs_agino_t *buffer_nextp; trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f); - - /* - * Post recovery validation only works properly on CRC enabled - * filesystems. - */ - if (xfs_sb_version_hascrc(&mp->m_sb)) - bp->b_ops = &xfs_inode_buf_ops; + bp->b_ops = &xfs_inode_buf_ops; inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog; for (i = 0; i < inodes_per_buf; i++) { @@ -1918,15 +1861,6 @@ xlog_recover_do_inode_buffer( buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp, next_unlinked_offset); *buffer_nextp = *logged_nextp; - - /* - * If necessary, recalculate the CRC in the on-disk inode. We - * have to leave the inode in a consistent state for whoever - * reads it next.... - */ - xfs_dinode_calc_crc(mp, (struct xfs_dinode *) - xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize)); - } return 0; @@ -2162,17 +2096,6 @@ xlog_recover_do_reg_buffer( ASSERT(BBTOB(bp->b_io_length) >= ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT)); - /* - * The dirty regions logged in the buffer, even though - * contiguous, may span multiple chunks. This is because the - * dirty region may span a physical page boundary in a buffer - * and hence be split into two separate vectors for writing into - * the log. Hence we need to trim nbits back to the length of - * the current region being copied out of the log. - */ - if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT)) - nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT; - /* * Do a sanity check if this is a dquot buffer. Just checking * the first dquot in the buffer should do. 
XXXThis is @@ -2211,16 +2134,7 @@ xlog_recover_do_reg_buffer( /* Shouldn't be any more regions */ ASSERT(i == item->ri_total); - /* - * We can only do post recovery validation on items on CRC enabled - * fielsystems as we need to know when the buffer was written to be able - * to determine if we should have replayed the item. If we replay old - * metadata over a newer buffer, then it will enter a temporarily - * inconsistent state resulting in verification failures. Hence for now - * just avoid the verification stage for non-crc filesystems - */ - if (xfs_sb_version_hascrc(&mp->m_sb)) - xlog_recovery_validate_buf_type(mp, bp, buf_f); + xlog_recovery_validate_buf_type(mp, bp, buf_f); } /* @@ -2341,12 +2255,6 @@ xfs_qm_dqcheck( d->dd_diskdq.d_flags = type; d->dd_diskdq.d_id = cpu_to_be32(id); - if (xfs_sb_version_hascrc(&mp->m_sb)) { - uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid); - xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk), - XFS_DQUOT_CRC_OFF); - } - return errs; } @@ -2874,10 +2782,6 @@ xlog_recover_dquot_pass2( } memcpy(ddq, recddq, item->ri_buf[1].i_len); - if (xfs_sb_version_hascrc(&mp->m_sb)) { - xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk), - XFS_DQUOT_CRC_OFF); - } ASSERT(dq_f->qlf_size == 2); ASSERT(bp->b_target->bt_mount == mp); diff --git a/trunk/fs/xfs/xfs_mount.c b/trunk/fs/xfs/xfs_mount.c index e8e310c05097..f6bfbd734669 100644 --- a/trunk/fs/xfs/xfs_mount.c +++ b/trunk/fs/xfs/xfs_mount.c @@ -314,8 +314,7 @@ STATIC int xfs_mount_validate_sb( xfs_mount_t *mp, xfs_sb_t *sbp, - bool check_inprogress, - bool check_version) + bool check_inprogress) { /* @@ -338,10 +337,9 @@ xfs_mount_validate_sb( /* * Version 5 superblock feature mask validation. Reject combinations the - * kernel cannot support up front before checking anything else. For - * write validation, we don't need to check feature masks. + * kernel cannot support up front before checking anything else. */ - if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) { + if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) { xfs_alert(mp, "Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n" "Use of these features in this kernel is at your own risk!"); @@ -677,8 +675,7 @@ xfs_sb_to_disk( static int xfs_sb_verify( - struct xfs_buf *bp, - bool check_version) + struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; struct xfs_sb sb; @@ -689,8 +686,7 @@ xfs_sb_verify( * Only check the in progress field for the primary superblock as * mkfs.xfs doesn't clear it from secondary superblocks. */ - return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR, - check_version); + return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR); } /* @@ -723,7 +719,7 @@ xfs_sb_read_verify( goto out_error; } } - error = xfs_sb_verify(bp, true); + error = xfs_sb_verify(bp); out_error: if (error) { @@ -762,7 +758,7 @@ xfs_sb_write_verify( struct xfs_buf_log_item *bip = bp->b_fspriv; int error; - error = xfs_sb_verify(bp, false); + error = xfs_sb_verify(bp); if (error) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr); xfs_buf_ioerror(bp, error); diff --git a/trunk/fs/xfs/xfs_qm.c b/trunk/fs/xfs/xfs_qm.c index b75c9bb6e71e..f41702b43003 100644 --- a/trunk/fs/xfs/xfs_qm.c +++ b/trunk/fs/xfs/xfs_qm.c @@ -41,7 +41,6 @@ #include "xfs_qm.h" #include "xfs_trace.h" #include "xfs_icache.h" -#include "xfs_cksum.h" /* * The global quota manager. 
There is only one of these for the entire @@ -840,7 +839,7 @@ xfs_qm_reset_dqcounts( xfs_dqid_t id, uint type) { - struct xfs_dqblk *dqb; + xfs_disk_dquot_t *ddq; int j; trace_xfs_reset_dqcounts(bp, _RET_IP_); @@ -854,12 +853,8 @@ xfs_qm_reset_dqcounts( do_div(j, sizeof(xfs_dqblk_t)); ASSERT(mp->m_quotainfo->qi_dqperchunk == j); #endif - dqb = bp->b_addr; + ddq = bp->b_addr; for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) { - struct xfs_disk_dquot *ddq; - - ddq = (struct xfs_disk_dquot *)&dqb[j]; - /* * Do a sanity check, and if needed, repair the dqblk. Don't * output any warnings because it's perfectly possible to @@ -876,12 +871,7 @@ xfs_qm_reset_dqcounts( ddq->d_bwarns = 0; ddq->d_iwarns = 0; ddq->d_rtbwarns = 0; - - if (xfs_sb_version_hascrc(&mp->m_sb)) { - xfs_update_cksum((char *)&dqb[j], - sizeof(struct xfs_dqblk), - XFS_DQUOT_CRC_OFF); - } + ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1); } } @@ -917,29 +907,19 @@ xfs_qm_dqiter_bufs( XFS_FSB_TO_DADDR(mp, bno), mp->m_quotainfo->qi_dqchunklen, 0, &bp, &xfs_dquot_buf_ops); - - /* - * CRC and validation errors will return a EFSCORRUPTED here. If - * this occurs, re-read without CRC validation so that we can - * repair the damage via xfs_qm_reset_dqcounts(). This process - * will leave a trace in the log indicating corruption has - * been detected. - */ - if (error == EFSCORRUPTED) { - error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, - XFS_FSB_TO_DADDR(mp, bno), - mp->m_quotainfo->qi_dqchunklen, 0, &bp, - NULL); - } - if (error) break; + /* + * XXX(hch): need to figure out if it makes sense to validate + * the CRC here. + */ xfs_qm_reset_dqcounts(mp, bp, firstid, type); xfs_buf_delwri_queue(bp, buffer_list); xfs_buf_relse(bp); - - /* goto the next block. */ + /* + * goto the next block. + */ bno++; firstid += mp->m_quotainfo->qi_dqperchunk; } diff --git a/trunk/fs/xfs/xfs_qm_syscalls.c b/trunk/fs/xfs/xfs_qm_syscalls.c index 6cdf6ffc36a1..c41190cad6e9 100644 --- a/trunk/fs/xfs/xfs_qm_syscalls.c +++ b/trunk/fs/xfs/xfs_qm_syscalls.c @@ -489,36 +489,31 @@ xfs_qm_scall_setqlim( if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) return 0; + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM); + error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp), + 0, 0, XFS_DEFAULT_LOG_COUNT); + if (error) { + xfs_trans_cancel(tp, 0); + return (error); + } + /* * We don't want to race with a quotaoff so take the quotaoff lock. - * We don't hold an inode lock, so there's nothing else to stop - * a quotaoff from happening. + * (We don't hold an inode lock, so there's nothing else to stop + * a quotaoff from happening). (XXXThis doesn't currently happen + * because we take the vfslock before calling xfs_qm_sysent). */ mutex_lock(&q->qi_quotaofflock); /* - * Get the dquot (locked) before we start, as we need to do a - * transaction to allocate it if it doesn't exist. Once we have the - * dquot, unlock it so we can start the next transaction safely. We hold - * a reference to the dquot, so it's safe to do this unlock/lock without - * it being reclaimed in the mean time. + * Get the dquot (locked), and join it to the transaction. + * Allocate the dquot if this doesn't exist. 
*/ - error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp); - if (error) { + if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) { + xfs_trans_cancel(tp, XFS_TRANS_ABORT); ASSERT(error != ENOENT); goto out_unlock; } - xfs_dqunlock(dqp); - - tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM); - error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp), - 0, 0, XFS_DEFAULT_LOG_COUNT); - if (error) { - xfs_trans_cancel(tp, 0); - goto out_rele; - } - - xfs_dqlock(dqp); xfs_trans_dqjoin(tp, dqp); ddq = &dqp->q_core; @@ -626,10 +621,9 @@ xfs_qm_scall_setqlim( xfs_trans_log_dquot(tp, dqp); error = xfs_trans_commit(tp, 0); - -out_rele: xfs_qm_dqrele(dqp); -out_unlock: + + out_unlock: mutex_unlock(&q->qi_quotaofflock); return error; } diff --git a/trunk/fs/xfs/xfs_quota.h b/trunk/fs/xfs/xfs_quota.h index c38068f26c55..c61e31c7d997 100644 --- a/trunk/fs/xfs/xfs_quota.h +++ b/trunk/fs/xfs/xfs_quota.h @@ -87,8 +87,6 @@ typedef struct xfs_dqblk { uuid_t dd_uuid; /* location information */ } xfs_dqblk_t; -#define XFS_DQUOT_CRC_OFF offsetof(struct xfs_dqblk, dd_crc) - /* * flags for q_flags field in the dquot. */ diff --git a/trunk/fs/xfs/xfs_super.c b/trunk/fs/xfs/xfs_super.c index 3033ba5e9762..ea341cea68cb 100644 --- a/trunk/fs/xfs/xfs_super.c +++ b/trunk/fs/xfs/xfs_super.c @@ -1372,17 +1372,6 @@ xfs_finish_flags( } } - /* - * V5 filesystems always use attr2 format for attributes. - */ - if (xfs_sb_version_hascrc(&mp->m_sb) && - (mp->m_flags & XFS_MOUNT_NOATTR2)) { - xfs_warn(mp, -"Cannot mount a V5 filesystem as %s. %s is always enabled for V5 filesystems.", - MNTOPT_NOATTR2, MNTOPT_ATTR2); - return XFS_ERROR(EINVAL); - } - /* * mkfs'ed attr2 will turn on attr2 mount unless explicitly * told by noattr2 to turn it off diff --git a/trunk/fs/xfs/xfs_symlink.c b/trunk/fs/xfs/xfs_symlink.c index 195a403e1522..5f234389327c 100644 --- a/trunk/fs/xfs/xfs_symlink.c +++ b/trunk/fs/xfs/xfs_symlink.c @@ -56,9 +56,16 @@ xfs_symlink_blocks( struct xfs_mount *mp, int pathlen) { - int buflen = XFS_SYMLINK_BUF_SPACE(mp, mp->m_sb.sb_blocksize); + int fsblocks = 0; + int len = pathlen; - return (pathlen + buflen - 1) / buflen; + do { + fsblocks++; + len -= XFS_SYMLINK_BUF_SPACE(mp, mp->m_sb.sb_blocksize); + } while (len > 0); + + ASSERT(fsblocks <= XFS_SYMLINK_MAPS); + return fsblocks; } static int @@ -398,7 +405,7 @@ xfs_symlink( if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version)) fs_blocks = 0; else - fs_blocks = xfs_symlink_blocks(mp, pathlen); + fs_blocks = XFS_B_TO_FSB(mp, pathlen); resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks); error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT); @@ -505,7 +512,7 @@ xfs_symlink( cur_chunk = target_path; offset = 0; for (n = 0; n < nmaps; n++) { - char *buf; + char *buf; d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock); byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); @@ -518,7 +525,9 @@ xfs_symlink( bp->b_ops = &xfs_symlink_buf_ops; byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt); - byte_cnt = min(byte_cnt, pathlen); + if (pathlen < byte_cnt) { + byte_cnt = pathlen; + } buf = bp->b_addr; buf += xfs_symlink_hdr_set(mp, ip->i_ino, offset, @@ -533,7 +542,6 @@ xfs_symlink( xfs_trans_log_buf(tp, bp, 0, (buf + byte_cnt - 1) - (char *)bp->b_addr); } - ASSERT(pathlen == 0); } /* diff --git a/trunk/fs/xfs/xfs_trace.h b/trunk/fs/xfs/xfs_trace.h index a04701de6bbd..aa4db3307d36 100644 --- a/trunk/fs/xfs/xfs_trace.h +++ b/trunk/fs/xfs/xfs_trace.h @@ -974,16 +974,14 @@ 
DEFINE_RW_EVENT(xfs_file_splice_read); DEFINE_RW_EVENT(xfs_file_splice_write); DECLARE_EVENT_CLASS(xfs_page_class, - TP_PROTO(struct inode *inode, struct page *page, unsigned long off, - unsigned int len), - TP_ARGS(inode, page, off, len), + TP_PROTO(struct inode *inode, struct page *page, unsigned long off), + TP_ARGS(inode, page, off), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(pgoff_t, pgoff) __field(loff_t, size) __field(unsigned long, offset) - __field(unsigned int, length) __field(int, delalloc) __field(int, unwritten) ), @@ -997,27 +995,24 @@ DECLARE_EVENT_CLASS(xfs_page_class, __entry->pgoff = page_offset(page); __entry->size = i_size_read(inode); __entry->offset = off; - __entry->length = len; __entry->delalloc = delalloc; __entry->unwritten = unwritten; ), TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " - "length %x delalloc %d unwritten %d", + "delalloc %d unwritten %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->pgoff, __entry->size, __entry->offset, - __entry->length, __entry->delalloc, __entry->unwritten) ) #define DEFINE_PAGE_EVENT(name) \ DEFINE_EVENT(xfs_page_class, name, \ - TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \ - unsigned int len), \ - TP_ARGS(inode, page, off, len)) + TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \ + TP_ARGS(inode, page, off)) DEFINE_PAGE_EVENT(xfs_writepage); DEFINE_PAGE_EVENT(xfs_releasepage); DEFINE_PAGE_EVENT(xfs_invalidatepage); diff --git a/trunk/fs/xfs/xfs_vnodeops.c b/trunk/fs/xfs/xfs_vnodeops.c index 0176bb21f09a..1501f4fa51a6 100644 --- a/trunk/fs/xfs/xfs_vnodeops.c +++ b/trunk/fs/xfs/xfs_vnodeops.c @@ -1453,7 +1453,7 @@ xfs_free_file_space( xfs_mount_t *mp; int nimap; uint resblks; - xfs_off_t rounding; + uint rounding; int rt; xfs_fileoff_t startoffset_fsb; xfs_trans_t *tp; @@ -1482,7 +1482,7 @@ xfs_free_file_space( inode_dio_wait(VFS_I(ip)); } - rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); + rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); ioffset = offset & ~(rounding - 1); error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset, -1); diff --git a/trunk/fs/xfs/xfs_vnodeops.h b/trunk/fs/xfs/xfs_vnodeops.h index 38c67c34d73f..5163022d9808 100644 --- a/trunk/fs/xfs/xfs_vnodeops.h +++ b/trunk/fs/xfs/xfs_vnodeops.h @@ -31,7 +31,8 @@ int xfs_remove(struct xfs_inode *dp, struct xfs_name *name, struct xfs_inode *ip); int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip, struct xfs_name *target_name); -int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx, size_t bufsize); +int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize, + xfs_off_t *offset, filldir_t filldir); int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name, const char *target_path, umode_t mode, struct xfs_inode **ipp); int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state); diff --git a/trunk/include/acpi/acpi_bus.h b/trunk/include/acpi/acpi_bus.h index c13c919ab99e..98db31d9f9b4 100644 --- a/trunk/include/acpi/acpi_bus.h +++ b/trunk/include/acpi/acpi_bus.h @@ -377,20 +377,49 @@ acpi_status acpi_bus_get_status_handle(acpi_handle handle, unsigned long long *sta); int acpi_bus_get_status(struct acpi_device *device); +#ifdef CONFIG_PM int acpi_bus_set_power(acpi_handle handle, int state); const char *acpi_power_state_string(int state); int acpi_device_get_power(struct acpi_device *device, int *state); int acpi_device_set_power(struct acpi_device 
*device, int state); int acpi_bus_init_power(struct acpi_device *device); -int acpi_device_fix_up_power(struct acpi_device *device); int acpi_bus_update_power(acpi_handle handle, int *state_p); bool acpi_bus_power_manageable(acpi_handle handle); - -#ifdef CONFIG_PM bool acpi_bus_can_wakeup(acpi_handle handle); -#else -static inline bool acpi_bus_can_wakeup(acpi_handle handle) { return false; } -#endif +#else /* !CONFIG_PM */ +static inline int acpi_bus_set_power(acpi_handle handle, int state) +{ + return 0; +} +static inline const char *acpi_power_state_string(int state) +{ + return "D0"; +} +static inline int acpi_device_get_power(struct acpi_device *device, int *state) +{ + return 0; +} +static inline int acpi_device_set_power(struct acpi_device *device, int state) +{ + return 0; +} +static inline int acpi_bus_init_power(struct acpi_device *device) +{ + return 0; +} +static inline int acpi_bus_update_power(acpi_handle handle, int *state_p) +{ + return 0; +} +static inline bool acpi_bus_power_manageable(acpi_handle handle) +{ + return false; +} +static inline bool acpi_bus_can_wakeup(acpi_handle handle) +{ + return false; +} +#endif /* !CONFIG_PM */ #ifdef CONFIG_ACPI_PROC_EVENT int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data); diff --git a/trunk/include/acpi/acpi_drivers.h b/trunk/include/acpi/acpi_drivers.h index b420939f5eb5..e6168a24b9f0 100644 --- a/trunk/include/acpi/acpi_drivers.h +++ b/trunk/include/acpi/acpi_drivers.h @@ -123,9 +123,7 @@ extern int register_dock_notifier(struct notifier_block *nb); extern void unregister_dock_notifier(struct notifier_block *nb); extern int register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops, - void *context, - void (*init)(void *), - void (*release)(void *)); + void *context); extern void unregister_hotplug_dock_device(acpi_handle handle); #else static inline int is_dock_device(acpi_handle handle) @@ -141,9 +139,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb) } static inline int register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops, - void *context, - void (*init)(void *), - void (*release)(void *)) + void *context) { return -ENODEV; } diff --git a/trunk/include/acpi/acpiosxf.h b/trunk/include/acpi/acpiosxf.h index 64b8c7639520..5b3d2bd4813a 100644 --- a/trunk/include/acpi/acpiosxf.h +++ b/trunk/include/acpi/acpiosxf.h @@ -77,7 +77,7 @@ struct acpi_signal_fatal_info { /* * OSL Initialization and shutdown primitives */ -acpi_status __init acpi_os_initialize(void); +acpi_status __initdata acpi_os_initialize(void); acpi_status acpi_os_terminate(void); diff --git a/trunk/include/acpi/processor.h b/trunk/include/acpi/processor.h index ea69367fdd3b..b327b5a9296d 100644 --- a/trunk/include/acpi/processor.h +++ b/trunk/include/acpi/processor.h @@ -329,16 +329,10 @@ int acpi_processor_power_init(struct acpi_processor *pr); int acpi_processor_power_exit(struct acpi_processor *pr); int acpi_processor_cst_has_changed(struct acpi_processor *pr); int acpi_processor_hotplug(struct acpi_processor *pr); +int acpi_processor_suspend(struct device *dev); +int acpi_processor_resume(struct device *dev); extern struct cpuidle_driver acpi_idle_driver; -#ifdef CONFIG_PM_SLEEP -void acpi_processor_syscore_init(void); -void acpi_processor_syscore_exit(void); -#else -static inline void acpi_processor_syscore_init(void) {} -static inline void acpi_processor_syscore_exit(void) {} -#endif - /* in processor_thermal.c */ int acpi_processor_get_limit_info(struct 
acpi_processor *pr); extern const struct thermal_cooling_device_ops processor_cooling_ops; diff --git a/trunk/include/asm-generic/io.h b/trunk/include/asm-generic/io.h index d5afe96adba6..ac9da00e9f2c 100644 --- a/trunk/include/asm-generic/io.h +++ b/trunk/include/asm-generic/io.h @@ -343,12 +343,8 @@ extern void ioport_unmap(void __iomem *p); #endif /* CONFIG_GENERIC_IOMAP */ #endif /* CONFIG_HAS_IOPORT */ -#ifndef xlate_dev_kmem_ptr #define xlate_dev_kmem_ptr(p) p -#endif -#ifndef xlate_dev_mem_ptr #define xlate_dev_mem_ptr(p) __va(p) -#endif #ifdef CONFIG_VIRT_TO_BUS #ifndef virt_to_bus diff --git a/trunk/include/asm-generic/kvm_para.h b/trunk/include/asm-generic/kvm_para.h index fa25becbdcaf..9d96605f160a 100644 --- a/trunk/include/asm-generic/kvm_para.h +++ b/trunk/include/asm-generic/kvm_para.h @@ -18,9 +18,4 @@ static inline unsigned int kvm_arch_para_features(void) return 0; } -static inline bool kvm_para_available(void) -{ - return false; -} - #endif diff --git a/trunk/include/asm-generic/pgtable.h b/trunk/include/asm-generic/pgtable.h index b1836987d506..a59ff51b0166 100644 --- a/trunk/include/asm-generic/pgtable.h +++ b/trunk/include/asm-generic/pgtable.h @@ -692,8 +692,4 @@ static inline pmd_t pmd_mknuma(pmd_t pmd) #endif /* !__ASSEMBLY__ */ -#ifndef io_remap_pfn_range -#define io_remap_pfn_range remap_pfn_range -#endif - #endif /* _ASM_GENERIC_PGTABLE_H */ diff --git a/trunk/include/asm-generic/tlb.h b/trunk/include/asm-generic/tlb.h index 13821c339a41..b1b1fa6ffffe 100644 --- a/trunk/include/asm-generic/tlb.h +++ b/trunk/include/asm-generic/tlb.h @@ -97,9 +97,11 @@ struct mmu_gather { unsigned long start; unsigned long end; unsigned int need_flush : 1, /* Did free PTEs */ + fast_mode : 1; /* No batching */ + /* we are in the middle of an operation to clear * a full mm and can make some optimizations */ - fullmm : 1, + unsigned int fullmm : 1, /* we have performed an operation which * requires a complete flush of the tlb */ need_flush_all : 1; @@ -112,6 +114,19 @@ struct mmu_gather { #define HAVE_GENERIC_MMU_GATHER +static inline int tlb_fast_mode(struct mmu_gather *tlb) +{ +#ifdef CONFIG_SMP + return tlb->fast_mode; +#else + /* + * For UP we don't need to worry about TLB flush + * and page free order so much.. 
+ */ + return 1; +#endif +} + void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm); void tlb_flush_mmu(struct mmu_gather *tlb); void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, diff --git a/trunk/include/drm/drmP.h b/trunk/include/drm/drmP.h index 63d17ee9eb48..61196592152e 100644 --- a/trunk/include/drm/drmP.h +++ b/trunk/include/drm/drmP.h @@ -316,7 +316,6 @@ struct drm_ioctl_desc { int flags; drm_ioctl_t *func; unsigned int cmd_drv; - const char *name; }; /** @@ -325,7 +324,7 @@ struct drm_ioctl_desc { */ #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ - [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl} + [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl} struct drm_magic_entry { struct list_head head; diff --git a/trunk/include/drm/drm_fb_helper.h b/trunk/include/drm/drm_fb_helper.h index 471f276ce8f7..8230b46fdd73 100644 --- a/trunk/include/drm/drm_fb_helper.h +++ b/trunk/include/drm/drm_fb_helper.h @@ -50,14 +50,13 @@ struct drm_fb_helper_surface_size { /** * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library - * @gamma_set: Set the given gamma lut register on the given crtc. - * @gamma_get: Read the given gamma lut register on the given crtc, used to - * save the current lut when force-restoring the fbdev for e.g. - * kdbg. - * @fb_probe: Driver callback to allocate and initialize the fbdev info - * structure. Futhermore it also needs to allocate the drm - * framebuffer used to back the fbdev. - * @initial_config: Setup an initial fbdev display configuration + * @gamma_set: - Set the given gamma lut register on the given crtc. + * @gamma_get: - Read the given gamma lut register on the given crtc, used to + * save the current lut when force-restoring the fbdev for e.g. + * kdbg. + * @fb_probe: - Driver callback to allocate and initialize the fbdev info + * structure. Futhermore it also needs to allocate the drm + * framebuffer used to back the fbdev. * * Driver callbacks used by the fbdev emulation helper library. */ diff --git a/trunk/include/drm/drm_os_linux.h b/trunk/include/drm/drm_os_linux.h index 675ddf4b441f..393369147a2d 100644 --- a/trunk/include/drm/drm_os_linux.h +++ b/trunk/include/drm/drm_os_linux.h @@ -87,6 +87,15 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size) /** Other copying of data from kernel space */ #define DRM_COPY_TO_USER(arg1, arg2, arg3) \ copy_to_user(arg1, arg2, arg3) +/* Macros for copyfrom user, but checking readability only once */ +#define DRM_VERIFYAREA_READ( uaddr, size ) \ + (access_ok( VERIFY_READ, uaddr, size ) ? 
0 : -EFAULT) +#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \ + __copy_from_user(arg1, arg2, arg3) +#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \ + __copy_to_user(arg1, arg2, arg3) +#define DRM_GET_USER_UNCHECKED(val, uaddr) \ + __get_user(val, uaddr) #define DRM_HZ HZ diff --git a/trunk/include/drm/drm_pciids.h b/trunk/include/drm/drm_pciids.h index bb1bc485390b..c2af598f701d 100644 --- a/trunk/include/drm/drm_pciids.h +++ b/trunk/include/drm/drm_pciids.h @@ -152,12 +152,6 @@ {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ - {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ - {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ - {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ - {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ - {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ - {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ diff --git a/trunk/include/linux/acpi_dma.h b/trunk/include/linux/acpi_dma.h index fb0298082916..d09deabc7bf6 100644 --- a/trunk/include/linux/acpi_dma.h +++ b/trunk/include/linux/acpi_dma.h @@ -37,8 +37,6 @@ struct acpi_dma_spec { * @dev: struct device of this controller * @acpi_dma_xlate: callback function to find a suitable channel * @data: private data used by a callback function - * @base_request_line: first supported request line (CSRT) - * @end_request_line: last supported request line (CSRT) */ struct acpi_dma { struct list_head dma_controllers; @@ -46,8 +44,6 @@ struct acpi_dma { struct dma_chan *(*acpi_dma_xlate) (struct acpi_dma_spec *, struct acpi_dma *); void *data; - unsigned short base_request_line; - unsigned short end_request_line; }; /* Used with acpi_dma_simple_xlate() */ diff --git a/trunk/include/linux/aer.h b/trunk/include/linux/aer.h index 737f90ab4b62..ec10e1b24c1c 100644 --- a/trunk/include/linux/aer.h +++ b/trunk/include/linux/aer.h @@ -49,11 +49,10 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) } #endif -extern void cper_print_aer(struct pci_dev *dev, +extern void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity, struct aer_capability_regs *aer); extern int cper_severity_to_aer(int cper_severity); extern void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, - int severity, - struct aer_capability_regs *aer_regs); + int severity); #endif //_AER_H_ diff --git a/trunk/include/linux/bcma/bcma.h b/trunk/include/linux/bcma/bcma.h index 2e34db82a643..f14a98a79c9d 100644 --- a/trunk/include/linux/bcma/bcma.h +++ b/trunk/include/linux/bcma/bcma.h @@ -134,10 +134,7 @@ struct bcma_host_ops { #define BCMA_CORE_I2S 0x834 #define BCMA_CORE_SDR_DDR1_MEM_CTL 0x835 /* SDR/DDR1 memory controller core */ #define BCMA_CORE_SHIM 0x837 /* SHIM component in ubus/6362 */ -#define BCMA_CORE_PHY_AC 0x83B 
-#define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */ -#define BCMA_CORE_USB30_DEV 0x83D -#define BCMA_CORE_ARM_CR4 0x83E +#define BCMA_CORE_ARM_CR4 0x83e #define BCMA_CORE_DEFAULT 0xFFF #define BCMA_MAX_NR_CORES 16 diff --git a/trunk/include/linux/brcmphy.h b/trunk/include/linux/brcmphy.h index 677b4f01b2d0..b840a4960282 100644 --- a/trunk/include/linux/brcmphy.h +++ b/trunk/include/linux/brcmphy.h @@ -1,6 +1,3 @@ -#ifndef _LINUX_BRCMPHY_H -#define _LINUX_BRCMPHY_H - #define PHY_ID_BCM50610 0x0143bd60 #define PHY_ID_BCM50610M 0x0143bd70 #define PHY_ID_BCM5241 0x0143bc30 @@ -32,5 +29,3 @@ #define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 #define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 #define PHY_BCM_FLAGS_VALID 0x80000000 - -#endif /* _LINUX_BRCMPHY_H */ diff --git a/trunk/include/linux/buffer_head.h b/trunk/include/linux/buffer_head.h index f5a3b838ddb0..9e52b0626b39 100644 --- a/trunk/include/linux/buffer_head.h +++ b/trunk/include/linux/buffer_head.h @@ -198,8 +198,7 @@ extern int buffer_heads_over_limit; * Generic address_space_operations implementations for buffer_head-backed * address_spaces. */ -void block_invalidatepage(struct page *page, unsigned int offset, - unsigned int length); +void block_invalidatepage(struct page *page, unsigned long offset); int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc); int block_write_full_page_endio(struct page *page, get_block_t *get_block, diff --git a/trunk/include/linux/cgroup.h b/trunk/include/linux/cgroup.h index 8bda1294c035..5047355b9a0f 100644 --- a/trunk/include/linux/cgroup.h +++ b/trunk/include/linux/cgroup.h @@ -707,7 +707,7 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos); * * If a subsystem synchronizes against the parent in its ->css_online() and * before starting iterating, and synchronizes against @pos on each - * iteration, any descendant cgroup which finished ->css_online() is + * iteration, any descendant cgroup which finished ->css_offline() is * guaranteed to be visible in the future iterations. * * In other words, the following guarantees that a descendant can't escape diff --git a/trunk/include/linux/context_tracking.h b/trunk/include/linux/context_tracking.h index fc09d7b0dacf..365f4a61bf04 100644 --- a/trunk/include/linux/context_tracking.h +++ b/trunk/include/linux/context_tracking.h @@ -3,7 +3,6 @@ #include #include -#include #include struct context_tracking { @@ -20,26 +19,6 @@ struct context_tracking { } state; }; -static inline void __guest_enter(void) -{ - /* - * This is running in ioctl context so we can avoid - * the call to vtime_account() with its unnecessary idle check. - */ - vtime_account_system(current); - current->flags |= PF_VCPU; -} - -static inline void __guest_exit(void) -{ - /* - * This is running in ioctl context so we can avoid - * the call to vtime_account() with its unnecessary idle check. 
- */ - vtime_account_system(current); - current->flags &= ~PF_VCPU; -} - #ifdef CONFIG_CONTEXT_TRACKING DECLARE_PER_CPU(struct context_tracking, context_tracking); @@ -56,9 +35,6 @@ static inline bool context_tracking_active(void) extern void user_enter(void); extern void user_exit(void); -extern void guest_enter(void); -extern void guest_exit(void); - static inline enum ctx_state exception_enter(void) { enum ctx_state prev_ctx; @@ -81,17 +57,6 @@ extern void context_tracking_task_switch(struct task_struct *prev, static inline bool context_tracking_in_user(void) { return false; } static inline void user_enter(void) { } static inline void user_exit(void) { } - -static inline void guest_enter(void) -{ - __guest_enter(); -} - -static inline void guest_exit(void) -{ - __guest_exit(); -} - static inline enum ctx_state exception_enter(void) { return 0; } static inline void exception_exit(enum ctx_state prev_ctx) { } static inline void context_tracking_task_switch(struct task_struct *prev, diff --git a/trunk/include/linux/cpu.h b/trunk/include/linux/cpu.h index 9f3c7e81270a..c6f6e0839b61 100644 --- a/trunk/include/linux/cpu.h +++ b/trunk/include/linux/cpu.h @@ -175,8 +175,6 @@ extern struct bus_type cpu_subsys; extern void get_online_cpus(void); extern void put_online_cpus(void); -extern void cpu_hotplug_disable(void); -extern void cpu_hotplug_enable(void); #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) @@ -200,8 +198,6 @@ static inline void cpu_hotplug_driver_unlock(void) #define get_online_cpus() do { } while (0) #define put_online_cpus() do { } while (0) -#define cpu_hotplug_disable() do { } while (0) -#define cpu_hotplug_enable() do { } while (0) #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) /* These aren't inline functions due to a GCC bug. */ #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) diff --git a/trunk/include/linux/f2fs_fs.h b/trunk/include/linux/f2fs_fs.h index 383d5e39b280..df6fab82f87e 100644 --- a/trunk/include/linux/f2fs_fs.h +++ b/trunk/include/linux/f2fs_fs.h @@ -20,8 +20,8 @@ #define F2FS_BLKSIZE 4096 /* support only 4KB block */ #define F2FS_MAX_EXTENSION 64 /* # of extension entries */ -#define NULL_ADDR ((block_t)0) /* used as block_t addresses */ -#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */ +#define NULL_ADDR 0x0U +#define NEW_ADDR -1U #define F2FS_ROOT_INO(sbi) (sbi->root_ino_num) #define F2FS_NODE_INO(sbi) (sbi->node_ino_num) diff --git a/trunk/include/linux/filter.h b/trunk/include/linux/filter.h index f65f5a69db8f..c050dcc322a4 100644 --- a/trunk/include/linux/filter.h +++ b/trunk/include/linux/filter.h @@ -46,7 +46,6 @@ extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); extern int sk_detach_filter(struct sock *sk); extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen); extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len); -extern void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to); #ifdef CONFIG_BPF_JIT #include diff --git a/trunk/include/linux/fs.h b/trunk/include/linux/fs.h index f8a5240541b7..43db02e9c9fa 100644 --- a/trunk/include/linux/fs.h +++ b/trunk/include/linux/fs.h @@ -364,7 +364,7 @@ struct address_space_operations { /* Unfortunately this kludge is needed for FIBMAP. 
Don't use it */ sector_t (*bmap)(struct address_space *, sector_t); - void (*invalidatepage) (struct page *, unsigned int, unsigned int); + void (*invalidatepage) (struct page *, unsigned long); int (*releasepage) (struct page *, gfp_t); void (*freepage)(struct page *); ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, @@ -1506,11 +1506,6 @@ int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); * to have different dirent layouts depending on the binary type. */ typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned); -struct dir_context { - const filldir_t actor; - loff_t pos; -}; - struct block_device_operations; /* These macros are for out of kernel modules to test that @@ -1526,7 +1521,7 @@ struct file_operations { ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t); ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t); - int (*iterate) (struct file *, struct dir_context *); + int (*readdir) (struct file *, void *, filldir_t); unsigned int (*poll) (struct file *, struct poll_table_struct *); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); @@ -2419,6 +2414,8 @@ extern ssize_t generic_file_splice_write(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out, loff_t *, size_t len, unsigned int flags); +extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, + size_t len, unsigned int flags); extern void file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); @@ -2499,7 +2496,6 @@ loff_t inode_get_bytes(struct inode *inode); void inode_set_bytes(struct inode *inode, loff_t bytes); extern int vfs_readdir(struct file *, filldir_t, void *); -extern int iterate_dir(struct file *, struct dir_context *); extern int vfs_stat(const char __user *, struct kstat *); extern int vfs_lstat(const char __user *, struct kstat *); @@ -2530,7 +2526,7 @@ extern void iterate_supers_type(struct file_system_type *, extern int dcache_dir_open(struct inode *, struct file *); extern int dcache_dir_close(struct inode *, struct file *); extern loff_t dcache_dir_lseek(struct file *, loff_t, int); -extern int dcache_readdir(struct file *, struct dir_context *); +extern int dcache_readdir(struct file *, void *, filldir_t); extern int simple_setattr(struct dentry *, struct iattr *); extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *); extern int simple_statfs(struct dentry *, struct kstatfs *); @@ -2694,41 +2690,4 @@ static inline void inode_has_no_xattr(struct inode *inode) inode->i_flags |= S_NOSEC; } -static inline bool dir_emit(struct dir_context *ctx, - const char *name, int namelen, - u64 ino, unsigned type) -{ - return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0; -} -static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx) -{ - return ctx->actor(ctx, ".", 1, ctx->pos, - file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0; -} -static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx) -{ - return ctx->actor(ctx, "..", 2, ctx->pos, - parent_ino(file->f_path.dentry), DT_DIR) == 0; -} -static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx) -{ - if (ctx->pos == 0) { - if (!dir_emit_dot(file, ctx)) - return 
false; - ctx->pos = 1; - } - if (ctx->pos == 1) { - if (!dir_emit_dotdot(file, ctx)) - return false; - ctx->pos = 2; - } - return true; -} -static inline bool dir_relax(struct inode *inode) -{ - mutex_unlock(&inode->i_mutex); - mutex_lock(&inode->i_mutex); - return !IS_DEADDIR(inode); -} - #endif /* _LINUX_FS_H */ diff --git a/trunk/include/linux/fscache-cache.h b/trunk/include/linux/fscache-cache.h index a9ff9a36b86d..5dfa0aa216b6 100644 --- a/trunk/include/linux/fscache-cache.h +++ b/trunk/include/linux/fscache-cache.h @@ -97,8 +97,7 @@ struct fscache_operation { #define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ #define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ #define FSCACHE_OP_DEC_READ_CNT 6 /* decrement object->n_reads on destruction */ -#define FSCACHE_OP_UNUSE_COOKIE 7 /* call fscache_unuse_cookie() on completion */ -#define FSCACHE_OP_KEEP_FLAGS 0x00f0 /* flags to keep when repurposing an op */ +#define FSCACHE_OP_KEEP_FLAGS 0x0070 /* flags to keep when repurposing an op */ enum fscache_operation_state state; atomic_t usage; @@ -151,7 +150,7 @@ struct fscache_retrieval { void *context; /* netfs read context (pinned) */ struct list_head to_do; /* list of things to be done by the backend */ unsigned long start_time; /* time at which retrieval started */ - atomic_t n_pages; /* number of pages to be retrieved */ + unsigned n_pages; /* number of pages to be retrieved */ }; typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op, @@ -195,14 +194,15 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op) static inline void fscache_retrieval_complete(struct fscache_retrieval *op, int n_pages) { - atomic_sub(n_pages, &op->n_pages); - if (atomic_read(&op->n_pages) <= 0) + op->n_pages -= n_pages; + if (op->n_pages <= 0) fscache_op_complete(&op->op, true); } /** * fscache_put_retrieval - Drop a reference to a retrieval operation * @op: The retrieval operation affected + * @n_pages: The number of pages to account for * * Drop a reference to a retrieval operation. 
*/ @@ -314,7 +314,6 @@ struct fscache_cache_ops { struct fscache_cookie { atomic_t usage; /* number of users of this cookie */ atomic_t n_children; /* number of children of this cookie */ - atomic_t n_active; /* number of active users of netfs ptrs */ spinlock_t lock; spinlock_t stores_lock; /* lock on page store tree */ struct hlist_head backing_objects; /* object(s) backing this file/index */ @@ -327,11 +326,13 @@ struct fscache_cookie { unsigned long flags; #define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */ -#define FSCACHE_COOKIE_NO_DATA_YET 1 /* T if new object with no cached data yet */ -#define FSCACHE_COOKIE_UNAVAILABLE 2 /* T if cookie is unavailable (error, etc) */ -#define FSCACHE_COOKIE_INVALIDATING 3 /* T if cookie is being invalidated */ -#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */ -#define FSCACHE_COOKIE_RETIRED 5 /* T if cookie was retired */ +#define FSCACHE_COOKIE_CREATING 1 /* T if non-index object being created still */ +#define FSCACHE_COOKIE_NO_DATA_YET 2 /* T if new object with no cached data yet */ +#define FSCACHE_COOKIE_PENDING_FILL 3 /* T if pending initial fill on object */ +#define FSCACHE_COOKIE_FILLING 4 /* T if filling object incrementally */ +#define FSCACHE_COOKIE_UNAVAILABLE 5 /* T if cookie is unavailable (error, etc) */ +#define FSCACHE_COOKIE_WAITING_ON_READS 6 /* T if cookie is waiting on reads */ +#define FSCACHE_COOKIE_INVALIDATING 7 /* T if cookie is being invalidated */ }; extern struct fscache_cookie fscache_fsdef_index; @@ -340,40 +341,45 @@ extern struct fscache_cookie fscache_fsdef_index; * Event list for fscache_object::{event_mask,events} */ enum { - FSCACHE_OBJECT_EV_NEW_CHILD, /* T if object has a new child */ - FSCACHE_OBJECT_EV_PARENT_READY, /* T if object's parent is ready */ + FSCACHE_OBJECT_EV_REQUEUE, /* T if object should be requeued */ FSCACHE_OBJECT_EV_UPDATE, /* T if object should be updated */ FSCACHE_OBJECT_EV_INVALIDATE, /* T if cache requested object invalidation */ FSCACHE_OBJECT_EV_CLEARED, /* T if accessors all gone */ FSCACHE_OBJECT_EV_ERROR, /* T if fatal error occurred during processing */ - FSCACHE_OBJECT_EV_KILL, /* T if netfs relinquished or cache withdrew object */ + FSCACHE_OBJECT_EV_RELEASE, /* T if netfs requested object release */ + FSCACHE_OBJECT_EV_RETIRE, /* T if netfs requested object retirement */ + FSCACHE_OBJECT_EV_WITHDRAW, /* T if cache requested object withdrawal */ NR_FSCACHE_OBJECT_EVENTS }; #define FSCACHE_OBJECT_EVENTS_MASK ((1UL << NR_FSCACHE_OBJECT_EVENTS) - 1) -/* - * States for object state machine. 
- */ -struct fscache_transition { - unsigned long events; - const struct fscache_state *transit_to; -}; - -struct fscache_state { - char name[24]; - char short_name[8]; - const struct fscache_state *(*work)(struct fscache_object *object, - int event); - const struct fscache_transition transitions[]; -}; - /* * on-disk cache file or index handle */ struct fscache_object { - const struct fscache_state *state; /* Object state machine state */ - const struct fscache_transition *oob_table; /* OOB state transition table */ + enum fscache_object_state { + FSCACHE_OBJECT_INIT, /* object in initial unbound state */ + FSCACHE_OBJECT_LOOKING_UP, /* looking up object */ + FSCACHE_OBJECT_CREATING, /* creating object */ + + /* active states */ + FSCACHE_OBJECT_AVAILABLE, /* cleaning up object after creation */ + FSCACHE_OBJECT_ACTIVE, /* object is usable */ + FSCACHE_OBJECT_INVALIDATING, /* object is invalidating */ + FSCACHE_OBJECT_UPDATING, /* object is updating */ + + /* terminal states */ + FSCACHE_OBJECT_DYING, /* object waiting for accessors to finish */ + FSCACHE_OBJECT_LC_DYING, /* object cleaning up after lookup/create */ + FSCACHE_OBJECT_ABORT_INIT, /* abort the init state */ + FSCACHE_OBJECT_RELEASING, /* releasing object */ + FSCACHE_OBJECT_RECYCLING, /* retiring object */ + FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */ + FSCACHE_OBJECT_DEAD, /* object is now dead */ + FSCACHE_OBJECT__NSTATES + } state; + int debug_id; /* debugging ID */ int n_children; /* number of child objects */ int n_ops; /* number of extant ops on object */ @@ -384,7 +390,6 @@ struct fscache_object { spinlock_t lock; /* state and operations lock */ unsigned long lookup_jif; /* time at which lookup started */ - unsigned long oob_event_mask; /* OOB events this object is interested in */ unsigned long event_mask; /* events this object is interested in */ unsigned long events; /* events to be processed by this object * (order is important - using fls) */ @@ -393,9 +398,6 @@ struct fscache_object { #define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */ #define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */ #define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */ -#define FSCACHE_OBJECT_IS_LIVE 3 /* T if object is not withdrawn or relinquished */ -#define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */ -#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ struct list_head cache_link; /* link in cache->object_list */ struct hlist_node cookie_link; /* link in cookie->backing_objects */ @@ -413,40 +415,62 @@ struct fscache_object { loff_t store_limit_l; /* current storage limit */ }; -extern void fscache_object_init(struct fscache_object *, struct fscache_cookie *, - struct fscache_cache *); -extern void fscache_object_destroy(struct fscache_object *); +extern const char *fscache_object_states[]; -extern void fscache_object_lookup_negative(struct fscache_object *object); -extern void fscache_obtained_object(struct fscache_object *object); +#define fscache_object_is_active(obj) \ + (!test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \ + (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \ + (obj)->state < FSCACHE_OBJECT_DYING) -static inline bool fscache_object_is_live(struct fscache_object *object) -{ - return test_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); -} +#define fscache_object_is_dead(obj) \ + (test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \ + (obj)->state >= FSCACHE_OBJECT_DYING) -static inline bool 
fscache_object_is_dying(struct fscache_object *object) -{ - return !fscache_object_is_live(object); -} +extern void fscache_object_work_func(struct work_struct *work); -static inline bool fscache_object_is_available(struct fscache_object *object) +/** + * fscache_object_init - Initialise a cache object description + * @object: Object description + * + * Initialise a cache object description to its basic values. + * + * See Documentation/filesystems/caching/backend-api.txt for a complete + * description. + */ +static inline +void fscache_object_init(struct fscache_object *object, + struct fscache_cookie *cookie, + struct fscache_cache *cache) { - return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags); + atomic_inc(&cache->object_count); + + object->state = FSCACHE_OBJECT_INIT; + spin_lock_init(&object->lock); + INIT_LIST_HEAD(&object->cache_link); + INIT_HLIST_NODE(&object->cookie_link); + INIT_WORK(&object->work, fscache_object_work_func); + INIT_LIST_HEAD(&object->dependents); + INIT_LIST_HEAD(&object->dep_link); + INIT_LIST_HEAD(&object->pending_ops); + object->n_children = 0; + object->n_ops = object->n_in_progress = object->n_exclusive = 0; + object->events = object->event_mask = 0; + object->flags = 0; + object->store_limit = 0; + object->store_limit_l = 0; + object->cache = cache; + object->cookie = cookie; + object->parent = NULL; } -static inline bool fscache_object_is_active(struct fscache_object *object) -{ - return fscache_object_is_available(object) && - fscache_object_is_live(object) && - !test_bit(FSCACHE_IOERROR, &object->cache->flags); -} +extern void fscache_object_lookup_negative(struct fscache_object *object); +extern void fscache_obtained_object(struct fscache_object *object); -static inline bool fscache_object_is_dead(struct fscache_object *object) -{ - return fscache_object_is_dying(object) && - test_bit(FSCACHE_IOERROR, &object->cache->flags); -} +#ifdef CONFIG_FSCACHE_OBJECT_LIST +extern void fscache_object_destroy(struct fscache_object *object); +#else +#define fscache_object_destroy(object) do {} while(0) +#endif /** * fscache_object_destroyed - Note destruction of an object in a cache @@ -507,33 +531,6 @@ static inline void fscache_end_io(struct fscache_retrieval *op, op->end_io_func(page, op->context, error); } -/** - * fscache_use_cookie - Request usage of cookie attached to an object - * @object: Object description - * - * Request usage of the cookie attached to an object. NULL is returned if the - * relinquishment had reduced the cookie usage count to 0. - */ -static inline bool fscache_use_cookie(struct fscache_object *object) -{ - struct fscache_cookie *cookie = object->cookie; - return atomic_inc_not_zero(&cookie->n_active) != 0; -} - -/** - * fscache_unuse_cookie - Cease usage of cookie attached to an object - * @object: Object description - * - * Cease usage of the cookie attached to an object. When the users count - * reaches zero then the cookie relinquishment will be permitted to proceed. 
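/*
 * Illustrative sketch, not part of the patch: the pin/unpin pattern that
 * the fscache_use_cookie()/fscache_unuse_cookie() helpers being removed
 * here implement.  A cache backend may only pin the netfs data while
 * cookie->n_active is still non-zero, and the final unpin wakes any
 * waiter so that relinquishment can proceed.  This mirrors the removed
 * code; it does not build against the post-patch tree, which drops the
 * n_active counter.
 */
static inline bool example_pin_cookie(struct fscache_cookie *cookie)
{
	/* fails once relinquishment has already dropped n_active to zero */
	return atomic_inc_not_zero(&cookie->n_active) != 0;
}

static inline void example_unpin_cookie(struct fscache_cookie *cookie)
{
	/* the last user lets the pending relinquishment continue */
	if (atomic_dec_and_test(&cookie->n_active))
		wake_up_atomic_t(&cookie->n_active);
}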
- */ -static inline void fscache_unuse_cookie(struct fscache_object *object) -{ - struct fscache_cookie *cookie = object->cookie; - if (atomic_dec_and_test(&cookie->n_active)) - wake_up_atomic_t(&cookie->n_active); -} - /* * out-of-line cache backend functions */ diff --git a/trunk/include/linux/if_team.h b/trunk/include/linux/if_team.h index 16fae6436d0e..4474557904f6 100644 --- a/trunk/include/linux/if_team.h +++ b/trunk/include/linux/if_team.h @@ -249,12 +249,12 @@ team_get_first_port_txable_rcu(struct team *team, struct team_port *port) return port; cur = port; list_for_each_entry_continue_rcu(cur, &team->port_list, list) - if (team_port_txable(cur)) + if (team_port_txable(port)) return cur; list_for_each_entry_rcu(cur, &team->port_list, list) { if (cur == port) break; - if (team_port_txable(cur)) + if (team_port_txable(port)) return cur; } return NULL; diff --git a/trunk/include/linux/if_vlan.h b/trunk/include/linux/if_vlan.h index 637fa71de0c7..52bd03b38962 100644 --- a/trunk/include/linux/if_vlan.h +++ b/trunk/include/linux/if_vlan.h @@ -44,7 +44,7 @@ struct vlan_hdr { * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr) * @h_dest: destination ethernet address * @h_source: source ethernet address - * @h_vlan_proto: ethernet protocol + * @h_vlan_proto: ethernet protocol (always 0x8100) * @h_vlan_TCI: priority and VLAN ID * @h_vlan_encapsulated_proto: packet type ID or len */ diff --git a/trunk/include/linux/jbd.h b/trunk/include/linux/jbd.h index 8685d1be12c7..7e0b622503c4 100644 --- a/trunk/include/linux/jbd.h +++ b/trunk/include/linux/jbd.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -243,31 +244,6 @@ typedef struct journal_superblock_s #include #include - -enum jbd_state_bits { - BH_JBD /* Has an attached ext3 journal_head */ - = BH_PrivateStart, - BH_JWrite, /* Being written to log (@@@ DEBUGGING) */ - BH_Freed, /* Has been freed (truncated) */ - BH_Revoked, /* Has been revoked from the log */ - BH_RevokeValid, /* Revoked flag is valid */ - BH_JBDDirty, /* Is dirty but journaled */ - BH_State, /* Pins most journal_head state */ - BH_JournalHead, /* Pins bh->b_private and jh->b_bh */ - BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */ - BH_JBDPrivateStart, /* First bit available for private use by FS */ -}; - -BUFFER_FNS(JBD, jbd) -BUFFER_FNS(JWrite, jwrite) -BUFFER_FNS(JBDDirty, jbddirty) -TAS_BUFFER_FNS(JBDDirty, jbddirty) -BUFFER_FNS(Revoked, revoked) -TAS_BUFFER_FNS(Revoked, revoked) -BUFFER_FNS(RevokeValid, revokevalid) -TAS_BUFFER_FNS(RevokeValid, revokevalid) -BUFFER_FNS(Freed, freed) - #include #define J_ASSERT(assert) BUG_ON(!(assert)) @@ -864,7 +840,7 @@ extern void journal_release_buffer (handle_t *, struct buffer_head *); extern int journal_forget (handle_t *, struct buffer_head *); extern void journal_sync_buffer (struct buffer_head *); extern void journal_invalidatepage(journal_t *, - struct page *, unsigned int, unsigned int); + struct page *, unsigned long); extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); extern int journal_stop(handle_t *); extern int journal_flush (journal_t *); diff --git a/trunk/include/linux/jbd2.h b/trunk/include/linux/jbd2.h index d5b50a19463c..6e051f472edb 100644 --- a/trunk/include/linux/jbd2.h +++ b/trunk/include/linux/jbd2.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -56,13 +57,17 @@ */ #define JBD2_EXPENSIVE_CHECKING extern ushort jbd2_journal_enable_debug; -void __jbd2_debug(int level, const char *file, 
const char *func, - unsigned int line, const char *fmt, ...); -#define jbd_debug(n, fmt, a...) \ - __jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a) +#define jbd_debug(n, f, a...) \ + do { \ + if ((n) <= jbd2_journal_enable_debug) { \ + printk (KERN_DEBUG "(%s, %d): %s: ", \ + __FILE__, __LINE__, __func__); \ + printk (f, ## a); \ + } \ + } while (0) #else -#define jbd_debug(n, fmt, a...) /**/ +#define jbd_debug(f, a...) /**/ #endif extern void *jbd2_alloc(size_t size, gfp_t flags); @@ -297,34 +302,6 @@ typedef struct journal_superblock_s #include #include - -enum jbd_state_bits { - BH_JBD /* Has an attached ext3 journal_head */ - = BH_PrivateStart, - BH_JWrite, /* Being written to log (@@@ DEBUGGING) */ - BH_Freed, /* Has been freed (truncated) */ - BH_Revoked, /* Has been revoked from the log */ - BH_RevokeValid, /* Revoked flag is valid */ - BH_JBDDirty, /* Is dirty but journaled */ - BH_State, /* Pins most journal_head state */ - BH_JournalHead, /* Pins bh->b_private and jh->b_bh */ - BH_Shadow, /* IO on shadow buffer is running */ - BH_Verified, /* Metadata block has been verified ok */ - BH_JBDPrivateStart, /* First bit available for private use by FS */ -}; - -BUFFER_FNS(JBD, jbd) -BUFFER_FNS(JWrite, jwrite) -BUFFER_FNS(JBDDirty, jbddirty) -TAS_BUFFER_FNS(JBDDirty, jbddirty) -BUFFER_FNS(Revoked, revoked) -TAS_BUFFER_FNS(Revoked, revoked) -BUFFER_FNS(RevokeValid, revokevalid) -TAS_BUFFER_FNS(RevokeValid, revokevalid) -BUFFER_FNS(Freed, freed) -BUFFER_FNS(Shadow, shadow) -BUFFER_FNS(Verified, verified) - #include #define J_ASSERT(assert) BUG_ON(!(assert)) @@ -405,15 +382,8 @@ struct jbd2_revoke_table_s; struct jbd2_journal_handle { - union { - /* Which compound transaction is this update a part of? */ - transaction_t *h_transaction; - /* Which journal handle belongs to - used iff h_reserved set */ - journal_t *h_journal; - }; - - /* Handle reserved for finishing the logical operation */ - handle_t *h_rsv_handle; + /* Which compound transaction is this update a part of? */ + transaction_t *h_transaction; /* Number of remaining buffers we are allowed to dirty: */ int h_buffer_credits; @@ -428,7 +398,6 @@ struct jbd2_journal_handle /* Flags [no locking] */ unsigned int h_sync: 1; /* sync-on-close */ unsigned int h_jdata: 1; /* force data journaling */ - unsigned int h_reserved: 1; /* handle with reserved credits */ unsigned int h_aborted: 1; /* fatal error on handle */ unsigned int h_type: 8; /* for handle statistics */ unsigned int h_line_no: 16; /* for handle statistics */ @@ -554,6 +523,12 @@ struct transaction_s */ struct journal_head *t_checkpoint_io_list; + /* + * Doubly-linked circular list of temporary buffers currently undergoing + * IO in the log [j_list_lock] + */ + struct journal_head *t_iobuf_list; + /* * Doubly-linked circular list of metadata buffers being shadowed by log * IO. The IO buffers on the iobuf list and the shadow buffers on this @@ -561,6 +536,12 @@ struct transaction_s */ struct journal_head *t_shadow_list; + /* + * Doubly-linked circular list of control buffers being written to the + * log. [j_list_lock] + */ + struct journal_head *t_log_list; + /* * List of inodes whose data we've modified in data=ordered mode. 
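/*
 * Illustrative sketch, not part of the patch: how the printk-based
 * jbd_debug() macro restored above is typically called.  The first
 * argument is a verbosity level compared against
 * jbd2_journal_enable_debug; "tid" is just an example value.
 */
static inline void example_jbd_debug_use(tid_t tid)
{
	jbd_debug(3, "JBD2: commit of transaction %u requested\n", tid);
}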
* [j_list_lock] @@ -690,10 +671,11 @@ jbd2_time_diff(unsigned long start, unsigned long end) * waiting for checkpointing * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction * to start committing, or for a barrier lock to be released + * @j_wait_logspace: Wait queue for waiting for checkpointing to complete * @j_wait_done_commit: Wait queue for waiting for commit to complete + * @j_wait_checkpoint: Wait queue to trigger checkpointing * @j_wait_commit: Wait queue to trigger commit * @j_wait_updates: Wait queue to wait for updates to complete - * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints * @j_head: Journal head - identifies the first unused block in the journal * @j_tail: Journal tail - identifies the oldest still-used block in the @@ -707,7 +689,6 @@ jbd2_time_diff(unsigned long start, unsigned long end) * journal * @j_fs_dev: Device which holds the client fs. For internal journal this will * be equal to j_dev - * @j_reserved_credits: Number of buffers reserved from the running transaction * @j_maxlen: Total maximum capacity of the journal region on disk. * @j_list_lock: Protects the buffer lists and internal buffer state. * @j_inode: Optional inode where we store the journal. If present, all journal @@ -797,18 +778,21 @@ struct journal_s */ wait_queue_head_t j_wait_transaction_locked; + /* Wait queue for waiting for checkpointing to complete */ + wait_queue_head_t j_wait_logspace; + /* Wait queue for waiting for commit to complete */ wait_queue_head_t j_wait_done_commit; + /* Wait queue to trigger checkpointing */ + wait_queue_head_t j_wait_checkpoint; + /* Wait queue to trigger commit */ wait_queue_head_t j_wait_commit; /* Wait queue to wait for updates to complete */ wait_queue_head_t j_wait_updates; - /* Wait queue to wait for reserved buffer credits to drop */ - wait_queue_head_t j_wait_reserved; - /* Semaphore for locking against concurrent checkpoints */ struct mutex j_checkpoint_mutex; @@ -863,9 +847,6 @@ struct journal_s /* Total maximum capacity of the journal region on disk. */ unsigned int j_maxlen; - /* Number of buffers reserved from the running transaction */ - atomic_t j_reserved_credits; - /* * Protects the buffer lists and internal buffer state. 
*/ @@ -1010,17 +991,9 @@ extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, i extern void __journal_free_buffer(struct journal_head *bh); extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int); extern void __journal_clean_data_list(transaction_t *transaction); -static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh) -{ - list_add_tail(&bh->b_assoc_buffers, head); -} -static inline void jbd2_unfile_log_bh(struct buffer_head *bh) -{ - list_del_init(&bh->b_assoc_buffers); -} /* Log buffer allocation */ -struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal); +extern struct journal_head * jbd2_journal_get_descriptor_buffer(journal_t *); int jbd2_journal_next_log_block(journal_t *, unsigned long long *); int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid, unsigned long *block); @@ -1066,10 +1039,11 @@ extern void jbd2_buffer_abort_trigger(struct journal_head *jh, struct jbd2_buffer_trigger_type *triggers); /* Buffer IO */ -extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction, - struct journal_head *jh_in, - struct buffer_head **bh_out, - sector_t blocknr); +extern int +jbd2_journal_write_metadata_buffer(transaction_t *transaction, + struct journal_head *jh_in, + struct journal_head **jh_out, + unsigned long long blocknr); /* Transaction locking */ extern void __wait_on_journal (journal_t *); @@ -1102,14 +1076,10 @@ static inline handle_t *journal_current_handle(void) */ extern handle_t *jbd2_journal_start(journal_t *, int nblocks); -extern handle_t *jbd2__journal_start(journal_t *, int blocks, int rsv_blocks, - gfp_t gfp_mask, unsigned int type, - unsigned int line_no); +extern handle_t *jbd2__journal_start(journal_t *, int nblocks, gfp_t gfp_mask, + unsigned int type, unsigned int line_no); extern int jbd2_journal_restart(handle_t *, int nblocks); extern int jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask); -extern int jbd2_journal_start_reserved(handle_t *handle, - unsigned int type, unsigned int line_no); -extern void jbd2_journal_free_reserved(handle_t *handle); extern int jbd2_journal_extend (handle_t *, int nblocks); extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *); extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *); @@ -1120,7 +1090,7 @@ extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); extern int jbd2_journal_forget (handle_t *, struct buffer_head *); extern void journal_sync_buffer (struct buffer_head *); extern int jbd2_journal_invalidatepage(journal_t *, - struct page *, unsigned int, unsigned int); + struct page *, unsigned long); extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); extern int jbd2_journal_stop(handle_t *); extern int jbd2_journal_flush (journal_t *); @@ -1155,7 +1125,6 @@ extern void jbd2_journal_ack_err (journal_t *); extern int jbd2_journal_clear_err (journal_t *); extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *); extern int jbd2_journal_force_commit(journal_t *); -extern int jbd2_journal_force_commit_nested(journal_t *); extern int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode); extern int jbd2_journal_begin_ordered_truncate(journal_t *journal, struct jbd2_inode *inode, loff_t new_size); @@ -1209,10 +1178,8 @@ extern int jbd2_journal_init_revoke_caches(void); extern void jbd2_journal_destroy_revoke(journal_t *); extern int jbd2_journal_revoke (handle_t *, unsigned 
long long, struct buffer_head *); extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *); -extern void jbd2_journal_write_revoke_records(journal_t *journal, - transaction_t *transaction, - struct list_head *log_bufs, - int write_op); +extern void jbd2_journal_write_revoke_records(journal_t *, + transaction_t *, int); /* Recovery revoke support */ extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t); @@ -1228,9 +1195,11 @@ extern void jbd2_clear_buffer_revoked_flags(journal_t *journal); * transitions on demand. */ +int __jbd2_log_space_left(journal_t *); /* Called with journal locked */ int jbd2_log_start_commit(journal_t *journal, tid_t tid); int __jbd2_log_start_commit(journal_t *journal, tid_t tid); int jbd2_journal_start_commit(journal_t *journal, tid_t *tid); +int jbd2_journal_force_commit_nested(journal_t *journal); int jbd2_log_wait_commit(journal_t *journal, tid_t tid); int jbd2_complete_transaction(journal_t *journal, tid_t tid); int jbd2_log_do_checkpoint(journal_t *journal); @@ -1266,7 +1235,7 @@ static inline int is_journal_aborted(journal_t *journal) static inline int is_handle_aborted(handle_t *handle) { - if (handle->h_aborted || !handle->h_transaction) + if (handle->h_aborted) return 1; return is_journal_aborted(handle->h_transaction->t_journal); } @@ -1296,38 +1265,17 @@ static inline int tid_geq(tid_t x, tid_t y) extern int jbd2_journal_blocks_per_page(struct inode *inode); extern size_t journal_tag_bytes(journal_t *journal); -/* - * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for - * transaction control blocks. - */ -#define JBD2_CONTROL_BLOCKS_SHIFT 5 - /* * Return the minimum number of blocks which must be free in the journal * before a new transaction may be started. Must be called under j_state_lock. */ -static inline int jbd2_space_needed(journal_t *journal) +static inline int jbd_space_needed(journal_t *journal) { int nblocks = journal->j_max_transaction_buffers; - return nblocks + (nblocks >> JBD2_CONTROL_BLOCKS_SHIFT); -} - -/* - * Return number of free blocks in the log. Must be called under j_state_lock. 
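/*
 * Worked example, not part of the patch: with
 * journal->j_max_transaction_buffers = 256 and a committing transaction
 * holding 100 outstanding credits, the jbd_space_needed() helper restored
 * above requires 256 + 100 = 356 free journal blocks before a new
 * transaction may start.  The removed jbd2_space_needed() instead added a
 * fixed control-block reserve, 256 + (256 >> 5) = 264 blocks, and charged
 * the committing transaction's credits in jbd2_log_space_left() instead.
 */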
- */ -static inline unsigned long jbd2_log_space_left(journal_t *journal) -{ - /* Allow for rounding errors */ - unsigned long free = journal->j_free - 32; - - if (journal->j_committing_transaction) { - unsigned long committing = atomic_read(&journal-> - j_committing_transaction->t_outstanding_credits); - - /* Transaction + control blocks */ - free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT); - } - return free; + if (journal->j_committing_transaction) + nblocks += atomic_read(&journal->j_committing_transaction-> + t_outstanding_credits); + return nblocks; } /* @@ -1338,9 +1286,11 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal) #define BJ_None 0 /* Not journaled */ #define BJ_Metadata 1 /* Normal journaled metadata */ #define BJ_Forget 2 /* Buffer superseded by this transaction */ -#define BJ_Shadow 3 /* Buffer contents being shadowed to the log */ -#define BJ_Reserved 4 /* Buffer is reserved for access by journal */ -#define BJ_Types 5 +#define BJ_IO 3 /* Buffer is for temporary IO use */ +#define BJ_Shadow 4 /* Buffer contents being shadowed to the log */ +#define BJ_LogCtl 5 /* Buffer contains log descriptors */ +#define BJ_Reserved 6 /* Buffer is reserved for access by journal */ +#define BJ_Types 7 extern int jbd_blocks_per_page(struct inode *inode); @@ -1369,19 +1319,6 @@ static inline u32 jbd2_chksum(journal_t *journal, u32 crc, return *(u32 *)desc.ctx; } -/* Return most recent uncommitted transaction */ -static inline tid_t jbd2_get_latest_transaction(journal_t *journal) -{ - tid_t tid; - - read_lock(&journal->j_state_lock); - tid = journal->j_commit_request; - if (journal->j_running_transaction) - tid = journal->j_running_transaction->t_tid; - read_unlock(&journal->j_state_lock); - return tid; -} - #ifdef __KERNEL__ #define buffer_trace_init(bh) do {} while (0) diff --git a/trunk/include/linux/jbd_common.h b/trunk/include/linux/jbd_common.h index 3dc53432355f..6133679bc4c0 100644 --- a/trunk/include/linux/jbd_common.h +++ b/trunk/include/linux/jbd_common.h @@ -1,7 +1,31 @@ #ifndef _LINUX_JBD_STATE_H #define _LINUX_JBD_STATE_H -#include +enum jbd_state_bits { + BH_JBD /* Has an attached ext3 journal_head */ + = BH_PrivateStart, + BH_JWrite, /* Being written to log (@@@ DEBUGGING) */ + BH_Freed, /* Has been freed (truncated) */ + BH_Revoked, /* Has been revoked from the log */ + BH_RevokeValid, /* Revoked flag is valid */ + BH_JBDDirty, /* Is dirty but journaled */ + BH_State, /* Pins most journal_head state */ + BH_JournalHead, /* Pins bh->b_private and jh->b_bh */ + BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */ + BH_Verified, /* Metadata block has been verified ok */ + BH_JBDPrivateStart, /* First bit available for private use by FS */ +}; + +BUFFER_FNS(JBD, jbd) +BUFFER_FNS(JWrite, jwrite) +BUFFER_FNS(JBDDirty, jbddirty) +TAS_BUFFER_FNS(JBDDirty, jbddirty) +BUFFER_FNS(Revoked, revoked) +TAS_BUFFER_FNS(Revoked, revoked) +BUFFER_FNS(RevokeValid, revokevalid) +TAS_BUFFER_FNS(RevokeValid, revokevalid) +BUFFER_FNS(Freed, freed) +BUFFER_FNS(Verified, verified) static inline struct buffer_head *jh2bh(struct journal_head *jh) { diff --git a/trunk/include/linux/journal-head.h b/trunk/include/linux/journal-head.h index 98cd41bb39c8..13a3da25ff07 100644 --- a/trunk/include/linux/journal-head.h +++ b/trunk/include/linux/journal-head.h @@ -30,19 +30,15 @@ struct journal_head { /* * Journalling list for this buffer [jbd_lock_bh_state()] - * NOTE: We *cannot* combine this with b_modified into a bitfield - * as gcc would then (which the C 
standard allows but which is - * very unuseful) make 64-bit accesses to the bitfield and clobber - * b_jcount if its update races with bitfield modification. */ - unsigned b_jlist; + unsigned b_jlist:4; /* * This flag signals the buffer has been modified by * the currently running transaction * [jbd_lock_bh_state()] */ - unsigned b_modified; + unsigned b_modified:1; /* * Copy of the buffer data frozen for writing to the log. diff --git a/trunk/include/linux/kernel.h b/trunk/include/linux/kernel.h index e9ef6d6b51d5..e96329ceb28c 100644 --- a/trunk/include/linux/kernel.h +++ b/trunk/include/linux/kernel.h @@ -562,9 +562,6 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...); extern __printf(2, 3) int __trace_printk(unsigned long ip, const char *fmt, ...); -extern int __trace_bputs(unsigned long ip, const char *str); -extern int __trace_puts(unsigned long ip, const char *str, int size); - /** * trace_puts - write a string into the ftrace buffer * @str: the string to record @@ -590,6 +587,8 @@ extern int __trace_puts(unsigned long ip, const char *str, int size); * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) */ +extern int __trace_bputs(unsigned long ip, const char *str); +extern int __trace_puts(unsigned long ip, const char *str, int size); #define trace_puts(str) ({ \ static const char *trace_printk_fmt \ __attribute__((section("__trace_printk_fmt"))) = \ diff --git a/trunk/include/linux/kref.h b/trunk/include/linux/kref.h index 484604d184be..e15828fd71f1 100644 --- a/trunk/include/linux/kref.h +++ b/trunk/include/linux/kref.h @@ -19,7 +19,6 @@ #include #include #include -#include struct kref { atomic_t refcount; @@ -99,38 +98,6 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref) return kref_sub(kref, 1, release); } -/** - * kref_put_spinlock_irqsave - decrement refcount for object. - * @kref: object. - * @release: pointer to the function that will clean up the object when the - * last reference to the object is released. - * This pointer is required, and it is not acceptable to pass kfree - * in as this function. - * @lock: lock to take in release case - * - * Behaves identical to kref_put with one exception. If the reference count - * drops to zero, the lock will be taken atomically wrt dropping the reference - * count. The release function has to call spin_unlock() without _irqrestore. - */ -static inline int kref_put_spinlock_irqsave(struct kref *kref, - void (*release)(struct kref *kref), - spinlock_t *lock) -{ - unsigned long flags; - - WARN_ON(release == NULL); - if (atomic_add_unless(&kref->refcount, -1, 1)) - return 0; - spin_lock_irqsave(lock, flags); - if (atomic_dec_and_test(&kref->refcount)) { - release(kref); - local_irq_restore(flags); - return 1; - } - spin_unlock_irqrestore(lock, flags); - return 0; -} - static inline int kref_put_mutex(struct kref *kref, void (*release)(struct kref *kref), struct mutex *lock) diff --git a/trunk/include/linux/kvm_host.h b/trunk/include/linux/kvm_host.h index 8db53cfaccdb..f0eea07d2c2b 100644 --- a/trunk/include/linux/kvm_host.h +++ b/trunk/include/linux/kvm_host.h @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -761,6 +760,42 @@ static inline int kvm_iommu_unmap_guest(struct kvm *kvm) } #endif +static inline void __guest_enter(void) +{ + /* + * This is running in ioctl context so we can avoid + * the call to vtime_account() with its unnecessary idle check. 
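/*
 * Illustrative sketch, not part of the patch: the guest_enter()/
 * guest_exit() pair added in this hunk is meant to bracket the window in
 * which the vCPU thread actually runs guest code, so CPU time spent there
 * is accounted to the guest.  The world-switch step is hypothetical.
 */
static void example_vcpu_run_window(void)
{
	guest_enter();		/* charge subsequent CPU time to the guest */
	/* ... arch-specific world switch into the guest would go here ... */
	guest_exit();		/* back to normal system-time accounting */
}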
+ */ + vtime_account_system(current); + current->flags |= PF_VCPU; +} + +static inline void __guest_exit(void) +{ + /* + * This is running in ioctl context so we can avoid + * the call to vtime_account() with its unnecessary idle check. + */ + vtime_account_system(current); + current->flags &= ~PF_VCPU; +} + +#ifdef CONFIG_CONTEXT_TRACKING +extern void guest_enter(void); +extern void guest_exit(void); + +#else /* !CONFIG_CONTEXT_TRACKING */ +static inline void guest_enter(void) +{ + __guest_enter(); +} + +static inline void guest_exit(void) +{ + __guest_exit(); +} +#endif /* !CONFIG_CONTEXT_TRACKING */ + static inline void kvm_guest_enter(void) { unsigned long flags; diff --git a/trunk/include/linux/list.h b/trunk/include/linux/list.h index b83e5657365a..6a1f8df9144b 100644 --- a/trunk/include/linux/list.h +++ b/trunk/include/linux/list.h @@ -361,17 +361,6 @@ static inline void list_splice_tail_init(struct list_head *list, #define list_first_entry(ptr, type, member) \ list_entry((ptr)->next, type, member) -/** - * list_first_entry_or_null - get the first element from a list - * @ptr: the list head to take the element from. - * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. - * - * Note that if the list is empty, it returns NULL. - */ -#define list_first_entry_or_null(ptr, type, member) \ - (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL) - /** * list_for_each - iterate over a list * @pos: the &struct list_head to use as a loop cursor. diff --git a/trunk/drivers/block/loop.h b/trunk/include/linux/loop.h similarity index 98% rename from trunk/drivers/block/loop.h rename to trunk/include/linux/loop.h index 90df5d6485b6..460b60fa7adf 100644 --- a/trunk/drivers/block/loop.h +++ b/trunk/include/linux/loop.h @@ -1,5 +1,5 @@ /* - * loop.h + * include/linux/loop.h * * Written by Theodore Ts'o, 3/29/93. 
* diff --git a/trunk/include/linux/math64.h b/trunk/include/linux/math64.h index 2913b86eb12a..b8ba85544721 100644 --- a/trunk/include/linux/math64.h +++ b/trunk/include/linux/math64.h @@ -6,8 +6,7 @@ #if BITS_PER_LONG == 64 -#define div64_long(x, y) div64_s64((x), (y)) -#define div64_ul(x, y) div64_u64((x), (y)) +#define div64_long(x,y) div64_s64((x),(y)) /** * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder @@ -48,8 +47,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor) #elif BITS_PER_LONG == 32 -#define div64_long(x, y) div_s64((x), (y)) -#define div64_ul(x, y) div_u64((x), (y)) +#define div64_long(x,y) div_s64((x),(y)) #ifndef div_u64_rem static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) diff --git a/trunk/include/linux/mfd/abx500/ab8500.h b/trunk/include/linux/mfd/abx500/ab8500.h index 0390d5943ed6..fb1bf7d6a410 100644 --- a/trunk/include/linux/mfd/abx500/ab8500.h +++ b/trunk/include/linux/mfd/abx500/ab8500.h @@ -373,11 +373,13 @@ struct ab8500_sysctrl_platform_data; /** * struct ab8500_platform_data - AB8500 platform data * @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used + * @pm_power_off: Should machine pm power off hook be registered or not * @init: board-specific initialization after detection of ab8500 * @regulator: machine-specific constraints for regulators */ struct ab8500_platform_data { int irq_base; + bool pm_power_off; void (*init) (struct ab8500 *); struct ab8500_regulator_platform_data *regulator; struct abx500_gpio_platform_data *gpio; diff --git a/trunk/include/linux/mlx4/qp.h b/trunk/include/linux/mlx4/qp.h index 352eec9df1b8..67f46ad6920a 100644 --- a/trunk/include/linux/mlx4/qp.h +++ b/trunk/include/linux/mlx4/qp.h @@ -126,7 +126,7 @@ struct mlx4_rss_context { struct mlx4_qp_path { u8 fl; - u8 vlan_control; + u8 reserved1[1]; u8 disable_pkey_check; u8 pkey_index; u8 counter_index; @@ -141,32 +141,11 @@ struct mlx4_qp_path { u8 sched_queue; u8 vlan_index; u8 feup; - u8 fvl_rx; + u8 reserved3; u8 reserved4[2]; u8 dmac[6]; }; -enum { /* fl */ - MLX4_FL_CV = 1 << 6, - MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2 -}; -enum { /* vlan_control */ - MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6, - MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED = 1 << 2, - MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED = 1 << 1, /* 802.1p priority tag */ - MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED = 1 << 0 -}; - -enum { /* feup */ - MLX4_FEUP_FORCE_ETH_UP = 1 << 6, /* force Eth UP */ - MLX4_FSM_FORCE_ETH_SRC_MAC = 1 << 5, /* force Source MAC */ - MLX4_FVL_FORCE_ETH_VLAN = 1 << 3 /* force Eth vlan */ -}; - -enum { /* fvl_rx */ - MLX4_FVL_RX_FORCE_ETH_VLAN = 1 << 0 /* enforce Eth rx vlan */ -}; - struct mlx4_qp_context { __be32 flags; __be32 pd; @@ -206,10 +185,6 @@ struct mlx4_qp_context { u32 reserved5[10]; }; -enum { /* param3 */ - MLX4_STRIP_VLAN = 1 << 30 -}; - /* Which firmware version adds support for NEC (NoErrorCompletion) bit */ #define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232) diff --git a/trunk/include/linux/mm.h b/trunk/include/linux/mm.h index 66d881f1d576..e0c8528a41a4 100644 --- a/trunk/include/linux/mm.h +++ b/trunk/include/linux/mm.h @@ -1041,8 +1041,7 @@ int get_kernel_page(unsigned long start, int write, struct page **pages); struct page *get_dump_page(unsigned long addr); extern int try_to_release_page(struct page * page, gfp_t gfp_mask); -extern void do_invalidatepage(struct page *page, unsigned int offset, - unsigned int length); +extern void do_invalidatepage(struct page *page, unsigned long offset); int 
__set_page_dirty_nobuffers(struct page *page); int __set_page_dirty_no_writeback(struct page *page); diff --git a/trunk/include/linux/netdevice.h b/trunk/include/linux/netdevice.h index 96e4c21e15e0..a94a5a0ab122 100644 --- a/trunk/include/linux/netdevice.h +++ b/trunk/include/linux/netdevice.h @@ -1695,7 +1695,6 @@ extern int init_dummy_netdev(struct net_device *dev); extern struct net_device *dev_get_by_index(struct net *net, int ifindex); extern struct net_device *__dev_get_by_index(struct net *net, int ifindex); extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); -extern int netdev_get_name(struct net *net, char *name, int ifindex); extern int dev_restart(struct net_device *dev); #ifdef CONFIG_NETPOLL_TRAP extern int netpoll_trap(void); @@ -2734,17 +2733,6 @@ static inline netdev_features_t netdev_get_wanted_features( } netdev_features_t netdev_increment_features(netdev_features_t all, netdev_features_t one, netdev_features_t mask); - -/* Allow TSO being used on stacked device : - * Performing the GSO segmentation before last device - * is a performance improvement. - */ -static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, - netdev_features_t mask) -{ - return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); -} - int __netdev_update_features(struct net_device *dev); void netdev_update_features(struct net_device *dev); void netdev_change_features(struct net_device *dev); diff --git a/trunk/include/linux/netfilter_ipv6.h b/trunk/include/linux/netfilter_ipv6.h index 2d4df6ce043e..98ffb54988b6 100644 --- a/trunk/include/linux/netfilter_ipv6.h +++ b/trunk/include/linux/netfilter_ipv6.h @@ -17,22 +17,6 @@ extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, extern int ipv6_netfilter_init(void); extern void ipv6_netfilter_fini(void); - -/* - * Hook functions for ipv6 to allow xt_* modules to be built-in even - * if IPv6 is a module. 
- */ -struct nf_ipv6_ops { - int (*chk_addr)(struct net *net, const struct in6_addr *addr, - const struct net_device *dev, int strict); -}; - -extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; -static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) -{ - return rcu_dereference(nf_ipv6_ops); -} - #else /* CONFIG_NETFILTER */ static inline int ipv6_netfilter_init(void) { return 0; } static inline void ipv6_netfilter_fini(void) { return; } diff --git a/trunk/include/linux/of_platform.h b/trunk/include/linux/of_platform.h index 2a93b64a3869..3863a4dbdf18 100644 --- a/trunk/include/linux/of_platform.h +++ b/trunk/include/linux/of_platform.h @@ -11,10 +11,9 @@ * */ +#ifdef CONFIG_OF_DEVICE #include #include - -#ifdef CONFIG_OF_DEVICE #include #include #include @@ -101,7 +100,7 @@ extern int of_platform_populate(struct device_node *root, #if !defined(CONFIG_OF_ADDRESS) struct of_dev_auxdata; -struct device_node; +struct device; static inline int of_platform_populate(struct device_node *root, const struct of_device_id *matches, const struct of_dev_auxdata *lookup, diff --git a/trunk/include/linux/pci-acpi.h b/trunk/include/linux/pci-acpi.h index 170447977278..81b31613eb25 100644 --- a/trunk/include/linux/pci-acpi.h +++ b/trunk/include/linux/pci-acpi.h @@ -60,13 +60,11 @@ static inline void acpi_pci_slot_remove(struct pci_bus *bus) { } void acpiphp_init(void); void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle); void acpiphp_remove_slots(struct pci_bus *bus); -void acpiphp_check_host_bridge(acpi_handle handle); #else static inline void acpiphp_init(void) { } static inline void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle) { } static inline void acpiphp_remove_slots(struct pci_bus *bus) { } -static inline void acpiphp_check_host_bridge(acpi_handle handle) { } #endif #else /* CONFIG_ACPI */ diff --git a/trunk/include/linux/perf_event.h b/trunk/include/linux/perf_event.h index c5b6dbf9c2fc..f463a46424e2 100644 --- a/trunk/include/linux/perf_event.h +++ b/trunk/include/linux/perf_event.h @@ -389,7 +389,8 @@ struct perf_event { /* mmap bits */ struct mutex mmap_mutex; atomic_t mmap_count; - + int mmap_locked; + struct user_struct *mmap_user; struct ring_buffer *rb; struct list_head rb_entry; diff --git a/trunk/include/linux/pinctrl/pinconf-generic.h b/trunk/include/linux/pinctrl/pinconf-generic.h index 6aa238096622..72474e18f1e0 100644 --- a/trunk/include/linux/pinctrl/pinconf-generic.h +++ b/trunk/include/linux/pinctrl/pinconf-generic.h @@ -37,17 +37,17 @@ * if it is 0, pull-down is disabled. * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and * low, this is the most typical case and is typically achieved with two - * active transistors on the output. Setting this config will enable + * active transistors on the output. Sending this config will enabale * push-pull mode, the argument is ignored. * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open * collector) which means it is usually wired with other output ports - * which are then pulled up with an external resistor. Setting this - * config will enable open drain mode, the argument is ignored. + * which are then pulled up with an external resistor. Sending this + * config will enabale open drain mode, the argument is ignored. * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source - * (open emitter). Setting this config will enable open drain mode, the + * (open emitter). 
Sending this config will enabale open drain mode, the * argument is ignored. - * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current - * passed as argument. The argument is in mA. + * @PIN_CONFIG_DRIVE_STRENGTH: the pin will output the current passed as + * argument. The argument is in mA. * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin. * If the argument != 0, schmitt-trigger mode is enabled. If it's 0, * schmitt-trigger mode is disabled. diff --git a/trunk/include/linux/platform_data/clk-lpss.h b/trunk/include/linux/platform_data/clk-lpss.h index 23901992b9dd..528e73ce46d2 100644 --- a/trunk/include/linux/platform_data/clk-lpss.h +++ b/trunk/include/linux/platform_data/clk-lpss.h @@ -13,11 +13,6 @@ #ifndef __CLK_LPSS_H #define __CLK_LPSS_H -struct lpss_clk_data { - const char *name; - struct clk *clk; -}; - extern int lpt_clk_init(void); #endif /* __CLK_LPSS_H */ diff --git a/trunk/include/linux/platform_data/serial-omap.h b/trunk/include/linux/platform_data/serial-omap.h index c860c1b314c0..ff9b0aab5281 100644 --- a/trunk/include/linux/platform_data/serial-omap.h +++ b/trunk/include/linux/platform_data/serial-omap.h @@ -43,6 +43,8 @@ struct omap_uart_port_info { int DTR_present; int (*get_context_loss_count)(struct device *); + void (*set_forceidle)(struct device *); + void (*set_noidle)(struct device *); void (*enable_wakeup)(struct device *, bool); }; diff --git a/trunk/include/linux/preempt.h b/trunk/include/linux/preempt.h index f5d4723cdb3d..87a03c746f17 100644 --- a/trunk/include/linux/preempt.h +++ b/trunk/include/linux/preempt.h @@ -33,25 +33,9 @@ do { \ preempt_schedule(); \ } while (0) -#ifdef CONFIG_CONTEXT_TRACKING - -void preempt_schedule_context(void); - -#define preempt_check_resched_context() \ -do { \ - if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ - preempt_schedule_context(); \ -} while (0) -#else - -#define preempt_check_resched_context() preempt_check_resched() - -#endif /* CONFIG_CONTEXT_TRACKING */ - #else /* !CONFIG_PREEMPT */ #define preempt_check_resched() do { } while (0) -#define preempt_check_resched_context() do { } while (0) #endif /* CONFIG_PREEMPT */ @@ -104,7 +88,7 @@ do { \ do { \ preempt_enable_no_resched_notrace(); \ barrier(); \ - preempt_check_resched_context(); \ + preempt_check_resched(); \ } while (0) #else /* !CONFIG_PREEMPT_COUNT */ diff --git a/trunk/include/linux/printk.h b/trunk/include/linux/printk.h index 22c7052e9372..6af944ab38f0 100644 --- a/trunk/include/linux/printk.h +++ b/trunk/include/linux/printk.h @@ -4,7 +4,6 @@ #include #include #include -#include extern const char linux_banner[]; extern const char linux_proc_banner[]; diff --git a/trunk/include/linux/rculist.h b/trunk/include/linux/rculist.h index f4b1001a4676..8089e35d47ac 100644 --- a/trunk/include/linux/rculist.h +++ b/trunk/include/linux/rculist.h @@ -460,26 +460,6 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ &(pos)->member)), typeof(*(pos)), member)) -/** - * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing) - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the hlist_node within the struct. - * - * This list-traversal primitive may safely run concurrently with - * the _rcu list-mutation primitives such as hlist_add_head_rcu() - * as long as the traversal is guarded by rcu_read_lock(). 
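/*
 * Illustrative sketch, not part of the patch: the usual calling pattern
 * for the RCU hlist iterators documented above -- the whole walk must sit
 * inside an RCU read-side critical section.  "struct conn" and its fields
 * are made up for the example.
 */
struct conn {
	int id;
	struct hlist_node node;
};

static int example_count_id(struct hlist_head *head, int id)
{
	struct conn *c;
	int n = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(c, head, node)
		if (c->id == id)
			n++;
	rcu_read_unlock();
	return n;
}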
- * - * This is the same as hlist_for_each_entry_rcu() except that it does - * not do any RCU debugging or tracing. - */ -#define hlist_for_each_entry_rcu_notrace(pos, head, member) \ - for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\ - typeof(*(pos)), member); \ - pos; \ - pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\ - &(pos)->member)), typeof(*(pos)), member)) - /** * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. diff --git a/trunk/include/linux/rculist_nulls.h b/trunk/include/linux/rculist_nulls.h index 1c33dd7da4a7..2ae13714828b 100644 --- a/trunk/include/linux/rculist_nulls.h +++ b/trunk/include/linux/rculist_nulls.h @@ -105,14 +105,9 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, * @head: the head for your list. * @member: the name of the hlist_nulls_node within the struct. * - * The barrier() is needed to make sure compiler doesn't cache first element [1], - * as this loop can be restarted [2] - * [1] Documentation/atomic_ops.txt around line 114 - * [2] Documentation/RCU/rculist_nulls.txt around line 146 */ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ - for (({barrier();}), \ - pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ + for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ (!is_a_nulls(pos)) && \ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) diff --git a/trunk/include/linux/rcupdate.h b/trunk/include/linux/rcupdate.h index ddcc7826d907..4ccd68e49b00 100644 --- a/trunk/include/linux/rcupdate.h +++ b/trunk/include/linux/rcupdate.h @@ -640,15 +640,6 @@ static inline void rcu_preempt_sleep_check(void) #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/ -/* - * The tracing infrastructure traces RCU (we want that), but unfortunately - * some of the RCU checks causes tracing to lock up the system. - * - * The tracing version of rcu_dereference_raw() must not call - * rcu_read_lock_held(). - */ -#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) - /** * rcu_access_index() - fetch RCU index with no dereferencing * @p: The index to read diff --git a/trunk/include/linux/rio.h b/trunk/include/linux/rio.h index 18e099342e6f..a3e784278667 100644 --- a/trunk/include/linux/rio.h +++ b/trunk/include/linux/rio.h @@ -83,6 +83,7 @@ extern struct bus_type rio_bus_type; extern struct device rio_bus; +extern struct list_head rio_devices; /* list of all devices */ struct rio_mport; struct rio_dev; @@ -236,7 +237,6 @@ enum rio_phy_type { * @name: Port name string * @priv: Master port private data * @dma: DMA device associated with mport - * @nscan: RapidIO network enumeration/discovery operations */ struct rio_mport { struct list_head dbells; /* list of doorbell events */ @@ -262,14 +262,8 @@ struct rio_mport { #ifdef CONFIG_RAPIDIO_DMA_ENGINE struct dma_device dma; #endif - struct rio_scan *nscan; }; -/* - * Enumeration/discovery control flags - */ -#define RIO_SCAN_ENUM_NO_WAIT 0x00000001 /* Do not wait for enum completed */ - struct rio_id_table { u16 start; /* logical minimal id */ u32 max; /* max number of IDs in table */ @@ -466,16 +460,6 @@ static inline struct rio_mport *dma_to_mport(struct dma_device *ddev) } #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ -/** - * struct rio_scan - RIO enumeration and discovery operations - * @enumerate: Callback to perform RapidIO fabric enumeration. 
- * @discover: Callback to perform RapidIO fabric discovery. - */ -struct rio_scan { - int (*enumerate)(struct rio_mport *mport, u32 flags); - int (*discover)(struct rio_mport *mport, u32 flags); -}; - /* Architecture and hardware-specific functions */ extern int rio_register_mport(struct rio_mport *); extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); diff --git a/trunk/include/linux/rio_drv.h b/trunk/include/linux/rio_drv.h index 5059994fe297..b75c05920ab5 100644 --- a/trunk/include/linux/rio_drv.h +++ b/trunk/include/linux/rio_drv.h @@ -433,6 +433,5 @@ extern u16 rio_local_get_device_id(struct rio_mport *port); extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from); extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did, struct rio_dev *from); -extern int rio_init_mports(void); #endif /* LINUX_RIO_DRV_H */ diff --git a/trunk/include/linux/scatterlist.h b/trunk/include/linux/scatterlist.h index 26806775b11b..5951e3f38878 100644 --- a/trunk/include/linux/scatterlist.h +++ b/trunk/include/linux/scatterlist.h @@ -111,9 +111,6 @@ static inline struct page *sg_page(struct scatterlist *sg) static inline void sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen) { -#ifdef CONFIG_DEBUG_SG - BUG_ON(!virt_addr_valid(buf)); -#endif sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); } diff --git a/trunk/include/linux/skbuff.h b/trunk/include/linux/skbuff.h index dec1748cd002..2e0ced1af3b1 100644 --- a/trunk/include/linux/skbuff.h +++ b/trunk/include/linux/skbuff.h @@ -627,7 +627,6 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb) } extern void kfree_skb(struct sk_buff *skb); -extern void kfree_skb_list(struct sk_buff *segs); extern void skb_tx_error(struct sk_buff *skb); extern void consume_skb(struct sk_buff *skb); extern void __kfree_skb(struct sk_buff *skb); @@ -2853,21 +2852,6 @@ static inline int skb_tnl_header_len(const struct sk_buff *inner_skb) SKB_GSO_CB(inner_skb)->mac_offset; } -static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) -{ - int new_headroom, headroom; - int ret; - - headroom = skb_headroom(skb); - ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC); - if (ret) - return ret; - - new_headroom = skb_headroom(skb); - SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom); - return 0; -} - static inline bool skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->gso_size; diff --git a/trunk/include/linux/smp.h b/trunk/include/linux/smp.h index c8488763277f..e6564c1dc552 100644 --- a/trunk/include/linux/smp.h +++ b/trunk/include/linux/smp.h @@ -11,7 +11,6 @@ #include #include #include -#include extern void cpu_idle(void); @@ -140,17 +139,13 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info) } #define smp_call_function(func, info, wait) \ (up_smp_call_function(func, info)) - -static inline int on_each_cpu(smp_call_func_t func, void *info, int wait) -{ - unsigned long flags; - - local_irq_save(flags); - func(info); - local_irq_restore(flags); - return 0; -} - +#define on_each_cpu(func,info,wait) \ + ({ \ + local_irq_disable(); \ + func(info); \ + local_irq_enable(); \ + 0; \ + }) /* * Note we still need to test the mask even for UP * because we actually can get an empty mask from diff --git a/trunk/include/linux/socket.h b/trunk/include/linux/socket.h index b10ce4b341ea..428c37a1f95c 100644 --- a/trunk/include/linux/socket.h +++ b/trunk/include/linux/socket.h @@ -305,6 +305,7 @@ struct ucred { extern void 
cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred); +extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, int offset, int len); extern int csum_partial_copy_fromiovecend(unsigned char *kdata, @@ -313,6 +314,7 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata, unsigned int len, __wsum *csump); extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode); +extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, int offset, int len); extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); @@ -320,9 +322,6 @@ extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); struct timespec; -/* The __sys_...msg variants allow MSG_CMSG_COMPAT */ -extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); -extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, diff --git a/trunk/include/linux/spi/spi.h b/trunk/include/linux/spi/spi.h index 6ff26c8db7b9..733eb5ee31c5 100644 --- a/trunk/include/linux/spi/spi.h +++ b/trunk/include/linux/spi/spi.h @@ -57,7 +57,7 @@ extern struct bus_type spi_bus_type; * @modalias: Name of the driver to use with this device, or an alias * for that name. This appears in the sysfs "modalias" attribute * for driver coldplugging, and in uevents used for hotplugging - * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when + * @cs_gpio: gpio number of the chipselect line (optional, -EINVAL when * when not using a GPIO line) * * A @spi_device is used to interchange data between an SPI slave @@ -266,7 +266,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * queue so the subsystem notifies the driver that it may relax the * hardware by issuing this call * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS - * number. Any individual value may be -ENOENT for CS lines that + * number. Any individual value may be -EINVAL for CS lines that * are not GPIOs (driven by the SPI controller itself). 
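/*
 * Illustrative sketch, not part of the patch: per the cs_gpio comment
 * above, the field may hold an error code rather than a GPIO number, so
 * a driver gates on gpio_is_valid() before driving chip select by hand.
 */
static void example_set_cs(struct spi_device *spi, bool enable)
{
	if (gpio_is_valid(spi->cs_gpio))
		gpio_set_value(spi->cs_gpio, enable ? 0 : 1);	/* CS is usually active-low */
}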
* * Each SPI master controller can communicate with one or more @spi_device diff --git a/trunk/include/linux/splice.h b/trunk/include/linux/splice.h index 74575cbf2d6f..09a545a7dfa3 100644 --- a/trunk/include/linux/splice.h +++ b/trunk/include/linux/splice.h @@ -35,7 +35,6 @@ struct splice_desc { void *data; /* cookie */ } u; loff_t pos; /* file position */ - loff_t *opos; /* sendfile: output position */ size_t num_spliced; /* number of bytes already spliced */ bool need_wakeup; /* need to wake up writer */ }; diff --git a/trunk/include/linux/swapops.h b/trunk/include/linux/swapops.h index c5fd30d2a415..47ead515c811 100644 --- a/trunk/include/linux/swapops.h +++ b/trunk/include/linux/swapops.h @@ -137,7 +137,6 @@ static inline void make_migration_entry_read(swp_entry_t *entry) extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address); -extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte); #else #define make_migration_entry(page, write) swp_entry(0, 0) @@ -149,8 +148,6 @@ static inline int is_migration_entry(swp_entry_t swp) static inline void make_migration_entry_read(swp_entry_t *entryp) { } static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address) { } -static inline void migration_entry_wait_huge(struct mm_struct *mm, - pte_t *pte) { } static inline int is_write_migration_entry(swp_entry_t entry) { return 0; diff --git a/trunk/include/linux/syslog.h b/trunk/include/linux/syslog.h index 98a3153c0f96..38911391a139 100644 --- a/trunk/include/linux/syslog.h +++ b/trunk/include/linux/syslog.h @@ -44,8 +44,8 @@ /* Return size of the log buffer */ #define SYSLOG_ACTION_SIZE_BUFFER 10 -#define SYSLOG_FROM_READER 0 -#define SYSLOG_FROM_PROC 1 +#define SYSLOG_FROM_CALL 0 +#define SYSLOG_FROM_FILE 1 int do_syslog(int type, char __user *buf, int count, bool from_file); diff --git a/trunk/include/linux/time.h b/trunk/include/linux/time.h index d5d229b2e5af..22d81b3c955b 100644 --- a/trunk/include/linux/time.h +++ b/trunk/include/linux/time.h @@ -117,10 +117,14 @@ static inline bool timespec_valid_strict(const struct timespec *ts) extern bool persistent_clock_exist; +#ifdef ALWAYS_USE_PERSISTENT_CLOCK +#define has_persistent_clock() true +#else static inline bool has_persistent_clock(void) { return persistent_clock_exist; } +#endif extern void read_persistent_clock(struct timespec *ts); extern void read_boot_clock(struct timespec *ts); diff --git a/trunk/include/linux/tracepoint.h b/trunk/include/linux/tracepoint.h index f8e084d0fc77..2f322c38bd4d 100644 --- a/trunk/include/linux/tracepoint.h +++ b/trunk/include/linux/tracepoint.h @@ -145,8 +145,8 @@ static inline void tracepoint_synchronize_unregister(void) TP_PROTO(data_proto), \ TP_ARGS(data_args), \ TP_CONDITION(cond), \ - rcu_irq_enter(), \ - rcu_irq_exit()); \ + rcu_idle_exit(), \ + rcu_idle_enter()); \ } #else #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) diff --git a/trunk/include/linux/uio.h b/trunk/include/linux/uio.h index c55ce243cc09..629aaf51f30b 100644 --- a/trunk/include/linux/uio.h +++ b/trunk/include/linux/uio.h @@ -35,7 +35,4 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs) } unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to); - -int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); -int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len); #endif diff --git a/trunk/include/linux/usb/gadget.h 
b/trunk/include/linux/usb/gadget.h index f1b0dca60f12..c454a88abf2e 100644 --- a/trunk/include/linux/usb/gadget.h +++ b/trunk/include/linux/usb/gadget.h @@ -563,8 +563,9 @@ static inline int gadget_is_dualspeed(struct usb_gadget *g) } /** - * gadget_is_superspeed() - return true if the hardware handles superspeed - * @g: controller that might support superspeed + * gadget_is_superspeed() - return true if the hardware handles + * supperspeed + * @g: controller that might support supper speed */ static inline int gadget_is_superspeed(struct usb_gadget *g) { diff --git a/trunk/include/linux/usb/serial.h b/trunk/include/linux/usb/serial.h index 302ddf55d2da..b9b0f7b4e43b 100644 --- a/trunk/include/linux/usb/serial.h +++ b/trunk/include/linux/usb/serial.h @@ -268,8 +268,6 @@ struct usb_serial_driver { struct usb_serial_port *port, struct ktermios *old); void (*break_ctl)(struct tty_struct *tty, int break_state); int (*chars_in_buffer)(struct tty_struct *tty); - void (*wait_until_sent)(struct tty_struct *tty, long timeout); - bool (*tx_empty)(struct usb_serial_port *port); void (*throttle)(struct tty_struct *tty); void (*unthrottle)(struct tty_struct *tty); int (*tiocmget)(struct tty_struct *tty); @@ -329,8 +327,6 @@ extern void usb_serial_generic_close(struct usb_serial_port *port); extern int usb_serial_generic_resume(struct usb_serial *serial); extern int usb_serial_generic_write_room(struct tty_struct *tty); extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); -extern void usb_serial_generic_wait_until_sent(struct tty_struct *tty, - long timeout); extern void usb_serial_generic_read_bulk_callback(struct urb *urb); extern void usb_serial_generic_write_bulk_callback(struct urb *urb); extern void usb_serial_generic_throttle(struct tty_struct *tty); diff --git a/trunk/include/linux/vt_kern.h b/trunk/include/linux/vt_kern.h index 0d33fca48774..e8d65718560b 100644 --- a/trunk/include/linux/vt_kern.h +++ b/trunk/include/linux/vt_kern.h @@ -36,7 +36,7 @@ extern int fg_console, last_console, want_console; int vc_allocate(unsigned int console); int vc_cons_allocated(unsigned int console); int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines); -struct vc_data *vc_deallocate(unsigned int console); +void vc_deallocate(unsigned int console); void reset_palette(struct vc_data *vc); void do_blank_screen(int entering_gfx); void do_unblank_screen(int leaving_gfx); diff --git a/trunk/include/linux/vtime.h b/trunk/include/linux/vtime.h index b1dd2db80076..71a5782d8c59 100644 --- a/trunk/include/linux/vtime.h +++ b/trunk/include/linux/vtime.h @@ -34,7 +34,7 @@ static inline void vtime_user_exit(struct task_struct *tsk) } extern void vtime_guest_enter(struct task_struct *tsk); extern void vtime_guest_exit(struct task_struct *tsk); -extern void vtime_init_idle(struct task_struct *tsk, int cpu); +extern void vtime_init_idle(struct task_struct *tsk); #else static inline void vtime_account_irq_exit(struct task_struct *tsk) { @@ -45,7 +45,7 @@ static inline void vtime_user_enter(struct task_struct *tsk) { } static inline void vtime_user_exit(struct task_struct *tsk) { } static inline void vtime_guest_enter(struct task_struct *tsk) { } static inline void vtime_guest_exit(struct task_struct *tsk) { } -static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } +static inline void vtime_init_idle(struct task_struct *tsk) { } #endif #ifdef CONFIG_IRQ_TIME_ACCOUNTING diff --git a/trunk/include/linux/wait.h b/trunk/include/linux/wait.h index f487a4750b7f..ac38be2692d8 100644 
--- a/trunk/include/linux/wait.h +++ b/trunk/include/linux/wait.h @@ -23,7 +23,6 @@ struct __wait_queue { struct wait_bit_key { void *flags; int bit_nr; -#define WAIT_ATOMIC_T_BIT_NR -1 }; struct wait_bit_queue { @@ -61,9 +60,6 @@ struct task_struct; #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ { .flags = word, .bit_nr = bit, } -#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \ - { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, } - extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *); #define init_waitqueue_head(q) \ @@ -150,10 +146,8 @@ void __wake_up_bit(wait_queue_head_t *, void *, int); int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); void wake_up_bit(void *, int); -void wake_up_atomic_t(atomic_t *); int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned); int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned); -int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned); wait_queue_head_t *bit_waitqueue(void *, int); #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) @@ -223,8 +217,6 @@ do { \ if (!ret) \ break; \ } \ - if (!ret && (condition)) \ - ret = 1; \ finish_wait(&wq, &__wait); \ } while (0) @@ -241,9 +233,8 @@ do { \ * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * - * The function returns 0 if the @timeout elapsed, or the remaining - * jiffies (at least 1) if the @condition evaluated to %true before - * the @timeout elapsed. + * The function returns 0 if the @timeout elapsed, and the remaining + * jiffies if the condition evaluated to true before the timeout elapsed. */ #define wait_event_timeout(wq, condition, timeout) \ ({ \ @@ -311,8 +302,6 @@ do { \ ret = -ERESTARTSYS; \ break; \ } \ - if (!ret && (condition)) \ - ret = 1; \ finish_wait(&wq, &__wait); \ } while (0) @@ -329,10 +318,9 @@ do { \ * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * - * Returns: - * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by - * a signal, or the remaining jiffies (at least 1) if the @condition - * evaluated to %true before the @timeout elapsed. + * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it + * was interrupted by a signal, and the remaining jiffies otherwise + * if the condition evaluated to true before the timeout elapsed. */ #define wait_event_interruptible_timeout(wq, condition, timeout) \ ({ \ @@ -908,23 +896,5 @@ static inline int wait_on_bit_lock(void *word, int bit, return 0; return out_of_line_wait_on_bit_lock(word, bit, action, mode); } - -/** - * wait_on_atomic_t - Wait for an atomic_t to become 0 - * @val: The atomic value being waited on, a kernel virtual address - * @action: the function used to sleep, which may take special actions - * @mode: the task state to sleep in - * - * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for - * the purpose of getting a waitqueue, but we set the key to a bit number - * outside of the target 'word'. 
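/*
 * Illustrative sketch, not part of the patch: the return-value convention
 * documented above for wait_event_interruptible_timeout().  "wq" and
 * "done" are hypothetical.
 */
static int example_wait_for_done(wait_queue_head_t *wq, bool *done)
{
	long ret = wait_event_interruptible_timeout(*wq, *done, HZ);

	if (ret == 0)
		return -ETIMEDOUT;	/* timeout elapsed, condition still false */
	if (ret < 0)
		return ret;		/* -ERESTARTSYS: interrupted by a signal */
	return 0;			/* condition became true; ret = jiffies remaining */
}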
- */ -static inline -int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode) -{ - if (atomic_read(val) == 0) - return 0; - return out_of_line_wait_on_atomic_t(val, action, mode); -} #endif diff --git a/trunk/include/linux/writeback.h b/trunk/include/linux/writeback.h index abfe11787af3..579a5007c696 100644 --- a/trunk/include/linux/writeback.h +++ b/trunk/include/linux/writeback.h @@ -78,7 +78,6 @@ struct writeback_control { unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */ unsigned for_reclaim:1; /* Invoked from the page allocator */ unsigned range_cyclic:1; /* range_start is cyclic */ - unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ }; /* diff --git a/trunk/include/media/v4l2-mem2mem.h b/trunk/include/media/v4l2-mem2mem.h index 0f4555b2a31b..d3eef01da648 100644 --- a/trunk/include/media/v4l2-mem2mem.h +++ b/trunk/include/media/v4l2-mem2mem.h @@ -110,8 +110,6 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_buffer *buf); int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_buffer *buf); -int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, - struct v4l2_create_buffers *create); int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_exportbuffer *eb); diff --git a/trunk/include/net/addrconf.h b/trunk/include/net/addrconf.h index 21f702704f24..84a6440f1f19 100644 --- a/trunk/include/net/addrconf.h +++ b/trunk/include/net/addrconf.h @@ -65,7 +65,7 @@ extern int addrconf_set_dstaddr(struct net *net, extern int ipv6_chk_addr(struct net *net, const struct in6_addr *addr, - const struct net_device *dev, + struct net_device *dev, int strict); #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) diff --git a/trunk/include/net/bluetooth/hci_core.h b/trunk/include/net/bluetooth/hci_core.h index 7cb6d360d147..35a57cd1704c 100644 --- a/trunk/include/net/bluetooth/hci_core.h +++ b/trunk/include/net/bluetooth/hci_core.h @@ -1117,7 +1117,6 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event); int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len); int mgmt_index_added(struct hci_dev *hdev); int mgmt_index_removed(struct hci_dev *hdev); -int mgmt_set_powered_failed(struct hci_dev *hdev, int err); int mgmt_powered(struct hci_dev *hdev, u8 powered); int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable); int mgmt_connectable(struct hci_dev *hdev, u8 connectable); diff --git a/trunk/include/net/bluetooth/mgmt.h b/trunk/include/net/bluetooth/mgmt.h index 9944c3e68c5d..22980a7c3873 100644 --- a/trunk/include/net/bluetooth/mgmt.h +++ b/trunk/include/net/bluetooth/mgmt.h @@ -42,7 +42,6 @@ #define MGMT_STATUS_NOT_POWERED 0x0f #define MGMT_STATUS_CANCELLED 0x10 #define MGMT_STATUS_INVALID_INDEX 0x11 -#define MGMT_STATUS_RFKILLED 0x12 struct mgmt_hdr { __le16 opcode; diff --git a/trunk/include/net/ip_tunnels.h b/trunk/include/net/ip_tunnels.h index 09b1360e10bf..4b6f0b28f41f 100644 --- a/trunk/include/net/ip_tunnels.h +++ b/trunk/include/net/ip_tunnels.h @@ -95,10 +95,10 @@ struct ip_tunnel_net { int ip_tunnel_init(struct net_device *dev); void ip_tunnel_uninit(struct net_device *dev); void ip_tunnel_dellink(struct net_device *dev, struct list_head *head); -int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, - struct rtnl_link_ops *ops, char *devname); +int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, + struct rtnl_link_ops *ops, char *devname); -void ip_tunnel_delete_net(struct ip_tunnel_net 
*itn); +void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn); void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, const struct iphdr *tnl_params); diff --git a/trunk/include/net/mac80211.h b/trunk/include/net/mac80211.h index 885898a40d13..04c2d4670dc6 100644 --- a/trunk/include/net/mac80211.h +++ b/trunk/include/net/mac80211.h @@ -3043,8 +3043,7 @@ void ieee80211_napi_complete(struct ieee80211_hw *hw); * This function may not be called in IRQ context. Calls to this function * for a single hardware must be synchronized against each other. Calls to * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be - * mixed for a single hardware. Must not run concurrently with - * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * mixed for a single hardware. * * In process context use instead ieee80211_rx_ni(). * @@ -3060,8 +3059,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb); * (internally defers to a tasklet.) * * Calls to this function, ieee80211_rx() or ieee80211_rx_ni() may not - * be mixed for a single hardware.Must not run concurrently with - * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * be mixed for a single hardware. * * @hw: the hardware this frame came in on * @skb: the buffer to receive, owned by mac80211 after this call @@ -3075,8 +3073,7 @@ void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb); * (internally disables bottom halves). * * Calls to this function, ieee80211_rx() and ieee80211_rx_irqsafe() may - * not be mixed for a single hardware. Must not run concurrently with - * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * not be mixed for a single hardware. * * @hw: the hardware this frame came in on * @skb: the buffer to receive, owned by mac80211 after this call @@ -3199,8 +3196,7 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif, * This function may not be called in IRQ context. Calls to this function * for a single hardware must be synchronized against each other. Calls * to this function, ieee80211_tx_status_ni() and ieee80211_tx_status_irqsafe() - * may not be mixed for a single hardware. Must not run concurrently with - * ieee80211_rx() or ieee80211_rx_ni(). + * may not be mixed for a single hardware. 
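The wait.h hunk above rewords the wait_event_timeout() return convention: 0 when the timeout elapsed, otherwise the remaining jiffies when the condition came true. A small userspace sketch of the same convention, using a simulated tick counter in place of jiffies; wait_for_flag() is illustrative, not a kernel API.

    #include <stdio.h>

    /*
     * Same convention as the wait_event_timeout() comment above: return 0 when
     * the timeout elapsed, otherwise whatever "time" was left when the
     * condition became true.  The tick loop stands in for jiffies.
     */
    static long wait_for_flag(const int *flag, long timeout_ticks)
    {
            while (timeout_ticks > 0) {
                    if (*flag)
                            return timeout_ticks;   /* remaining ticks, at least 1 */
                    timeout_ticks--;                /* one simulated jiffy passes */
            }
            return 0;                               /* timeout elapsed */
    }

    int main(void)
    {
            int ready = 1, not_ready = 0;

            printf("ready:     %ld ticks left\n", wait_for_flag(&ready, 5));
            printf("not ready: %ld ticks left\n", wait_for_flag(&not_ready, 5));
            return 0;
    }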
* * @hw: the hardware the frame was transmitted by * @skb: the frame that was transmitted, owned by mac80211 after this call diff --git a/trunk/include/net/netfilter/nf_log.h b/trunk/include/net/netfilter/nf_log.h index 99eac12d040b..31f1fb9eb784 100644 --- a/trunk/include/net/netfilter/nf_log.h +++ b/trunk/include/net/netfilter/nf_log.h @@ -30,8 +30,7 @@ struct nf_loginfo { } u; }; -typedef void nf_logfn(struct net *net, - u_int8_t pf, +typedef void nf_logfn(u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, diff --git a/trunk/include/net/netfilter/nfnetlink_log.h b/trunk/include/net/netfilter/nfnetlink_log.h index 5ca3f14f0998..e2dec42c2db2 100644 --- a/trunk/include/net/netfilter/nfnetlink_log.h +++ b/trunk/include/net/netfilter/nfnetlink_log.h @@ -2,8 +2,7 @@ #define _KER_NFNETLINK_LOG_H void -nfulnl_log_packet(struct net *net, - u_int8_t pf, +nfulnl_log_packet(u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, diff --git a/trunk/include/net/sch_generic.h b/trunk/include/net/sch_generic.h index e7f4e21cc3e1..f10818fc8804 100644 --- a/trunk/include/net/sch_generic.h +++ b/trunk/include/net/sch_generic.h @@ -679,26 +679,22 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask, #endif struct psched_ratecfg { - u64 rate_bps; - u32 mult; - u16 overhead; - u8 shift; + u64 rate_bps; + u32 mult; + u32 shift; }; static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, unsigned int len) { - return ((u64)(len + r->overhead) * r->mult) >> r->shift; + return ((u64)len * r->mult) >> r->shift; } -extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf); +extern void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate); -static inline void psched_ratecfg_getrate(struct tc_ratespec *res, - const struct psched_ratecfg *r) +static inline u32 psched_ratecfg_getrate(const struct psched_ratecfg *r) { - memset(res, 0, sizeof(*res)); - res->rate = r->rate_bps >> 3; - res->overhead = r->overhead; + return r->rate_bps >> 3; } #endif diff --git a/trunk/include/net/sock.h b/trunk/include/net/sock.h index 66772cf8c3c5..5c97b0fc5623 100644 --- a/trunk/include/net/sock.h +++ b/trunk/include/net/sock.h @@ -866,18 +866,6 @@ struct inet_hashinfo; struct raw_hashinfo; struct module; -/* - * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes - * un-modified. Special care is taken when initializing object to zero. - */ -static inline void sk_prot_clear_nulls(struct sock *sk, int size) -{ - if (offsetof(struct sock, sk_node.next) != 0) - memset(sk, 0, offsetof(struct sock, sk_node.next)); - memset(&sk->sk_node.pprev, 0, - size - offsetof(struct sock, sk_node.pprev)); -} - /* Networking protocol blocks we attach to sockets. 
* socket layer -> transport layer interface * transport -> network interface is defined by struct inet_proto diff --git a/trunk/include/net/xfrm.h b/trunk/include/net/xfrm.h index 94ce082b29dc..ae16531d0d35 100644 --- a/trunk/include/net/xfrm.h +++ b/trunk/include/net/xfrm.h @@ -1160,8 +1160,6 @@ static inline void xfrm_sk_free_policy(struct sock *sk) } } -extern void xfrm_garbage_collect(struct net *net); - #else static inline void xfrm_sk_free_policy(struct sock *sk) {} @@ -1196,9 +1194,6 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir, { return 1; } -static inline void xfrm_garbage_collect(struct net *net) -{ -} #endif static __inline__ diff --git a/trunk/include/sound/soc-dapm.h b/trunk/include/sound/soc-dapm.h index 385c6329a967..d4609029f014 100644 --- a/trunk/include/sound/soc-dapm.h +++ b/trunk/include/sound/soc-dapm.h @@ -450,8 +450,7 @@ enum snd_soc_dapm_type { snd_soc_dapm_aif_in, /* audio interface input */ snd_soc_dapm_aif_out, /* audio interface output */ snd_soc_dapm_siggen, /* signal generator */ - snd_soc_dapm_dai_in, /* link to DAI structure */ - snd_soc_dapm_dai_out, + snd_soc_dapm_dai, /* link to DAI structure */ snd_soc_dapm_dai_link, /* link between two DAI structures */ }; diff --git a/trunk/include/target/target_core_base.h b/trunk/include/target/target_core_base.h index 4ea4f985f394..c4af592f7057 100644 --- a/trunk/include/target/target_core_base.h +++ b/trunk/include/target/target_core_base.h @@ -463,6 +463,7 @@ struct se_cmd { #define CMD_T_ABORTED (1 << 0) #define CMD_T_ACTIVE (1 << 1) #define CMD_T_COMPLETE (1 << 2) +#define CMD_T_QUEUED (1 << 3) #define CMD_T_SENT (1 << 4) #define CMD_T_STOP (1 << 5) #define CMD_T_FAILED (1 << 6) @@ -543,7 +544,6 @@ struct se_session { struct list_head sess_list; struct list_head sess_acl_list; struct list_head sess_cmd_list; - struct list_head sess_wait_list; spinlock_t sess_cmd_lock; struct kref sess_kref; }; @@ -572,8 +572,12 @@ struct se_dev_entry { bool def_pr_registered; /* See transport_lunflags_table */ u32 lun_flags; + u32 deve_cmds; u32 mapped_lun; + u32 average_bytes; + u32 last_byte_count; u32 total_cmds; + u32 total_bytes; u64 pr_res_key; u64 creation_time; u32 attach_count; diff --git a/trunk/include/target/target_core_fabric.h b/trunk/include/target/target_core_fabric.h index 1dcce9cc99b9..ba3471b73c07 100644 --- a/trunk/include/target/target_core_fabric.h +++ b/trunk/include/target/target_core_fabric.h @@ -114,7 +114,7 @@ sense_reason_t transport_generic_new_cmd(struct se_cmd *); void target_execute_cmd(struct se_cmd *cmd); -int transport_generic_free_cmd(struct se_cmd *, int); +void transport_generic_free_cmd(struct se_cmd *, int); bool transport_wait_for_tasks(struct se_cmd *); int transport_check_aborted_status(struct se_cmd *, int); @@ -123,7 +123,7 @@ int transport_send_check_condition_and_sense(struct se_cmd *, int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool); int target_put_sess_cmd(struct se_session *, struct se_cmd *); void target_sess_cmd_list_set_waiting(struct se_session *); -void target_wait_for_sess_cmds(struct se_session *); +void target_wait_for_sess_cmds(struct se_session *, int); int core_alua_check_nonop_delay(struct se_cmd *); diff --git a/trunk/include/trace/events/ext3.h b/trunk/include/trace/events/ext3.h index 6797b9de90ed..15d11a39be47 100644 --- a/trunk/include/trace/events/ext3.h +++ b/trunk/include/trace/events/ext3.h @@ -290,14 +290,13 @@ DEFINE_EVENT(ext3__page_op, ext3_releasepage, ); TRACE_EVENT(ext3_invalidatepage, - TP_PROTO(struct 
page *page, unsigned int offset, unsigned int length), + TP_PROTO(struct page *page, unsigned long offset), - TP_ARGS(page, offset, length), + TP_ARGS(page, offset), TP_STRUCT__entry( __field( pgoff_t, index ) - __field( unsigned int, offset ) - __field( unsigned int, length ) + __field( unsigned long, offset ) __field( ino_t, ino ) __field( dev_t, dev ) @@ -306,15 +305,14 @@ TRACE_EVENT(ext3_invalidatepage, TP_fast_assign( __entry->index = page->index; __entry->offset = offset; - __entry->length = length; __entry->ino = page->mapping->host->i_ino; __entry->dev = page->mapping->host->i_sb->s_dev; ), - TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u", + TP_printk("dev %d,%d ino %lu page_index %lu offset %lu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - __entry->index, __entry->offset, __entry->length) + __entry->index, __entry->offset) ); TRACE_EVENT(ext3_discard_blocks, diff --git a/trunk/include/trace/events/ext4.h b/trunk/include/trace/events/ext4.h index 2068db241f22..d0e686402df8 100644 --- a/trunk/include/trace/events/ext4.h +++ b/trunk/include/trace/events/ext4.h @@ -19,57 +19,6 @@ struct extent_status; #define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode)) -#define show_mballoc_flags(flags) __print_flags(flags, "|", \ - { EXT4_MB_HINT_MERGE, "HINT_MERGE" }, \ - { EXT4_MB_HINT_RESERVED, "HINT_RESV" }, \ - { EXT4_MB_HINT_METADATA, "HINT_MDATA" }, \ - { EXT4_MB_HINT_FIRST, "HINT_FIRST" }, \ - { EXT4_MB_HINT_BEST, "HINT_BEST" }, \ - { EXT4_MB_HINT_DATA, "HINT_DATA" }, \ - { EXT4_MB_HINT_NOPREALLOC, "HINT_NOPREALLOC" }, \ - { EXT4_MB_HINT_GROUP_ALLOC, "HINT_GRP_ALLOC" }, \ - { EXT4_MB_HINT_GOAL_ONLY, "HINT_GOAL_ONLY" }, \ - { EXT4_MB_HINT_TRY_GOAL, "HINT_TRY_GOAL" }, \ - { EXT4_MB_DELALLOC_RESERVED, "DELALLOC_RESV" }, \ - { EXT4_MB_STREAM_ALLOC, "STREAM_ALLOC" }, \ - { EXT4_MB_USE_ROOT_BLOCKS, "USE_ROOT_BLKS" }, \ - { EXT4_MB_USE_RESERVED, "USE_RESV" }) - -#define show_map_flags(flags) __print_flags(flags, "|", \ - { EXT4_GET_BLOCKS_CREATE, "CREATE" }, \ - { EXT4_GET_BLOCKS_UNINIT_EXT, "UNINIT" }, \ - { EXT4_GET_BLOCKS_DELALLOC_RESERVE, "DELALLOC" }, \ - { EXT4_GET_BLOCKS_PRE_IO, "PRE_IO" }, \ - { EXT4_GET_BLOCKS_CONVERT, "CONVERT" }, \ - { EXT4_GET_BLOCKS_METADATA_NOFAIL, "METADATA_NOFAIL" }, \ - { EXT4_GET_BLOCKS_NO_NORMALIZE, "NO_NORMALIZE" }, \ - { EXT4_GET_BLOCKS_KEEP_SIZE, "KEEP_SIZE" }, \ - { EXT4_GET_BLOCKS_NO_LOCK, "NO_LOCK" }, \ - { EXT4_GET_BLOCKS_NO_PUT_HOLE, "NO_PUT_HOLE" }) - -#define show_mflags(flags) __print_flags(flags, "", \ - { EXT4_MAP_NEW, "N" }, \ - { EXT4_MAP_MAPPED, "M" }, \ - { EXT4_MAP_UNWRITTEN, "U" }, \ - { EXT4_MAP_BOUNDARY, "B" }, \ - { EXT4_MAP_UNINIT, "u" }, \ - { EXT4_MAP_FROM_CLUSTER, "C" }) - -#define show_free_flags(flags) __print_flags(flags, "|", \ - { EXT4_FREE_BLOCKS_METADATA, "METADATA" }, \ - { EXT4_FREE_BLOCKS_FORGET, "FORGET" }, \ - { EXT4_FREE_BLOCKS_VALIDATED, "VALIDATED" }, \ - { EXT4_FREE_BLOCKS_NO_QUOT_UPDATE, "NO_QUOTA" }, \ - { EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER,"1ST_CLUSTER" },\ - { EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER, "LAST_CLUSTER" }) - -#define show_extent_status(status) __print_flags(status, "", \ - { (1 << 3), "W" }, \ - { (1 << 2), "U" }, \ - { (1 << 1), "D" }, \ - { (1 << 0), "H" }) - - TRACE_EVENT(ext4_free_inode, TP_PROTO(struct inode *inode), @@ -332,7 +281,7 @@ DEFINE_EVENT(ext4__write_end, ext4_da_write_end, TP_ARGS(inode, pos, len, copied) ); -TRACE_EVENT(ext4_writepages, +TRACE_EVENT(ext4_da_writepages, TP_PROTO(struct inode *inode, struct 
writeback_control *wbc), TP_ARGS(inode, wbc), @@ -375,62 +324,46 @@ TRACE_EVENT(ext4_writepages, ); TRACE_EVENT(ext4_da_write_pages, - TP_PROTO(struct inode *inode, pgoff_t first_page, - struct writeback_control *wbc), + TP_PROTO(struct inode *inode, struct mpage_da_data *mpd), - TP_ARGS(inode, first_page, wbc), + TP_ARGS(inode, mpd), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) - __field( pgoff_t, first_page ) - __field( long, nr_to_write ) - __field( int, sync_mode ) + __field( __u64, b_blocknr ) + __field( __u32, b_size ) + __field( __u32, b_state ) + __field( unsigned long, first_page ) + __field( int, io_done ) + __field( int, pages_written ) + __field( int, sync_mode ) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; - __entry->first_page = first_page; - __entry->nr_to_write = wbc->nr_to_write; - __entry->sync_mode = wbc->sync_mode; + __entry->b_blocknr = mpd->b_blocknr; + __entry->b_size = mpd->b_size; + __entry->b_state = mpd->b_state; + __entry->first_page = mpd->first_page; + __entry->io_done = mpd->io_done; + __entry->pages_written = mpd->pages_written; + __entry->sync_mode = mpd->wbc->sync_mode; ), - TP_printk("dev %d,%d ino %lu first_page %lu nr_to_write %ld " - "sync_mode %d", + TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x " + "first_page %lu io_done %d pages_written %d sync_mode %d", MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, __entry->first_page, - __entry->nr_to_write, __entry->sync_mode) -); - -TRACE_EVENT(ext4_da_write_pages_extent, - TP_PROTO(struct inode *inode, struct ext4_map_blocks *map), - - TP_ARGS(inode, map), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( __u64, lblk ) - __field( __u32, len ) - __field( __u32, flags ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->lblk = map->m_lblk; - __entry->len = map->m_len; - __entry->flags = map->m_flags; - ), - - TP_printk("dev %d,%d ino %lu lblk %llu len %u flags %s", - MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, __entry->lblk, __entry->len, - show_mflags(__entry->flags)) + (unsigned long) __entry->ino, + __entry->b_blocknr, __entry->b_size, + __entry->b_state, __entry->first_page, + __entry->io_done, __entry->pages_written, + __entry->sync_mode + ) ); -TRACE_EVENT(ext4_writepages_result, +TRACE_EVENT(ext4_da_writepages_result, TP_PROTO(struct inode *inode, struct writeback_control *wbc, int ret, int pages_written), @@ -511,16 +444,16 @@ DEFINE_EVENT(ext4__page_op, ext4_releasepage, ); DECLARE_EVENT_CLASS(ext4_invalidatepage_op, - TP_PROTO(struct page *page, unsigned int offset, unsigned int length), + TP_PROTO(struct page *page, unsigned long offset), - TP_ARGS(page, offset, length), + TP_ARGS(page, offset), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) __field( pgoff_t, index ) - __field( unsigned int, offset ) - __field( unsigned int, length ) + __field( unsigned long, offset ) + ), TP_fast_assign( @@ -528,26 +461,24 @@ DECLARE_EVENT_CLASS(ext4_invalidatepage_op, __entry->ino = page->mapping->host->i_ino; __entry->index = page->index; __entry->offset = offset; - __entry->length = length; ), - TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u", + TP_printk("dev %d,%d ino %lu page_index %lu offset %lu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - (unsigned long) __entry->index, - __entry->offset, __entry->length) + (unsigned long) 
__entry->index, __entry->offset) ); DEFINE_EVENT(ext4_invalidatepage_op, ext4_invalidatepage, - TP_PROTO(struct page *page, unsigned int offset, unsigned int length), + TP_PROTO(struct page *page, unsigned long offset), - TP_ARGS(page, offset, length) + TP_ARGS(page, offset) ); DEFINE_EVENT(ext4_invalidatepage_op, ext4_journalled_invalidatepage, - TP_PROTO(struct page *page, unsigned int offset, unsigned int length), + TP_PROTO(struct page *page, unsigned long offset), - TP_ARGS(page, offset, length) + TP_ARGS(page, offset) ); TRACE_EVENT(ext4_discard_blocks, @@ -742,10 +673,10 @@ TRACE_EVENT(ext4_request_blocks, __entry->flags = ar->flags; ), - TP_printk("dev %d,%d ino %lu flags %s len %u lblk %u goal %llu " + TP_printk("dev %d,%d ino %lu flags %u len %u lblk %u goal %llu " "lleft %u lright %u pleft %llu pright %llu ", MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, show_mballoc_flags(__entry->flags), + (unsigned long) __entry->ino, __entry->flags, __entry->len, __entry->logical, __entry->goal, __entry->lleft, __entry->lright, __entry->pleft, __entry->pright) @@ -784,10 +715,10 @@ TRACE_EVENT(ext4_allocate_blocks, __entry->flags = ar->flags; ), - TP_printk("dev %d,%d ino %lu flags %s len %u block %llu lblk %u " + TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %u " "goal %llu lleft %u lright %u pleft %llu pright %llu", MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, show_mballoc_flags(__entry->flags), + (unsigned long) __entry->ino, __entry->flags, __entry->len, __entry->block, __entry->logical, __entry->goal, __entry->lleft, __entry->lright, __entry->pleft, __entry->pright) @@ -817,11 +748,11 @@ TRACE_EVENT(ext4_free_blocks, __entry->mode = inode->i_mode; ), - TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %s", + TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, __entry->mode, __entry->block, __entry->count, - show_free_flags(__entry->flags)) + __entry->flags) ); TRACE_EVENT(ext4_sync_file_enter, @@ -972,7 +903,7 @@ TRACE_EVENT(ext4_mballoc_alloc, ), TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u " - "result %u/%d/%u@%u blks %u grps %u cr %u flags %s " + "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x " "tail %u broken %u", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, @@ -983,7 +914,7 @@ TRACE_EVENT(ext4_mballoc_alloc, __entry->result_group, __entry->result_start, __entry->result_len, __entry->result_logical, __entry->found, __entry->groups, __entry->cr, - show_mballoc_flags(__entry->flags), __entry->tail, + __entry->flags, __entry->tail, __entry->buddy ? 
1 << __entry->buddy : 0) ); @@ -1597,10 +1528,10 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter, __entry->flags = flags; ), - TP_printk("dev %d,%d ino %lu lblk %u len %u flags %s", + TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - __entry->lblk, __entry->len, show_map_flags(__entry->flags)) + __entry->lblk, __entry->len, __entry->flags) ); DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter, @@ -1618,53 +1549,47 @@ DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter, ); DECLARE_EVENT_CLASS(ext4__map_blocks_exit, - TP_PROTO(struct inode *inode, unsigned flags, struct ext4_map_blocks *map, - int ret), + TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret), - TP_ARGS(inode, flags, map, ret), + TP_ARGS(inode, map, ret), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) - __field( unsigned int, flags ) __field( ext4_fsblk_t, pblk ) __field( ext4_lblk_t, lblk ) __field( unsigned int, len ) - __field( unsigned int, mflags ) + __field( unsigned int, flags ) __field( int, ret ) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; - __entry->flags = flags; __entry->pblk = map->m_pblk; __entry->lblk = map->m_lblk; __entry->len = map->m_len; - __entry->mflags = map->m_flags; + __entry->flags = map->m_flags; __entry->ret = ret; ), - TP_printk("dev %d,%d ino %lu flags %s lblk %u pblk %llu len %u " - "mflags %s ret %d", + TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u flags %x ret %d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - show_map_flags(__entry->flags), __entry->lblk, __entry->pblk, - __entry->len, show_mflags(__entry->mflags), __entry->ret) + __entry->lblk, __entry->pblk, + __entry->len, __entry->flags, __entry->ret) ); DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit, - TP_PROTO(struct inode *inode, unsigned flags, - struct ext4_map_blocks *map, int ret), + TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret), - TP_ARGS(inode, flags, map, ret) + TP_ARGS(inode, map, ret) ); DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit, - TP_PROTO(struct inode *inode, unsigned flags, - struct ext4_map_blocks *map, int ret), + TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret), - TP_ARGS(inode, flags, map, ret) + TP_ARGS(inode, map, ret) ); TRACE_EVENT(ext4_ext_load_extent, @@ -1713,50 +1638,25 @@ TRACE_EVENT(ext4_load_inode, ); TRACE_EVENT(ext4_journal_start, - TP_PROTO(struct super_block *sb, int blocks, int rsv_blocks, - unsigned long IP), + TP_PROTO(struct super_block *sb, int nblocks, unsigned long IP), - TP_ARGS(sb, blocks, rsv_blocks, IP), + TP_ARGS(sb, nblocks, IP), TP_STRUCT__entry( __field( dev_t, dev ) __field(unsigned long, ip ) - __field( int, blocks ) - __field( int, rsv_blocks ) + __field( int, nblocks ) ), TP_fast_assign( - __entry->dev = sb->s_dev; - __entry->ip = IP; - __entry->blocks = blocks; - __entry->rsv_blocks = rsv_blocks; + __entry->dev = sb->s_dev; + __entry->ip = IP; + __entry->nblocks = nblocks; ), - TP_printk("dev %d,%d blocks, %d rsv_blocks, %d caller %pF", + TP_printk("dev %d,%d nblocks %d caller %pF", MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->blocks, __entry->rsv_blocks, (void *)__entry->ip) -); - -TRACE_EVENT(ext4_journal_start_reserved, - TP_PROTO(struct super_block *sb, int blocks, unsigned long IP), - - TP_ARGS(sb, blocks, IP), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field(unsigned long, ip ) - __field( 
int, blocks ) - ), - - TP_fast_assign( - __entry->dev = sb->s_dev; - __entry->ip = IP; - __entry->blocks = blocks; - ), - - TP_printk("dev %d,%d blocks, %d caller %pF", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->blocks, (void *)__entry->ip) + __entry->nblocks, (void *)__entry->ip) ); DECLARE_EVENT_CLASS(ext4__trim, @@ -1836,12 +1736,12 @@ TRACE_EVENT(ext4_ext_handle_uninitialized_extents, __entry->newblk = newblock; ), - TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %s " + TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %x " "allocated %d newblock %llu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, (unsigned) __entry->lblk, (unsigned long long) __entry->pblk, - __entry->len, show_map_flags(__entry->flags), + __entry->len, __entry->flags, (unsigned int) __entry->allocated, (unsigned long long) __entry->newblk) ); @@ -1869,10 +1769,10 @@ TRACE_EVENT(ext4_get_implied_cluster_alloc_exit, __entry->ret = ret; ), - TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %s ret %d", + TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %u ret %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->lblk, (unsigned long long) __entry->pblk, - __entry->len, show_mflags(__entry->flags), __entry->ret) + __entry->len, __entry->flags, __entry->ret) ); TRACE_EVENT(ext4_ext_put_in_cache, @@ -2026,7 +1926,7 @@ TRACE_EVENT(ext4_ext_show_extent, TRACE_EVENT(ext4_remove_blocks, TP_PROTO(struct inode *inode, struct ext4_extent *ex, ext4_lblk_t from, ext4_fsblk_t to, - long long partial_cluster), + ext4_fsblk_t partial_cluster), TP_ARGS(inode, ex, from, to, partial_cluster), @@ -2035,7 +1935,7 @@ TRACE_EVENT(ext4_remove_blocks, __field( ino_t, ino ) __field( ext4_lblk_t, from ) __field( ext4_lblk_t, to ) - __field( long long, partial ) + __field( ext4_fsblk_t, partial ) __field( ext4_fsblk_t, ee_pblk ) __field( ext4_lblk_t, ee_lblk ) __field( unsigned short, ee_len ) @@ -2053,7 +1953,7 @@ TRACE_EVENT(ext4_remove_blocks, ), TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u]" - "from %u to %u partial_cluster %lld", + "from %u to %u partial_cluster %u", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, (unsigned) __entry->ee_lblk, @@ -2061,20 +1961,19 @@ TRACE_EVENT(ext4_remove_blocks, (unsigned short) __entry->ee_len, (unsigned) __entry->from, (unsigned) __entry->to, - (long long) __entry->partial) + (unsigned) __entry->partial) ); TRACE_EVENT(ext4_ext_rm_leaf, TP_PROTO(struct inode *inode, ext4_lblk_t start, - struct ext4_extent *ex, - long long partial_cluster), + struct ext4_extent *ex, ext4_fsblk_t partial_cluster), TP_ARGS(inode, start, ex, partial_cluster), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) - __field( long long, partial ) + __field( ext4_fsblk_t, partial ) __field( ext4_lblk_t, start ) __field( ext4_lblk_t, ee_lblk ) __field( ext4_fsblk_t, ee_pblk ) @@ -2092,14 +1991,14 @@ TRACE_EVENT(ext4_ext_rm_leaf, ), TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u]" - "partial_cluster %lld", + "partial_cluster %u", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, (unsigned) __entry->start, (unsigned) __entry->ee_lblk, (unsigned long long) __entry->ee_pblk, (unsigned short) __entry->ee_len, - (long long) __entry->partial) + (unsigned) __entry->partial) ); TRACE_EVENT(ext4_ext_rm_idx, @@ -2126,16 +2025,14 @@ TRACE_EVENT(ext4_ext_rm_idx, ); TRACE_EVENT(ext4_ext_remove_space, - TP_PROTO(struct inode *inode, ext4_lblk_t start, - ext4_lblk_t end, 
int depth), + TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth), - TP_ARGS(inode, start, end, depth), + TP_ARGS(inode, start, depth), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) __field( ext4_lblk_t, start ) - __field( ext4_lblk_t, end ) __field( int, depth ) ), @@ -2143,31 +2040,28 @@ TRACE_EVENT(ext4_ext_remove_space, __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->start = start; - __entry->end = end; __entry->depth = depth; ), - TP_printk("dev %d,%d ino %lu since %u end %u depth %d", + TP_printk("dev %d,%d ino %lu since %u depth %d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, (unsigned) __entry->start, - (unsigned) __entry->end, __entry->depth) ); TRACE_EVENT(ext4_ext_remove_space_done, - TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t end, - int depth, long long partial, __le16 eh_entries), + TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth, + ext4_lblk_t partial, __le16 eh_entries), - TP_ARGS(inode, start, end, depth, partial, eh_entries), + TP_ARGS(inode, start, depth, partial, eh_entries), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) __field( ext4_lblk_t, start ) - __field( ext4_lblk_t, end ) __field( int, depth ) - __field( long long, partial ) + __field( ext4_lblk_t, partial ) __field( unsigned short, eh_entries ) ), @@ -2175,20 +2069,18 @@ TRACE_EVENT(ext4_ext_remove_space_done, __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->start = start; - __entry->end = end; __entry->depth = depth; __entry->partial = partial; __entry->eh_entries = le16_to_cpu(eh_entries); ), - TP_printk("dev %d,%d ino %lu since %u end %u depth %d partial %lld " + TP_printk("dev %d,%d ino %lu since %u depth %d partial %u " "remaining_entries %u", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, (unsigned) __entry->start, - (unsigned) __entry->end, __entry->depth, - (long long) __entry->partial, + (unsigned) __entry->partial, (unsigned short) __entry->eh_entries) ); @@ -2203,7 +2095,7 @@ TRACE_EVENT(ext4_es_insert_extent, __field( ext4_lblk_t, lblk ) __field( ext4_lblk_t, len ) __field( ext4_fsblk_t, pblk ) - __field( char, status ) + __field( unsigned long long, status ) ), TP_fast_assign( @@ -2212,14 +2104,14 @@ TRACE_EVENT(ext4_es_insert_extent, __entry->lblk = es->es_lblk; __entry->len = es->es_len; __entry->pblk = ext4_es_pblock(es); - __entry->status = ext4_es_status(es) >> 60; + __entry->status = ext4_es_status(es); ), - TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s", + TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %llx", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, __entry->lblk, __entry->len, - __entry->pblk, show_extent_status(__entry->status)) + __entry->pblk, __entry->status) ); TRACE_EVENT(ext4_es_remove_extent, @@ -2247,7 +2139,7 @@ TRACE_EVENT(ext4_es_remove_extent, __entry->lblk, __entry->len) ); -TRACE_EVENT(ext4_es_find_delayed_extent_range_enter, +TRACE_EVENT(ext4_es_find_delayed_extent_enter, TP_PROTO(struct inode *inode, ext4_lblk_t lblk), TP_ARGS(inode, lblk), @@ -2269,7 +2161,7 @@ TRACE_EVENT(ext4_es_find_delayed_extent_range_enter, (unsigned long) __entry->ino, __entry->lblk) ); -TRACE_EVENT(ext4_es_find_delayed_extent_range_exit, +TRACE_EVENT(ext4_es_find_delayed_extent_exit, TP_PROTO(struct inode *inode, struct extent_status *es), TP_ARGS(inode, es), @@ -2280,7 +2172,7 @@ TRACE_EVENT(ext4_es_find_delayed_extent_range_exit, __field( ext4_lblk_t, lblk ) __field( 
ext4_lblk_t, len ) __field( ext4_fsblk_t, pblk ) - __field( char, status ) + __field( unsigned long long, status ) ), TP_fast_assign( @@ -2289,14 +2181,14 @@ TRACE_EVENT(ext4_es_find_delayed_extent_range_exit, __entry->lblk = es->es_lblk; __entry->len = es->es_len; __entry->pblk = ext4_es_pblock(es); - __entry->status = ext4_es_status(es) >> 60; + __entry->status = ext4_es_status(es); ), - TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s", + TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %llx", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, __entry->lblk, __entry->len, - __entry->pblk, show_extent_status(__entry->status)) + __entry->pblk, __entry->status) ); TRACE_EVENT(ext4_es_lookup_extent_enter, @@ -2333,7 +2225,7 @@ TRACE_EVENT(ext4_es_lookup_extent_exit, __field( ext4_lblk_t, lblk ) __field( ext4_lblk_t, len ) __field( ext4_fsblk_t, pblk ) - __field( char, status ) + __field( unsigned long long, status ) __field( int, found ) ), @@ -2343,16 +2235,16 @@ TRACE_EVENT(ext4_es_lookup_extent_exit, __entry->lblk = es->es_lblk; __entry->len = es->es_len; __entry->pblk = ext4_es_pblock(es); - __entry->status = ext4_es_status(es) >> 60; + __entry->status = ext4_es_status(es); __entry->found = found; ), - TP_printk("dev %d,%d ino %lu found %d [%u/%u) %llu %s", + TP_printk("dev %d,%d ino %lu found %d [%u/%u) %llu %llx", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, __entry->found, __entry->lblk, __entry->len, __entry->found ? __entry->pblk : 0, - show_extent_status(__entry->found ? __entry->status : 0)) + __entry->found ? __entry->status : 0) ); TRACE_EVENT(ext4_es_shrink_enter, diff --git a/trunk/include/uapi/linux/Kbuild b/trunk/include/uapi/linux/Kbuild index bdc6e87ff3eb..ab5d4992e568 100644 --- a/trunk/include/uapi/linux/Kbuild +++ b/trunk/include/uapi/linux/Kbuild @@ -261,7 +261,6 @@ header-y += net_dropmon.h header-y += net_tstamp.h header-y += netconf.h header-y += netdevice.h -header-y += netlink_diag.h header-y += netfilter.h header-y += netfilter_arp.h header-y += netfilter_bridge.h diff --git a/trunk/include/uapi/linux/kvm.h b/trunk/include/uapi/linux/kvm.h index d88c8ee00c8b..a5c86fc34a37 100644 --- a/trunk/include/uapi/linux/kvm.h +++ b/trunk/include/uapi/linux/kvm.h @@ -783,7 +783,6 @@ struct kvm_dirty_tlb { #define KVM_REG_IA64 0x3000000000000000ULL #define KVM_REG_ARM 0x4000000000000000ULL #define KVM_REG_S390 0x5000000000000000ULL -#define KVM_REG_MIPS 0x7000000000000000ULL #define KVM_REG_SIZE_SHIFT 52 #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL diff --git a/trunk/include/uapi/linux/virtio_console.h b/trunk/include/uapi/linux/virtio_console.h index c312f16bc4e7..ee13ab6c3614 100644 --- a/trunk/include/uapi/linux/virtio_console.h +++ b/trunk/include/uapi/linux/virtio_console.h @@ -39,7 +39,7 @@ #define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ #define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? 
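The ext4 tracepoint hunk above removes the show_mflags()/show_extent_status() helpers that print a status word as a short letter string. Below is a standalone C sketch of that decoding idea; the bit-to-letter table mirrors the one in the hunk, while the comments on what each bit means are assumptions.

    #include <stdio.h>

    struct flag_name {
            unsigned int bit;
            char ch;
    };

    /* Same table as show_extent_status() in the hunk; meanings are assumed. */
    static const struct flag_name names[] = {
            { 1u << 3, 'W' },       /* written (assumed) */
            { 1u << 2, 'U' },       /* unwritten (assumed) */
            { 1u << 1, 'D' },       /* delayed (assumed) */
            { 1u << 0, 'H' },       /* hole (assumed) */
    };

    static void show_flags(unsigned int status, char *buf, size_t len)
    {
            size_t pos = 0, i;

            for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
                    if ((status & names[i].bit) && pos + 1 < len)
                            buf[pos++] = names[i].ch;
            buf[pos] = '\0';
    }

    int main(void)
    {
            char buf[8];

            show_flags(0x9, buf, sizeof(buf));      /* bits 3 and 0 set */
            printf("status 0x9 -> \"%s\"\n", buf);  /* prints "WH" */
            return 0;
    }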
*/ -#define VIRTIO_CONSOLE_BAD_ID (~(__u32)0) +#define VIRTIO_CONSOLE_BAD_ID (~(u32)0) struct virtio_console_config { /* colums of the screens */ diff --git a/trunk/include/video/omapdss.h b/trunk/include/video/omapdss.h index aeb4e9a0c5d1..62ca9a77c1d6 100644 --- a/trunk/include/video/omapdss.h +++ b/trunk/include/video/omapdss.h @@ -748,7 +748,6 @@ struct omap_dss_driver { }; enum omapdss_version omapdss_get_version(void); -bool omapdss_is_initialized(void); int omap_dss_register_driver(struct omap_dss_driver *); void omap_dss_unregister_driver(struct omap_dss_driver *); diff --git a/trunk/include/xen/xenbus.h b/trunk/include/xen/xenbus.h index 569c07f2e344..0a7515c1e3a4 100644 --- a/trunk/include/xen/xenbus.h +++ b/trunk/include/xen/xenbus.h @@ -70,7 +70,6 @@ struct xenbus_device { struct device dev; enum xenbus_state state; struct completion down; - struct work_struct work; }; static inline struct xenbus_device *to_xenbus_device(struct device *dev) diff --git a/trunk/init/Kconfig b/trunk/init/Kconfig index 2d9b83104dcf..9d3a7887a6d3 100644 --- a/trunk/init/Kconfig +++ b/trunk/init/Kconfig @@ -431,7 +431,6 @@ choice config TREE_RCU bool "Tree-based hierarchical RCU" depends on !PREEMPT && SMP - select IRQ_WORK help This option selects the RCU implementation that is designed for very large SMP system with hundreds or diff --git a/trunk/ipc/sem.c b/trunk/ipc/sem.c index 70480a3aa698..a7e40ed8a076 100644 --- a/trunk/ipc/sem.c +++ b/trunk/ipc/sem.c @@ -752,29 +752,19 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop int otime, struct list_head *pt) { int i; - int progress; - progress = 1; -retry_global: - if (sma->complex_count) { - if (update_queue(sma, -1, pt)) { - progress = 1; + if (sma->complex_count || sops == NULL) { + if (update_queue(sma, -1, pt)) otime = 1; - sops = NULL; - } } - if (!progress) - goto done; if (!sops) { /* No semops; something special is going on. */ for (i = 0; i < sma->sem_nsems; i++) { - if (update_queue(sma, i, pt)) { + if (update_queue(sma, i, pt)) otime = 1; - progress = 1; - } } - goto done_checkretry; + goto done; } /* Check the semaphores that were modified. 
*/ @@ -782,15 +772,8 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop if (sops[i].sem_op > 0 || (sops[i].sem_op < 0 && sma->sem_base[sops[i].sem_num].semval == 0)) - if (update_queue(sma, sops[i].sem_num, pt)) { + if (update_queue(sma, sops[i].sem_num, pt)) otime = 1; - progress = 1; - } - } -done_checkretry: - if (progress) { - progress = 0; - goto retry_global; } done: if (otime) diff --git a/trunk/kernel/audit.c b/trunk/kernel/audit.c index 91e53d04b6a9..21c7fa615bd3 100644 --- a/trunk/kernel/audit.c +++ b/trunk/kernel/audit.c @@ -1056,7 +1056,7 @@ static inline void audit_get_stamp(struct audit_context *ctx, static void wait_for_auditd(unsigned long sleep_time) { DECLARE_WAITQUEUE(wait, current); - set_current_state(TASK_UNINTERRUPTIBLE); + set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&audit_backlog_wait, &wait); if (audit_backlog_limit && diff --git a/trunk/kernel/audit_tree.c b/trunk/kernel/audit_tree.c index 43c307dc9453..a291aa23fb3f 100644 --- a/trunk/kernel/audit_tree.c +++ b/trunk/kernel/audit_tree.c @@ -658,7 +658,6 @@ int audit_add_tree_rule(struct audit_krule *rule) struct vfsmount *mnt; int err; - rule->tree = NULL; list_for_each_entry(tree, &tree_list, list) { if (!strcmp(seed->pathname, tree->pathname)) { put_tree(seed); diff --git a/trunk/kernel/auditfilter.c b/trunk/kernel/auditfilter.c index 6bd4a90d1991..83a2970295d1 100644 --- a/trunk/kernel/auditfilter.c +++ b/trunk/kernel/auditfilter.c @@ -1021,6 +1021,9 @@ static void audit_log_rule_change(char *action, struct audit_krule *rule, int re * @seq: netlink audit message sequence (serial) number * @data: payload data * @datasz: size of payload data + * @loginuid: loginuid of sender + * @sessionid: sessionid for netlink audit message + * @sid: SE Linux Security ID of sender */ int audit_receive_filter(int type, int pid, int seq, void *data, size_t datasz) { diff --git a/trunk/kernel/cgroup.c b/trunk/kernel/cgroup.c index a7c9e6ddb979..2a9926275f80 100644 --- a/trunk/kernel/cgroup.c +++ b/trunk/kernel/cgroup.c @@ -1686,14 +1686,11 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, */ cgroup_drop_root(opts.new_root); - if (root->flags != opts.flags) { - if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) { - pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n"); - ret = -EINVAL; - goto drop_new_super; - } else { - pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n"); - } + if (((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) && + root->flags != opts.flags) { + pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n"); + ret = -EINVAL; + goto drop_new_super; } /* no subsys rebinding, so refcounts don't change */ @@ -2702,14 +2699,13 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys, goto out; } - cfe->type = (void *)cft; - cfe->dentry = dentry; - dentry->d_fsdata = cfe; - simple_xattrs_init(&cfe->xattrs); - mode = cgroup_file_mode(cft); error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb); if (!error) { + cfe->type = (void *)cft; + cfe->dentry = dentry; + dentry->d_fsdata = cfe; + simple_xattrs_init(&cfe->xattrs); list_add_tail(&cfe->node, &parent->files); cfe = NULL; } @@ -2957,8 +2953,11 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos, WARN_ON_ONCE(!rcu_read_lock_held()); /* if first iteration, pretend we just visited @cgroup */ - if (!pos) + if (!pos) { + if 
(list_empty(&cgroup->children)) + return NULL; pos = cgroup; + } /* visit the first child if exists */ next = list_first_or_null_rcu(&pos->children, struct cgroup, sibling); @@ -2966,14 +2965,14 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos, return next; /* no child, visit my or the closest ancestor's next sibling */ - while (pos != cgroup) { + do { next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling); if (&next->sibling != &pos->parent->children) return next; pos = pos->parent; - } + } while (pos != cgroup); return NULL; } diff --git a/trunk/kernel/context_tracking.c b/trunk/kernel/context_tracking.c index 383f8231e436..65349f07b878 100644 --- a/trunk/kernel/context_tracking.c +++ b/trunk/kernel/context_tracking.c @@ -15,6 +15,7 @@ */ #include +#include #include #include #include @@ -70,46 +71,6 @@ void user_enter(void) local_irq_restore(flags); } -#ifdef CONFIG_PREEMPT -/** - * preempt_schedule_context - preempt_schedule called by tracing - * - * The tracing infrastructure uses preempt_enable_notrace to prevent - * recursion and tracing preempt enabling caused by the tracing - * infrastructure itself. But as tracing can happen in areas coming - * from userspace or just about to enter userspace, a preempt enable - * can occur before user_exit() is called. This will cause the scheduler - * to be called when the system is still in usermode. - * - * To prevent this, the preempt_enable_notrace will use this function - * instead of preempt_schedule() to exit user context if needed before - * calling the scheduler. - */ -void __sched notrace preempt_schedule_context(void) -{ - struct thread_info *ti = current_thread_info(); - enum ctx_state prev_ctx; - - if (likely(ti->preempt_count || irqs_disabled())) - return; - - /* - * Need to disable preemption in case user_exit() is traced - * and the tracer calls preempt_enable_notrace() causing - * an infinite recursion. - */ - preempt_disable_notrace(); - prev_ctx = exception_enter(); - preempt_enable_no_resched_notrace(); - - preempt_schedule(); - - preempt_disable_notrace(); - exception_exit(prev_ctx); - preempt_enable_notrace(); -} -EXPORT_SYMBOL_GPL(preempt_schedule_context); -#endif /* CONFIG_PREEMPT */ /** * user_exit - Inform the context tracking that the CPU is diff --git a/trunk/kernel/cpu.c b/trunk/kernel/cpu.c index 198a38883e64..b5e4ab2d427e 100644 --- a/trunk/kernel/cpu.c +++ b/trunk/kernel/cpu.c @@ -133,27 +133,6 @@ static void cpu_hotplug_done(void) mutex_unlock(&cpu_hotplug.lock); } -/* - * Wait for currently running CPU hotplug operations to complete (if any) and - * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects - * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the - * hotplug path before performing hotplug operations. So acquiring that lock - * guarantees mutual exclusion from any currently running hotplug operations. - */ -void cpu_hotplug_disable(void) -{ - cpu_maps_update_begin(); - cpu_hotplug_disabled = 1; - cpu_maps_update_done(); -} - -void cpu_hotplug_enable(void) -{ - cpu_maps_update_begin(); - cpu_hotplug_disabled = 0; - cpu_maps_update_done(); -} - #else /* #if CONFIG_HOTPLUG_CPU */ static void cpu_hotplug_begin(void) {} static void cpu_hotplug_done(void) {} @@ -561,6 +540,36 @@ static int __init alloc_frozen_cpus(void) } core_initcall(alloc_frozen_cpus); +/* + * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU + * hotplug when tasks are about to be frozen. 
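The cgroup.c hunk above touches cgroup_next_descendant_pre(), whose job is a pre-order walk: first child if any, else next sibling, else the closest ancestor's next sibling. A compact userspace sketch of that traversal follows, with an illustrative node layout rather than the kernel's struct cgroup; unlike the kernel helper, this sketch also yields the root node itself.

    #include <stdio.h>
    #include <stddef.h>

    struct node {
            const char *name;
            struct node *parent;
            struct node *first_child;
            struct node *next_sibling;
    };

    /* Pre-order: first child, else next sibling, else an ancestor's sibling. */
    static struct node *next_descendant_pre(struct node *pos, struct node *root)
    {
            if (!pos)                       /* first iteration: start at the root */
                    return root;
            if (pos->first_child)
                    return pos->first_child;
            while (pos != root) {
                    if (pos->next_sibling)
                            return pos->next_sibling;
                    pos = pos->parent;      /* climb and retry the sibling step */
            }
            return NULL;                    /* back at the root: walk finished */
    }

    int main(void)
    {
            struct node a = { "a", NULL, NULL, NULL };
            struct node b = { "b", &a, NULL, NULL };
            struct node c = { "c", &a, NULL, NULL };
            struct node d = { "d", &b, NULL, NULL };

            a.first_child = &b;
            b.next_sibling = &c;
            b.first_child = &d;

            for (struct node *pos = NULL; (pos = next_descendant_pre(pos, &a)); )
                    printf("%s ", pos->name);       /* prints: a b d c */
            printf("\n");
            return 0;
    }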
Also, don't allow the freezer + * to continue until any currently running CPU hotplug operation gets + * completed. + * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the + * 'cpu_add_remove_lock'. And this same lock is also taken by the regular + * CPU hotplug path and released only after it is complete. Thus, we + * (and hence the freezer) will block here until any currently running CPU + * hotplug operation gets completed. + */ +void cpu_hotplug_disable_before_freeze(void) +{ + cpu_maps_update_begin(); + cpu_hotplug_disabled = 1; + cpu_maps_update_done(); +} + + +/* + * When tasks have been thawed, re-enable regular CPU hotplug (which had been + * disabled while beginning to freeze tasks). + */ +void cpu_hotplug_enable_after_thaw(void) +{ + cpu_maps_update_begin(); + cpu_hotplug_disabled = 0; + cpu_maps_update_done(); +} + /* * When callbacks for CPU hotplug notifications are being executed, we must * ensure that the state of the system with respect to the tasks being frozen @@ -580,12 +589,12 @@ cpu_hotplug_pm_callback(struct notifier_block *nb, case PM_SUSPEND_PREPARE: case PM_HIBERNATION_PREPARE: - cpu_hotplug_disable(); + cpu_hotplug_disable_before_freeze(); break; case PM_POST_SUSPEND: case PM_POST_HIBERNATION: - cpu_hotplug_enable(); + cpu_hotplug_enable_after_thaw(); break; default: diff --git a/trunk/kernel/cpu/idle.c b/trunk/kernel/cpu/idle.c index e695c0a0bcb5..8b86c0c68edf 100644 --- a/trunk/kernel/cpu/idle.c +++ b/trunk/kernel/cpu/idle.c @@ -5,7 +5,6 @@ #include #include #include -#include #include @@ -41,13 +40,11 @@ __setup("hlt", cpu_idle_nopoll_setup); static inline int cpu_idle_poll(void) { - rcu_idle_enter(); trace_cpu_idle_rcuidle(0, smp_processor_id()); local_irq_enable(); while (!need_resched()) cpu_relax(); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); - rcu_idle_exit(); return 1; } @@ -59,7 +56,6 @@ void __weak arch_cpu_idle_dead(void) { } void __weak arch_cpu_idle(void) { cpu_idle_force_poll = 1; - local_irq_enable(); } /* @@ -114,21 +110,6 @@ static void cpu_idle_loop(void) void cpu_startup_entry(enum cpuhp_state state) { - /* - * This #ifdef needs to die, but it's too late in the cycle to - * make this generic (arm and sh have never invoked the canary - * init for the non boot cpus!). Will be fixed in 3.11 - */ -#ifdef CONFIG_X86 - /* - * If we're the non-boot CPU, nothing set the stack canary up - * for us. The boot CPU already has it initialized but no harm - * in doing it again. This is a good place for updating it, as - * we wont ever return from this function (so the invalid - * canaries already on the stack wont ever trigger). 
- */ - boot_init_stack_canary(); -#endif current_set_polling(); arch_cpu_idle_prepare(); cpu_idle_loop(); diff --git a/trunk/kernel/events/core.c b/trunk/kernel/events/core.c index b391907d5352..6b41c1899a8b 100644 --- a/trunk/kernel/events/core.c +++ b/trunk/kernel/events/core.c @@ -196,6 +196,9 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, static void update_context_time(struct perf_event_context *ctx); static u64 perf_event_time(struct perf_event *event); +static void ring_buffer_attach(struct perf_event *event, + struct ring_buffer *rb); + void __weak perf_event_print_debug(void) { } extern __weak const char *perf_pmu_name(void) @@ -2915,7 +2918,6 @@ static void free_event_rcu(struct rcu_head *head) } static void ring_buffer_put(struct ring_buffer *rb); -static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb); static void free_event(struct perf_event *event) { @@ -2940,30 +2942,15 @@ static void free_event(struct perf_event *event) if (has_branch_stack(event)) { static_key_slow_dec_deferred(&perf_sched_events); /* is system-wide event */ - if (!(event->attach_state & PERF_ATTACH_TASK)) { + if (!(event->attach_state & PERF_ATTACH_TASK)) atomic_dec(&per_cpu(perf_branch_stack_events, event->cpu)); - } } } if (event->rb) { - struct ring_buffer *rb; - - /* - * Can happen when we close an event with re-directed output. - * - * Since we have a 0 refcount, perf_mmap_close() will skip - * over us; possibly making our ring_buffer_put() the last. - */ - mutex_lock(&event->mmap_mutex); - rb = event->rb; - if (rb) { - rcu_assign_pointer(event->rb, NULL); - ring_buffer_detach(event, rb); - ring_buffer_put(rb); /* could be last */ - } - mutex_unlock(&event->mmap_mutex); + ring_buffer_put(event->rb); + event->rb = NULL; } if (is_cgroup_event(event)) @@ -3201,13 +3188,30 @@ static unsigned int perf_poll(struct file *file, poll_table *wait) unsigned int events = POLL_HUP; /* - * Pin the event->rb by taking event->mmap_mutex; otherwise - * perf_event_set_output() can swizzle our rb and make us miss wakeups. + * Race between perf_event_set_output() and perf_poll(): perf_poll() + * grabs the rb reference but perf_event_set_output() overrides it. 
+ * Here is the timeline for two threads T1, T2: + * t0: T1, rb = rcu_dereference(event->rb) + * t1: T2, old_rb = event->rb + * t2: T2, event->rb = new rb + * t3: T2, ring_buffer_detach(old_rb) + * t4: T1, ring_buffer_attach(rb1) + * t5: T1, poll_wait(event->waitq) + * + * To avoid this problem, we grab mmap_mutex in perf_poll() + * thereby ensuring that the assignment of the new ring buffer + * and the detachment of the old buffer appear atomic to perf_poll() */ mutex_lock(&event->mmap_mutex); - rb = event->rb; - if (rb) + + rcu_read_lock(); + rb = rcu_dereference(event->rb); + if (rb) { + ring_buffer_attach(event, rb); events = atomic_xchg(&rb->poll, 0); + } + rcu_read_unlock(); + mutex_unlock(&event->mmap_mutex); poll_wait(file, &event->waitq, wait); @@ -3517,12 +3521,16 @@ static void ring_buffer_attach(struct perf_event *event, return; spin_lock_irqsave(&rb->event_lock, flags); - if (list_empty(&event->rb_entry)) - list_add(&event->rb_entry, &rb->event_list); + if (!list_empty(&event->rb_entry)) + goto unlock; + + list_add(&event->rb_entry, &rb->event_list); +unlock: spin_unlock_irqrestore(&rb->event_lock, flags); } -static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb) +static void ring_buffer_detach(struct perf_event *event, + struct ring_buffer *rb) { unsigned long flags; @@ -3541,10 +3549,13 @@ static void ring_buffer_wakeup(struct perf_event *event) rcu_read_lock(); rb = rcu_dereference(event->rb); - if (rb) { - list_for_each_entry_rcu(event, &rb->event_list, rb_entry) - wake_up_all(&event->waitq); - } + if (!rb) + goto unlock; + + list_for_each_entry_rcu(event, &rb->event_list, rb_entry) + wake_up_all(&event->waitq); + +unlock: rcu_read_unlock(); } @@ -3573,10 +3584,18 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event) static void ring_buffer_put(struct ring_buffer *rb) { + struct perf_event *event, *n; + unsigned long flags; + if (!atomic_dec_and_test(&rb->refcount)) return; - WARN_ON_ONCE(!list_empty(&rb->event_list)); + spin_lock_irqsave(&rb->event_lock, flags); + list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) { + list_del_init(&event->rb_entry); + wake_up_all(&event->waitq); + } + spin_unlock_irqrestore(&rb->event_lock, flags); call_rcu(&rb->rcu_head, rb_free_rcu); } @@ -3586,100 +3605,26 @@ static void perf_mmap_open(struct vm_area_struct *vma) struct perf_event *event = vma->vm_file->private_data; atomic_inc(&event->mmap_count); - atomic_inc(&event->rb->mmap_count); } -/* - * A buffer can be mmap()ed multiple times; either directly through the same - * event, or through other events by use of perf_event_set_output(). - * - * In order to undo the VM accounting done by perf_mmap() we need to destroy - * the buffer here, where we still have a VM context. This means we need - * to detach all events redirecting to us. - */ static void perf_mmap_close(struct vm_area_struct *vma) { struct perf_event *event = vma->vm_file->private_data; - struct ring_buffer *rb = event->rb; - struct user_struct *mmap_user = rb->mmap_user; - int mmap_locked = rb->mmap_locked; - unsigned long size = perf_data_size(rb); - - atomic_dec(&rb->mmap_count); - - if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) - return; - - /* Detach current event from the buffer. 
*/ - rcu_assign_pointer(event->rb, NULL); - ring_buffer_detach(event, rb); - mutex_unlock(&event->mmap_mutex); + if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { + unsigned long size = perf_data_size(event->rb); + struct user_struct *user = event->mmap_user; + struct ring_buffer *rb = event->rb; - /* If there's still other mmap()s of this buffer, we're done. */ - if (atomic_read(&rb->mmap_count)) { - ring_buffer_put(rb); /* can't be last */ - return; - } - - /* - * No other mmap()s, detach from all other events that might redirect - * into the now unreachable buffer. Somewhat complicated by the - * fact that rb::event_lock otherwise nests inside mmap_mutex. - */ -again: - rcu_read_lock(); - list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { - if (!atomic_long_inc_not_zero(&event->refcount)) { - /* - * This event is en-route to free_event() which will - * detach it and remove it from the list. - */ - continue; - } - rcu_read_unlock(); - - mutex_lock(&event->mmap_mutex); - /* - * Check we didn't race with perf_event_set_output() which can - * swizzle the rb from under us while we were waiting to - * acquire mmap_mutex. - * - * If we find a different rb; ignore this event, a next - * iteration will no longer find it on the list. We have to - * still restart the iteration to make sure we're not now - * iterating the wrong list. - */ - if (event->rb == rb) { - rcu_assign_pointer(event->rb, NULL); - ring_buffer_detach(event, rb); - ring_buffer_put(rb); /* can't be last, we still have one */ - } + atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); + vma->vm_mm->pinned_vm -= event->mmap_locked; + rcu_assign_pointer(event->rb, NULL); + ring_buffer_detach(event, rb); mutex_unlock(&event->mmap_mutex); - put_event(event); - /* - * Restart the iteration; either we're on the wrong list or - * destroyed its integrity by doing a deletion. - */ - goto again; + ring_buffer_put(rb); + free_uid(user); } - rcu_read_unlock(); - - /* - * It could be there's still a few 0-ref events on the list; they'll - * get cleaned up by free_event() -- they'll also still have their - * ref on the rb and will free it whenever they are done with it. - * - * Aside from that, this buffer is 'fully' detached and unmapped, - * undo the VM accounting. - */ - - atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); - vma->vm_mm->pinned_vm -= mmap_locked; - free_uid(mmap_user); - - ring_buffer_put(rb); /* could be last */ } static const struct vm_operations_struct perf_mmap_vmops = { @@ -3729,24 +3674,12 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) return -EINVAL; WARN_ON_ONCE(event->ctx->parent_ctx); -again: mutex_lock(&event->mmap_mutex); if (event->rb) { - if (event->rb->nr_pages != nr_pages) { + if (event->rb->nr_pages == nr_pages) + atomic_inc(&event->rb->refcount); + else ret = -EINVAL; - goto unlock; - } - - if (!atomic_inc_not_zero(&event->rb->mmap_count)) { - /* - * Raced against perf_mmap_close() through - * perf_event_set_output(). Try again, hope for better - * luck. 
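ring_buffer_put() in the hunk above frees the buffer only when atomic_dec_and_test() drops the last reference. Below is a self-contained C11 sketch of that pattern using <stdatomic.h>; struct buf and its fields are illustrative, not the perf ring buffer.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdatomic.h>

    struct buf {
            atomic_int refcount;
            size_t size;
    };

    static struct buf *buf_get(struct buf *b)
    {
            atomic_fetch_add(&b->refcount, 1);
            return b;
    }

    static void buf_put(struct buf *b)
    {
            /* fetch_sub returns the old value; 1 means this was the last holder */
            if (atomic_fetch_sub(&b->refcount, 1) == 1) {
                    printf("freeing %zu-byte buffer\n", b->size);
                    free(b);
            }
    }

    int main(void)
    {
            struct buf *b = malloc(sizeof(*b));

            if (!b)
                    return 1;
            atomic_init(&b->refcount, 1);
            b->size = 4096;

            struct buf *extra = buf_get(b);         /* a second user appears */
            buf_put(b);                             /* first user done: kept alive */
            buf_put(extra);                         /* last reference: freed here */
            return 0;
    }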
- */ - mutex_unlock(&event->mmap_mutex); - goto again; - } - goto unlock; } @@ -3787,16 +3720,12 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) ret = -ENOMEM; goto unlock; } - - atomic_set(&rb->mmap_count, 1); - rb->mmap_locked = extra; - rb->mmap_user = get_current_user(); + rcu_assign_pointer(event->rb, rb); atomic_long_add(user_extra, &user->locked_vm); - vma->vm_mm->pinned_vm += extra; - - ring_buffer_attach(event, rb); - rcu_assign_pointer(event->rb, rb); + event->mmap_locked = extra; + event->mmap_user = get_current_user(); + vma->vm_mm->pinned_vm += event->mmap_locked; perf_event_update_userpage(event); @@ -3805,11 +3734,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) atomic_inc(&event->mmap_count); mutex_unlock(&event->mmap_mutex); - /* - * Since pinned accounting is per vm we cannot allow fork() to copy our - * vma. - */ - vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; vma->vm_ops = &perf_mmap_vmops; return ret; @@ -4469,64 +4394,6 @@ perf_event_read_event(struct perf_event *event, perf_output_end(&handle); } -typedef int (perf_event_aux_match_cb)(struct perf_event *event, void *data); -typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data); - -static void -perf_event_aux_ctx(struct perf_event_context *ctx, - perf_event_aux_match_cb match, - perf_event_aux_output_cb output, - void *data) -{ - struct perf_event *event; - - list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { - if (event->state < PERF_EVENT_STATE_INACTIVE) - continue; - if (!event_filter_match(event)) - continue; - if (match(event, data)) - output(event, data); - } -} - -static void -perf_event_aux(perf_event_aux_match_cb match, - perf_event_aux_output_cb output, - void *data, - struct perf_event_context *task_ctx) -{ - struct perf_cpu_context *cpuctx; - struct perf_event_context *ctx; - struct pmu *pmu; - int ctxn; - - rcu_read_lock(); - list_for_each_entry_rcu(pmu, &pmus, entry) { - cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); - if (cpuctx->unique_pmu != pmu) - goto next; - perf_event_aux_ctx(&cpuctx->ctx, match, output, data); - if (task_ctx) - goto next; - ctxn = pmu->task_ctx_nr; - if (ctxn < 0) - goto next; - ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); - if (ctx) - perf_event_aux_ctx(ctx, match, output, data); -next: - put_cpu_ptr(pmu->pmu_cpu_context); - } - - if (task_ctx) { - preempt_disable(); - perf_event_aux_ctx(task_ctx, match, output, data); - preempt_enable(); - } - rcu_read_unlock(); -} - /* * task tracking -- fork/exit * @@ -4549,9 +4416,8 @@ struct perf_task_event { }; static void perf_event_task_output(struct perf_event *event, - void *data) + struct perf_task_event *task_event) { - struct perf_task_event *task_event = data; struct perf_output_handle handle; struct perf_sample_data sample; struct task_struct *task = task_event->task; @@ -4579,11 +4445,62 @@ static void perf_event_task_output(struct perf_event *event, task_event->event_id.header.size = size; } -static int perf_event_task_match(struct perf_event *event, - void *data __maybe_unused) +static int perf_event_task_match(struct perf_event *event) +{ + if (event->state < PERF_EVENT_STATE_INACTIVE) + return 0; + + if (!event_filter_match(event)) + return 0; + + if (event->attr.comm || event->attr.mmap || + event->attr.mmap_data || event->attr.task) + return 1; + + return 0; +} + +static void perf_event_task_ctx(struct perf_event_context *ctx, + struct perf_task_event *task_event) { - 
return event->attr.comm || event->attr.mmap || - event->attr.mmap_data || event->attr.task; + struct perf_event *event; + + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { + if (perf_event_task_match(event)) + perf_event_task_output(event, task_event); + } +} + +static void perf_event_task_event(struct perf_task_event *task_event) +{ + struct perf_cpu_context *cpuctx; + struct perf_event_context *ctx; + struct pmu *pmu; + int ctxn; + + rcu_read_lock(); + list_for_each_entry_rcu(pmu, &pmus, entry) { + cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); + if (cpuctx->unique_pmu != pmu) + goto next; + perf_event_task_ctx(&cpuctx->ctx, task_event); + + ctx = task_event->task_ctx; + if (!ctx) { + ctxn = pmu->task_ctx_nr; + if (ctxn < 0) + goto next; + ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); + if (ctx) + perf_event_task_ctx(ctx, task_event); + } +next: + put_cpu_ptr(pmu->pmu_cpu_context); + } + if (task_event->task_ctx) + perf_event_task_ctx(task_event->task_ctx, task_event); + + rcu_read_unlock(); } static void perf_event_task(struct task_struct *task, @@ -4614,10 +4531,7 @@ static void perf_event_task(struct task_struct *task, }, }; - perf_event_aux(perf_event_task_match, - perf_event_task_output, - &task_event, - task_ctx); + perf_event_task_event(&task_event); } void perf_event_fork(struct task_struct *task) @@ -4643,9 +4557,8 @@ struct perf_comm_event { }; static void perf_event_comm_output(struct perf_event *event, - void *data) + struct perf_comm_event *comm_event) { - struct perf_comm_event *comm_event = data; struct perf_output_handle handle; struct perf_sample_data sample; int size = comm_event->event_id.header.size; @@ -4672,16 +4585,39 @@ static void perf_event_comm_output(struct perf_event *event, comm_event->event_id.header.size = size; } -static int perf_event_comm_match(struct perf_event *event, - void *data __maybe_unused) +static int perf_event_comm_match(struct perf_event *event) { - return event->attr.comm; + if (event->state < PERF_EVENT_STATE_INACTIVE) + return 0; + + if (!event_filter_match(event)) + return 0; + + if (event->attr.comm) + return 1; + + return 0; +} + +static void perf_event_comm_ctx(struct perf_event_context *ctx, + struct perf_comm_event *comm_event) +{ + struct perf_event *event; + + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { + if (perf_event_comm_match(event)) + perf_event_comm_output(event, comm_event); + } } static void perf_event_comm_event(struct perf_comm_event *comm_event) { + struct perf_cpu_context *cpuctx; + struct perf_event_context *ctx; char comm[TASK_COMM_LEN]; unsigned int size; + struct pmu *pmu; + int ctxn; memset(comm, 0, sizeof(comm)); strlcpy(comm, comm_event->task->comm, sizeof(comm)); @@ -4691,11 +4627,24 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) comm_event->comm_size = size; comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; + rcu_read_lock(); + list_for_each_entry_rcu(pmu, &pmus, entry) { + cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); + if (cpuctx->unique_pmu != pmu) + goto next; + perf_event_comm_ctx(&cpuctx->ctx, comm_event); - perf_event_aux(perf_event_comm_match, - perf_event_comm_output, - comm_event, - NULL); + ctxn = pmu->task_ctx_nr; + if (ctxn < 0) + goto next; + + ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); + if (ctx) + perf_event_comm_ctx(ctx, comm_event); +next: + put_cpu_ptr(pmu->pmu_cpu_context); + } + rcu_read_unlock(); } void perf_event_comm(struct task_struct *task) @@ -4757,9 +4706,8 @@ struct perf_mmap_event 
{ }; static void perf_event_mmap_output(struct perf_event *event, - void *data) + struct perf_mmap_event *mmap_event) { - struct perf_mmap_event *mmap_event = data; struct perf_output_handle handle; struct perf_sample_data sample; int size = mmap_event->event_id.header.size; @@ -4786,24 +4734,46 @@ static void perf_event_mmap_output(struct perf_event *event, } static int perf_event_mmap_match(struct perf_event *event, - void *data) + struct perf_mmap_event *mmap_event, + int executable) { - struct perf_mmap_event *mmap_event = data; - struct vm_area_struct *vma = mmap_event->vma; - int executable = vma->vm_flags & VM_EXEC; + if (event->state < PERF_EVENT_STATE_INACTIVE) + return 0; + + if (!event_filter_match(event)) + return 0; + + if ((!executable && event->attr.mmap_data) || + (executable && event->attr.mmap)) + return 1; - return (!executable && event->attr.mmap_data) || - (executable && event->attr.mmap); + return 0; +} + +static void perf_event_mmap_ctx(struct perf_event_context *ctx, + struct perf_mmap_event *mmap_event, + int executable) +{ + struct perf_event *event; + + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { + if (perf_event_mmap_match(event, mmap_event, executable)) + perf_event_mmap_output(event, mmap_event); + } } static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) { + struct perf_cpu_context *cpuctx; + struct perf_event_context *ctx; struct vm_area_struct *vma = mmap_event->vma; struct file *file = vma->vm_file; unsigned int size; char tmp[16]; char *buf = NULL; const char *name; + struct pmu *pmu; + int ctxn; memset(tmp, 0, sizeof(tmp)); @@ -4859,10 +4829,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; - perf_event_aux(perf_event_mmap_match, - perf_event_mmap_output, - mmap_event, - NULL); + rcu_read_lock(); + list_for_each_entry_rcu(pmu, &pmus, entry) { + cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); + if (cpuctx->unique_pmu != pmu) + goto next; + perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, + vma->vm_flags & VM_EXEC); + + ctxn = pmu->task_ctx_nr; + if (ctxn < 0) + goto next; + + ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); + if (ctx) { + perf_event_mmap_ctx(ctx, mmap_event, + vma->vm_flags & VM_EXEC); + } +next: + put_cpu_ptr(pmu->pmu_cpu_context); + } + rcu_read_unlock(); kfree(buf); } @@ -6487,8 +6474,6 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event) if (atomic_read(&event->mmap_count)) goto unlock; - old_rb = event->rb; - if (output_event) { /* get the rb we want to redirect to */ rb = ring_buffer_get(output_event); @@ -6496,28 +6481,16 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event) goto unlock; } + old_rb = event->rb; + rcu_assign_pointer(event->rb, rb); if (old_rb) ring_buffer_detach(event, old_rb); - - if (rb) - ring_buffer_attach(event, rb); - - rcu_assign_pointer(event->rb, rb); - - if (old_rb) { - ring_buffer_put(old_rb); - /* - * Since we detached before setting the new rb, so that we - * could attach the new rb, we could have missed a wakeup. - * Provide it now. 
- */ - wake_up_all(&event->waitq); - } - ret = 0; unlock: mutex_unlock(&event->mmap_mutex); + if (old_rb) + ring_buffer_put(old_rb); out: return ret; } diff --git a/trunk/kernel/events/hw_breakpoint.c b/trunk/kernel/events/hw_breakpoint.c index 20185ea64aa6..a64f8aeb5c1f 100644 --- a/trunk/kernel/events/hw_breakpoint.c +++ b/trunk/kernel/events/hw_breakpoint.c @@ -120,7 +120,7 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type) list_for_each_entry(iter, &bp_task_head, hw.bp_list) { if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type && - (iter->cpu < 0 || cpu == iter->cpu)) + cpu == iter->cpu) count += hw_breakpoint_weight(iter); } @@ -149,7 +149,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, return; } - for_each_possible_cpu(cpu) { + for_each_online_cpu(cpu) { unsigned int nr; nr = per_cpu(nr_cpu_bp_pinned[type], cpu); @@ -235,7 +235,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, if (cpu >= 0) { toggle_bp_task_slot(bp, cpu, enable, type, weight); } else { - for_each_possible_cpu(cpu) + for_each_online_cpu(cpu) toggle_bp_task_slot(bp, cpu, enable, type, weight); } diff --git a/trunk/kernel/events/internal.h b/trunk/kernel/events/internal.h index ca6599723be5..eb675c4d59df 100644 --- a/trunk/kernel/events/internal.h +++ b/trunk/kernel/events/internal.h @@ -31,10 +31,6 @@ struct ring_buffer { spinlock_t event_lock; struct list_head event_list; - atomic_t mmap_count; - unsigned long mmap_locked; - struct user_struct *mmap_user; - struct perf_event_mmap_page *user_page; void *data_pages[0]; }; diff --git a/trunk/kernel/exit.c b/trunk/kernel/exit.c index 7bb73f9d09db..af2eb3cbd499 100644 --- a/trunk/kernel/exit.c +++ b/trunk/kernel/exit.c @@ -649,6 +649,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead) * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) */ forget_original_parent(tsk); + exit_task_namespaces(tsk); write_lock_irq(&tasklist_lock); if (group_dead) @@ -794,7 +795,6 @@ void do_exit(long code) exit_shm(tsk); exit_files(tsk); exit_fs(tsk); - exit_task_namespaces(tsk); exit_task_work(tsk); check_stack_usage(); exit_thread(); diff --git a/trunk/kernel/irq/irqdomain.c b/trunk/kernel/irq/irqdomain.c index 54a4d5223238..5a83dde8ca0c 100644 --- a/trunk/kernel/irq/irqdomain.c +++ b/trunk/kernel/irq/irqdomain.c @@ -143,10 +143,7 @@ static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain, * irq_domain_add_simple() - Allocate and register a simple irq_domain. * @of_node: pointer to interrupt controller's device tree node. * @size: total number of irqs in mapping - * @first_irq: first number of irq block assigned to the domain, - * pass zero to assign irqs on-the-fly. This will result in a - * linear IRQ domain so it is important to use irq_create_mapping() - * for each used IRQ, especially when SPARSE_IRQ is enabled. + * @first_irq: first number of irq block assigned to the domain * @ops: map/unmap domain callbacks * @host_data: Controller private data pointer * @@ -194,7 +191,6 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node, /* A linear domain is the default */ return irq_domain_add_linear(of_node, size, ops, host_data); } -EXPORT_SYMBOL_GPL(irq_domain_add_simple); /** * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain. 
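
For illustration only, a minimal sketch of how a driver might use irq_domain_add_simple() with first_irq == 0, the case described by the kernel-doc text touched in the irqdomain.c hunk above (a linear domain is created and each used hwirq must be mapped with irq_create_mapping(), especially with SPARSE_IRQ). The foo_* names and the empty ops structure are hypothetical; this code is not part of the patch and only assumes the irq_domain_add_simple()/irq_create_mapping() interfaces shown above.

#include <linux/errno.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

#define FOO_NR_IRQS	32	/* hypothetical controller: 32 interrupt lines */

/* .map/.xlate callbacks elided for brevity */
static const struct irq_domain_ops foo_irq_domain_ops;

static int foo_irq_init(struct device_node *np)
{
	struct irq_domain *domain;
	irq_hw_number_t hwirq;

	/*
	 * first_irq == 0: no preallocated descriptor block, so the call
	 * falls back to a linear domain and virqs are assigned on the fly.
	 */
	domain = irq_domain_add_simple(np, FOO_NR_IRQS, 0,
				       &foo_irq_domain_ops, NULL);
	if (!domain)
		return -ENOMEM;

	/*
	 * With a linear domain, each hwirq that will actually be used must
	 * be mapped explicitly; irq_create_mapping() allocates the virq.
	 */
	for (hwirq = 0; hwirq < FOO_NR_IRQS; hwirq++)
		if (!irq_create_mapping(domain, hwirq))
			return -EINVAL;

	return 0;
}
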
@@ -401,12 +397,11 @@ static void irq_domain_disassociate_many(struct irq_domain *domain, while (count--) { int irq = irq_base + count; struct irq_data *irq_data = irq_get_irq_data(irq); - irq_hw_number_t hwirq; + irq_hw_number_t hwirq = irq_data->hwirq; if (WARN_ON(!irq_data || irq_data->domain != domain)) continue; - hwirq = irq_data->hwirq; irq_set_status_flags(irq, IRQ_NOREQUEST); /* remove chip and handler */ diff --git a/trunk/kernel/kmod.c b/trunk/kernel/kmod.c index 8241906c4b61..1296e72e4161 100644 --- a/trunk/kernel/kmod.c +++ b/trunk/kernel/kmod.c @@ -569,11 +569,6 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) int retval = 0; helper_lock(); - if (!sub_info->path) { - retval = -EINVAL; - goto out; - } - if (sub_info->path[0] == '\0') goto out; diff --git a/trunk/kernel/kprobes.c b/trunk/kernel/kprobes.c index bddf3b201a48..3fed7f0cbcdf 100644 --- a/trunk/kernel/kprobes.c +++ b/trunk/kernel/kprobes.c @@ -467,7 +467,6 @@ static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr) /* Optimization staging list, protected by kprobe_mutex */ static LIST_HEAD(optimizing_list); static LIST_HEAD(unoptimizing_list); -static LIST_HEAD(freeing_list); static void kprobe_optimizer(struct work_struct *work); static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); @@ -505,7 +504,7 @@ static __kprobes void do_optimize_kprobes(void) * Unoptimize (replace a jump with a breakpoint and remove the breakpoint * if need) kprobes listed on unoptimizing_list. */ -static __kprobes void do_unoptimize_kprobes(void) +static __kprobes void do_unoptimize_kprobes(struct list_head *free_list) { struct optimized_kprobe *op, *tmp; @@ -516,9 +515,9 @@ static __kprobes void do_unoptimize_kprobes(void) /* Ditto to do_optimize_kprobes */ get_online_cpus(); mutex_lock(&text_mutex); - arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); + arch_unoptimize_kprobes(&unoptimizing_list, free_list); /* Loop free_list for disarming */ - list_for_each_entry_safe(op, tmp, &freeing_list, list) { + list_for_each_entry_safe(op, tmp, free_list, list) { /* Disarm probes if marked disabled */ if (kprobe_disabled(&op->kp)) arch_disarm_kprobe(&op->kp); @@ -537,11 +536,11 @@ static __kprobes void do_unoptimize_kprobes(void) } /* Reclaim all kprobes on the free_list */ -static __kprobes void do_free_cleaned_kprobes(void) +static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list) { struct optimized_kprobe *op, *tmp; - list_for_each_entry_safe(op, tmp, &freeing_list, list) { + list_for_each_entry_safe(op, tmp, free_list, list) { BUG_ON(!kprobe_unused(&op->kp)); list_del_init(&op->list); free_aggr_kprobe(&op->kp); @@ -557,6 +556,8 @@ static __kprobes void kick_kprobe_optimizer(void) /* Kprobe jump optimizer */ static __kprobes void kprobe_optimizer(struct work_struct *work) { + LIST_HEAD(free_list); + mutex_lock(&kprobe_mutex); /* Lock modules while optimizing kprobes */ mutex_lock(&module_mutex); @@ -565,7 +566,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work) * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed) * kprobes before waiting for quiesence period. 
*/ - do_unoptimize_kprobes(); + do_unoptimize_kprobes(&free_list); /* * Step 2: Wait for quiesence period to ensure all running interrupts @@ -580,7 +581,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work) do_optimize_kprobes(); /* Step 4: Free cleaned kprobes after quiesence period */ - do_free_cleaned_kprobes(); + do_free_cleaned_kprobes(&free_list); mutex_unlock(&module_mutex); mutex_unlock(&kprobe_mutex); @@ -722,19 +723,8 @@ static void __kprobes kill_optimized_kprobe(struct kprobe *p) if (!list_empty(&op->list)) /* Dequeue from the (un)optimization queue */ list_del_init(&op->list); - op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; - - if (kprobe_unused(p)) { - /* Enqueue if it is unused */ - list_add(&op->list, &freeing_list); - /* - * Remove unused probes from the hash list. After waiting - * for synchronization, this probe is reclaimed. - * (reclaiming is done by do_free_cleaned_kprobes().) - */ - hlist_del_rcu(&op->kp.hlist); - } + op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; /* Don't touch the code, because it is already freed. */ arch_remove_optimized_kprobe(op); } diff --git a/trunk/kernel/module.c b/trunk/kernel/module.c index cab4bce49c23..b049939177f6 100644 --- a/trunk/kernel/module.c +++ b/trunk/kernel/module.c @@ -2431,10 +2431,10 @@ static void kmemleak_load_module(const struct module *mod, kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL); for (i = 1; i < info->hdr->e_shnum; i++) { - /* Scan all writable sections that's not executable */ - if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) || - !(info->sechdrs[i].sh_flags & SHF_WRITE) || - (info->sechdrs[i].sh_flags & SHF_EXECINSTR)) + const char *name = info->secstrings + info->sechdrs[i].sh_name; + if (!(info->sechdrs[i].sh_flags & SHF_ALLOC)) + continue; + if (!strstarts(name, ".data") && !strstarts(name, ".bss")) continue; kmemleak_scan_area((void *)info->sechdrs[i].sh_addr, @@ -2769,11 +2769,24 @@ static void find_module_sections(struct module *mod, struct load_info *info) mod->trace_events = section_objs(info, "_ftrace_events", sizeof(*mod->trace_events), &mod->num_trace_events); + /* + * This section contains pointers to allocated objects in the trace + * code and not scanning it leads to false positives. + */ + kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) * + mod->num_trace_events, GFP_KERNEL); #endif #ifdef CONFIG_TRACING mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", sizeof(*mod->trace_bprintk_fmt_start), &mod->num_trace_bprintk_fmt); + /* + * This section contains pointers to allocated objects in the trace + * code and not scanning it leads to false positives. + */ + kmemleak_scan_area(mod->trace_bprintk_fmt_start, + sizeof(*mod->trace_bprintk_fmt_start) * + mod->num_trace_bprintk_fmt, GFP_KERNEL); #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD /* sechdrs[0].sh_size is always zero */ diff --git a/trunk/kernel/printk.c b/trunk/kernel/printk.c index 8212c1aef125..fa36e1494420 100644 --- a/trunk/kernel/printk.c +++ b/trunk/kernel/printk.c @@ -363,53 +363,6 @@ static void log_store(int facility, int level, log_next_seq++; } -#ifdef CONFIG_SECURITY_DMESG_RESTRICT -int dmesg_restrict = 1; -#else -int dmesg_restrict; -#endif - -static int syslog_action_restricted(int type) -{ - if (dmesg_restrict) - return 1; - /* - * Unless restricted, we allow "read all" and "get buffer size" - * for everybody. 
- */ - return type != SYSLOG_ACTION_READ_ALL && - type != SYSLOG_ACTION_SIZE_BUFFER; -} - -static int check_syslog_permissions(int type, bool from_file) -{ - /* - * If this is from /proc/kmsg and we've already opened it, then we've - * already done the capabilities checks at open time. - */ - if (from_file && type != SYSLOG_ACTION_OPEN) - return 0; - - if (syslog_action_restricted(type)) { - if (capable(CAP_SYSLOG)) - return 0; - /* - * For historical reasons, accept CAP_SYS_ADMIN too, with - * a warning. - */ - if (capable(CAP_SYS_ADMIN)) { - pr_warn_once("%s (%d): Attempt to access syslog with " - "CAP_SYS_ADMIN but no CAP_SYSLOG " - "(deprecated).\n", - current->comm, task_pid_nr(current)); - return 0; - } - return -EPERM; - } - return security_syslog(type); -} - - /* /dev/kmsg - userspace message inject/listen interface */ struct devkmsg_user { u64 seq; @@ -667,8 +620,7 @@ static int devkmsg_open(struct inode *inode, struct file *file) if ((file->f_flags & O_ACCMODE) == O_WRONLY) return 0; - err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL, - SYSLOG_FROM_READER); + err = security_syslog(SYSLOG_ACTION_READ_ALL); if (err) return err; @@ -861,6 +813,45 @@ static inline void boot_delay_msec(int level) } #endif +#ifdef CONFIG_SECURITY_DMESG_RESTRICT +int dmesg_restrict = 1; +#else +int dmesg_restrict; +#endif + +static int syslog_action_restricted(int type) +{ + if (dmesg_restrict) + return 1; + /* Unless restricted, we allow "read all" and "get buffer size" for everybody */ + return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER; +} + +static int check_syslog_permissions(int type, bool from_file) +{ + /* + * If this is from /proc/kmsg and we've already opened it, then we've + * already done the capabilities checks at open time. 
+ */ + if (from_file && type != SYSLOG_ACTION_OPEN) + return 0; + + if (syslog_action_restricted(type)) { + if (capable(CAP_SYSLOG)) + return 0; + /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */ + if (capable(CAP_SYS_ADMIN)) { + printk_once(KERN_WARNING "%s (%d): " + "Attempt to access syslog with CAP_SYS_ADMIN " + "but no CAP_SYSLOG (deprecated).\n", + current->comm, task_pid_nr(current)); + return 0; + } + return -EPERM; + } + return 0; +} + #if defined(CONFIG_PRINTK_TIME) static bool printk_time = 1; #else @@ -1258,7 +1249,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file) SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) { - return do_syslog(type, buf, len, SYSLOG_FROM_READER); + return do_syslog(type, buf, len, SYSLOG_FROM_CALL); } /* diff --git a/trunk/kernel/ptrace.c b/trunk/kernel/ptrace.c index 335a7ae697f5..aed981a3f69c 100644 --- a/trunk/kernel/ptrace.c +++ b/trunk/kernel/ptrace.c @@ -665,22 +665,20 @@ static int ptrace_peek_siginfo(struct task_struct *child, if (unlikely(is_compat_task())) { compat_siginfo_t __user *uinfo = compat_ptr(data); - if (copy_siginfo_to_user32(uinfo, &info) || - __put_user(info.si_code, &uinfo->si_code)) { - ret = -EFAULT; - break; - } - + ret = copy_siginfo_to_user32(uinfo, &info); + ret |= __put_user(info.si_code, &uinfo->si_code); } else #endif { siginfo_t __user *uinfo = (siginfo_t __user *) data; - if (copy_siginfo_to_user(uinfo, &info) || - __put_user(info.si_code, &uinfo->si_code)) { - ret = -EFAULT; - break; - } + ret = copy_siginfo_to_user(uinfo, &info); + ret |= __put_user(info.si_code, &uinfo->si_code); + } + + if (ret) { + ret = -EFAULT; + break; } data += sizeof(siginfo_t); diff --git a/trunk/kernel/range.c b/trunk/kernel/range.c index 322ea8e93e4b..071b0ab455cb 100644 --- a/trunk/kernel/range.c +++ b/trunk/kernel/range.c @@ -4,7 +4,7 @@ #include #include #include -#include + #include int add_range(struct range *range, int az, int nr_range, u64 start, u64 end) @@ -32,8 +32,9 @@ int add_range_with_merge(struct range *range, int az, int nr_range, if (start >= end) return nr_range; - /* get new start/end: */ + /* Try to merge it with old one: */ for (i = 0; i < nr_range; i++) { + u64 final_start, final_end; u64 common_start, common_end; if (!range[i].end) @@ -44,16 +45,12 @@ int add_range_with_merge(struct range *range, int az, int nr_range, if (common_start > common_end) continue; - /* new start/end, will add it back at last */ - start = min(range[i].start, start); - end = max(range[i].end, end); + final_start = min(range[i].start, start); + final_end = max(range[i].end, end); - memmove(&range[i], &range[i + 1], - (nr_range - (i + 1)) * sizeof(range[i])); - range[nr_range - 1].start = 0; - range[nr_range - 1].end = 0; - nr_range--; - i--; + range[i].start = final_start; + range[i].end = final_end; + return nr_range; } /* Need to add it: */ diff --git a/trunk/kernel/rcutree.c b/trunk/kernel/rcutree.c index 35380019f0fc..16ea67925015 100644 --- a/trunk/kernel/rcutree.c +++ b/trunk/kernel/rcutree.c @@ -1451,9 +1451,9 @@ static int rcu_gp_init(struct rcu_state *rsp) rnp->grphi, rnp->qsmask); raw_spin_unlock_irq(&rnp->lock); #ifdef CONFIG_PROVE_RCU_DELAY - if ((prandom_u32() % (rcu_num_nodes + 1)) == 0 && + if ((prandom_u32() % (rcu_num_nodes * 8)) == 0 && system_state == SYSTEM_RUNNING) - udelay(200); + schedule_timeout_uninterruptible(2); #endif /* #ifdef CONFIG_PROVE_RCU_DELAY */ cond_resched(); } @@ -1613,14 +1613,6 @@ static int __noreturn rcu_gp_kthread(void *arg) } } -static void 
rsp_wakeup(struct irq_work *work) -{ - struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work); - - /* Wake up rcu_gp_kthread() to start the grace period. */ - wake_up(&rsp->gp_wq); -} - /* * Start a new RCU grace period if warranted, re-initializing the hierarchy * in preparation for detecting the next grace period. The caller must hold @@ -1645,12 +1637,8 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, } rsp->gp_flags = RCU_GP_FLAG_INIT; - /* - * We can't do wakeups while holding the rnp->lock, as that - * could cause possible deadlocks with the rq->lock. Deter - * the wakeup to interrupt context. - */ - irq_work_queue(&rsp->wakeup_work); + /* Wake up rcu_gp_kthread() to start the grace period. */ + wake_up(&rsp->gp_wq); } /* @@ -3247,7 +3235,6 @@ static void __init rcu_init_one(struct rcu_state *rsp, rsp->rda = rda; init_waitqueue_head(&rsp->gp_wq); - init_irq_work(&rsp->wakeup_work, rsp_wakeup); rnp = rsp->level[rcu_num_lvls - 1]; for_each_possible_cpu(i) { while (i > rnp->grphi) diff --git a/trunk/kernel/rcutree.h b/trunk/kernel/rcutree.h index 4df503470e42..da77a8f57ff9 100644 --- a/trunk/kernel/rcutree.h +++ b/trunk/kernel/rcutree.h @@ -27,7 +27,6 @@ #include #include #include -#include /* * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and @@ -443,7 +442,6 @@ struct rcu_state { char *name; /* Name of structure. */ char abbr; /* Abbreviated name. */ struct list_head flavors; /* List of RCU flavors. */ - struct irq_work wakeup_work; /* Postponed wakeups */ }; /* Values for rcu_state structure's gp_flags field. */ diff --git a/trunk/kernel/rcutree_plugin.h b/trunk/kernel/rcutree_plugin.h index 3db5a375d8dd..170814dc418f 100644 --- a/trunk/kernel/rcutree_plugin.h +++ b/trunk/kernel/rcutree_plugin.h @@ -88,7 +88,7 @@ static void __init rcu_bootup_announce_oddness(void) #ifdef CONFIG_RCU_NOCB_CPU #ifndef CONFIG_RCU_NOCB_CPU_NONE if (!have_rcu_nocb_mask) { - zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL); + alloc_bootmem_cpumask_var(&rcu_nocb_mask); have_rcu_nocb_mask = true; } #ifdef CONFIG_RCU_NOCB_CPU_ZERO @@ -1667,7 +1667,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj) rdtp->last_accelerate = jiffies; /* Request timer delay depending on laziness, and round. */ - if (!rdtp->all_lazy) { + if (rdtp->all_lazy) { *dj = round_up(rcu_idle_gp_delay + jiffies, rcu_idle_gp_delay) - jiffies; } else { diff --git a/trunk/kernel/sched/core.c b/trunk/kernel/sched/core.c index e8b335016c52..58453b8272fd 100644 --- a/trunk/kernel/sched/core.c +++ b/trunk/kernel/sched/core.c @@ -633,19 +633,7 @@ void wake_up_nohz_cpu(int cpu) static inline bool got_nohz_idle_kick(void) { int cpu = smp_processor_id(); - - if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu))) - return false; - - if (idle_cpu(cpu) && !need_resched()) - return true; - - /* - * We can't run Idle Load Balance on this CPU for this time so we - * cancel it and clear NOHZ_BALANCE_KICK - */ - clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)); - return false; + return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)); } #else /* CONFIG_NO_HZ_COMMON */ @@ -1405,9 +1393,8 @@ static void sched_ttwu_pending(void) void scheduler_ipi(void) { - if (llist_empty(&this_rq()->wake_list) - && !tick_nohz_full_cpu(smp_processor_id()) - && !got_nohz_idle_kick()) + if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick() + && !tick_nohz_full_cpu(smp_processor_id())) return; /* @@ -1430,7 +1417,7 @@ void scheduler_ipi(void) /* * Check if someone kicked us for doing the nohz idle load balance. 
*/ - if (unlikely(got_nohz_idle_kick())) { + if (unlikely(got_nohz_idle_kick() && !need_resched())) { this_rq()->idle_balance = 1; raise_softirq_irqoff(SCHED_SOFTIRQ); } @@ -4758,7 +4745,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) */ idle->sched_class = &idle_sched_class; ftrace_graph_init_idle_task(idle, cpu); - vtime_init_idle(idle, cpu); + vtime_init_idle(idle); #if defined(CONFIG_SMP) sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); #endif diff --git a/trunk/kernel/sched/cputime.c b/trunk/kernel/sched/cputime.c index b5ccba22603b..cc2dc3eea8a3 100644 --- a/trunk/kernel/sched/cputime.c +++ b/trunk/kernel/sched/cputime.c @@ -747,17 +747,17 @@ void arch_vtime_task_switch(struct task_struct *prev) write_seqlock(¤t->vtime_seqlock); current->vtime_snap_whence = VTIME_SYS; - current->vtime_snap = sched_clock_cpu(smp_processor_id()); + current->vtime_snap = sched_clock(); write_sequnlock(¤t->vtime_seqlock); } -void vtime_init_idle(struct task_struct *t, int cpu) +void vtime_init_idle(struct task_struct *t) { unsigned long flags; write_seqlock_irqsave(&t->vtime_seqlock, flags); t->vtime_snap_whence = VTIME_SYS; - t->vtime_snap = sched_clock_cpu(cpu); + t->vtime_snap = sched_clock(); write_sequnlock_irqrestore(&t->vtime_seqlock, flags); } diff --git a/trunk/kernel/softirq.c b/trunk/kernel/softirq.c index 3d6833f125d3..b5197dcb0dad 100644 --- a/trunk/kernel/softirq.c +++ b/trunk/kernel/softirq.c @@ -195,12 +195,8 @@ void local_bh_enable_ip(unsigned long ip) EXPORT_SYMBOL(local_bh_enable_ip); /* - * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times, - * but break the loop if need_resched() is set or after 2 ms. - * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in - * certain cases, such as stop_machine(), jiffies may cease to - * increment and so we need the MAX_SOFTIRQ_RESTART limit as - * well to make sure we eventually return from this method. + * We restart softirq processing for at most 2 ms, + * and if need_resched() is not set. * * These limits have been established via experimentation. * The two things to balance is latency against fairness - @@ -208,7 +204,6 @@ EXPORT_SYMBOL(local_bh_enable_ip); * should not be able to lock up the box. */ #define MAX_SOFTIRQ_TIME msecs_to_jiffies(2) -#define MAX_SOFTIRQ_RESTART 10 asmlinkage void __do_softirq(void) { @@ -217,7 +212,6 @@ asmlinkage void __do_softirq(void) unsigned long end = jiffies + MAX_SOFTIRQ_TIME; int cpu; unsigned long old_flags = current->flags; - int max_restart = MAX_SOFTIRQ_RESTART; /* * Mask out PF_MEMALLOC s current task context is borrowed for the @@ -271,8 +265,7 @@ asmlinkage void __do_softirq(void) pending = local_softirq_pending(); if (pending) { - if (time_before(jiffies, end) && !need_resched() && - --max_restart) + if (time_before(jiffies, end) && !need_resched()) goto restart; wakeup_softirqd(); diff --git a/trunk/kernel/sys.c b/trunk/kernel/sys.c index 2bbd9a73b54c..b95d3c72ba21 100644 --- a/trunk/kernel/sys.c +++ b/trunk/kernel/sys.c @@ -362,29 +362,6 @@ int unregister_reboot_notifier(struct notifier_block *nb) } EXPORT_SYMBOL(unregister_reboot_notifier); -/* Add backwards compatibility for stable trees. 
*/ -#ifndef PF_NO_SETAFFINITY -#define PF_NO_SETAFFINITY PF_THREAD_BOUND -#endif - -static void migrate_to_reboot_cpu(void) -{ - /* The boot cpu is always logical cpu 0 */ - int cpu = 0; - - cpu_hotplug_disable(); - - /* Make certain the cpu I'm about to reboot on is online */ - if (!cpu_online(cpu)) - cpu = cpumask_first(cpu_online_mask); - - /* Prevent races with other tasks migrating this task */ - current->flags |= PF_NO_SETAFFINITY; - - /* Make certain I only run on the appropriate processor */ - set_cpus_allowed_ptr(current, cpumask_of(cpu)); -} - /** * kernel_restart - reboot the system * @cmd: pointer to buffer containing command to execute for restart @@ -396,7 +373,7 @@ static void migrate_to_reboot_cpu(void) void kernel_restart(char *cmd) { kernel_restart_prepare(cmd); - migrate_to_reboot_cpu(); + disable_nonboot_cpus(); syscore_shutdown(); if (!cmd) printk(KERN_EMERG "Restarting system.\n"); @@ -423,7 +400,7 @@ static void kernel_shutdown_prepare(enum system_states state) void kernel_halt(void) { kernel_shutdown_prepare(SYSTEM_HALT); - migrate_to_reboot_cpu(); + disable_nonboot_cpus(); syscore_shutdown(); printk(KERN_EMERG "System halted.\n"); kmsg_dump(KMSG_DUMP_HALT); @@ -442,7 +419,7 @@ void kernel_power_off(void) kernel_shutdown_prepare(SYSTEM_POWER_OFF); if (pm_power_off_prepare) pm_power_off_prepare(); - migrate_to_reboot_cpu(); + disable_nonboot_cpus(); syscore_shutdown(); printk(KERN_EMERG "Power down.\n"); kmsg_dump(KMSG_DUMP_POWEROFF); diff --git a/trunk/kernel/time/Kconfig b/trunk/kernel/time/Kconfig index 70f27e89012b..e4c07b0692bb 100644 --- a/trunk/kernel/time/Kconfig +++ b/trunk/kernel/time/Kconfig @@ -12,6 +12,11 @@ config CLOCKSOURCE_WATCHDOG config ARCH_CLOCKSOURCE_DATA bool +# Platforms has a persistent clock +config ALWAYS_USE_PERSISTENT_CLOCK + bool + default n + # Timekeeping vsyscall support config GENERIC_TIME_VSYSCALL bool diff --git a/trunk/kernel/time/ntp.c b/trunk/kernel/time/ntp.c index 8f5b3b98577b..12ff13a838c6 100644 --- a/trunk/kernel/time/ntp.c +++ b/trunk/kernel/time/ntp.c @@ -874,6 +874,7 @@ static void hardpps_update_phase(long error) void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) { struct pps_normtime pts_norm, freq_norm; + unsigned long flags; pts_norm = pps_normalize_ts(*phase_ts); diff --git a/trunk/kernel/time/tick-broadcast.c b/trunk/kernel/time/tick-broadcast.c index 20d6fba70652..206bbfb34e09 100644 --- a/trunk/kernel/time/tick-broadcast.c +++ b/trunk/kernel/time/tick-broadcast.c @@ -511,12 +511,6 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) } } - /* - * Remove the current cpu from the pending mask. The event is - * delivered immediately in tick_do_broadcast() ! 
- */ - cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask); - /* Take care of enforced broadcast requests */ cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask); cpumask_clear(tick_broadcast_force_mask); @@ -581,8 +575,8 @@ void tick_broadcast_oneshot_control(unsigned long reason) raw_spin_lock_irqsave(&tick_broadcast_lock, flags); if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { + WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask)); if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) { - WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask)); clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); /* * We only reprogram the broadcast timer if we @@ -599,6 +593,8 @@ void tick_broadcast_oneshot_control(unsigned long reason) } else { if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) { clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); + if (dev->next_event.tv64 == KTIME_MAX) + goto out; /* * The cpu which was handling the broadcast * timer marked this cpu in the broadcast @@ -612,11 +608,6 @@ void tick_broadcast_oneshot_control(unsigned long reason) tick_broadcast_pending_mask)) goto out; - /* - * Bail out if there is no next event. - */ - if (dev->next_event.tv64 == KTIME_MAX) - goto out; /* * If the pending bit is not set, then we are * either the CPU handling the broadcast @@ -701,6 +692,10 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) bc->event_handler = tick_handle_oneshot_broadcast; + /* Take the do_timer update */ + if (!tick_nohz_full_cpu(cpu)) + tick_do_timer_cpu = cpu; + /* * We must be careful here. There might be other CPUs * waiting for periodic broadcast. We need to set the @@ -791,11 +786,11 @@ bool tick_broadcast_oneshot_available(void) void __init tick_broadcast_init(void) { - zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT); - zalloc_cpumask_var(&tmpmask, GFP_NOWAIT); + alloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT); + alloc_cpumask_var(&tmpmask, GFP_NOWAIT); #ifdef CONFIG_TICK_ONESHOT - zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT); - zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT); - zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT); + alloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT); + alloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT); + alloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT); #endif } diff --git a/trunk/kernel/time/tick-sched.c b/trunk/kernel/time/tick-sched.c index 0cf1c1453181..bc67d4245e1d 100644 --- a/trunk/kernel/time/tick-sched.c +++ b/trunk/kernel/time/tick-sched.c @@ -306,7 +306,7 @@ static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb, * we can't safely shutdown that CPU. 
*/ if (have_nohz_full_mask && tick_do_timer_cpu == cpu) - return NOTIFY_BAD; + return -EINVAL; break; } return NOTIFY_OK; @@ -717,7 +717,6 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) if (unlikely(!cpu_online(cpu))) { if (cpu == tick_do_timer_cpu) tick_do_timer_cpu = TICK_DO_TIMER_NONE; - return false; } if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) @@ -1169,7 +1168,7 @@ void tick_cancel_sched_timer(int cpu) hrtimer_cancel(&ts->sched_timer); # endif - memset(ts, 0, sizeof(*ts)); + ts->nohz_mode = NOHZ_MODE_INACTIVE; } #endif diff --git a/trunk/kernel/time/timekeeping.c b/trunk/kernel/time/timekeeping.c index baeeb5c87cf1..98cd470bbe49 100644 --- a/trunk/kernel/time/timekeeping.c +++ b/trunk/kernel/time/timekeeping.c @@ -975,14 +975,6 @@ static int timekeeping_suspend(void) read_persistent_clock(&timekeeping_suspend_time); - /* - * On some systems the persistent_clock can not be detected at - * timekeeping_init by its return value, so if we see a valid - * value returned, update the persistent_clock_exists flag. - */ - if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec) - persistent_clock_exist = true; - raw_spin_lock_irqsave(&timekeeper_lock, flags); write_seqcount_begin(&timekeeper_seq); timekeeping_forward_now(tk); diff --git a/trunk/kernel/timer.c b/trunk/kernel/timer.c index 15ffdb3f1948..a860bba34412 100644 --- a/trunk/kernel/timer.c +++ b/trunk/kernel/timer.c @@ -1539,12 +1539,12 @@ static int __cpuinit init_timers_cpu(int cpu) boot_done = 1; base = &boot_tvec_bases; } - spin_lock_init(&base->lock); tvec_base_done[cpu] = 1; } else { base = per_cpu(tvec_bases, cpu); } + spin_lock_init(&base->lock); for (j = 0; j < TVN_SIZE; j++) { INIT_LIST_HEAD(base->tv5.vec + j); diff --git a/trunk/kernel/trace/ftrace.c b/trunk/kernel/trace/ftrace.c index 6c508ff33c62..b549b0f5b977 100644 --- a/trunk/kernel/trace/ftrace.c +++ b/trunk/kernel/trace/ftrace.c @@ -120,22 +120,22 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); /* * Traverse the ftrace_global_list, invoking all entries. The reason that we - * can use rcu_dereference_raw_notrace() is that elements removed from this list + * can use rcu_dereference_raw() is that elements removed from this list * are simply leaked, so there is no need to interact with a grace-period - * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle + * mechanism. The rcu_dereference_raw() calls are needed to handle * concurrent insertions into the ftrace_global_list. * * Silly Alpha and silly pointer-speculation compiler optimizations! */ #define do_for_each_ftrace_op(op, list) \ - op = rcu_dereference_raw_notrace(list); \ + op = rcu_dereference_raw(list); \ do /* * Optimized for just a single item in the list (as that is the normal case). 
*/ #define while_for_each_ftrace_op(op) \ - while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \ + while (likely(op = rcu_dereference_raw((op)->next)) && \ unlikely((op) != &ftrace_list_end)) static inline void ftrace_ops_init(struct ftrace_ops *ops) @@ -779,7 +779,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip) if (hlist_empty(hhd)) return NULL; - hlist_for_each_entry_rcu_notrace(rec, hhd, node) { + hlist_for_each_entry_rcu(rec, hhd, node) { if (rec->ip == ip) return rec; } @@ -1165,7 +1165,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) hhd = &hash->buckets[key]; - hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) { + hlist_for_each_entry_rcu(entry, hhd, hlist) { if (entry->ip == ip) return entry; } @@ -1422,8 +1422,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) struct ftrace_hash *notrace_hash; int ret; - filter_hash = rcu_dereference_raw_notrace(ops->filter_hash); - notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash); + filter_hash = rcu_dereference_raw(ops->filter_hash); + notrace_hash = rcu_dereference_raw(ops->notrace_hash); if ((ftrace_hash_empty(filter_hash) || ftrace_lookup_ip(filter_hash, ip)) && @@ -2920,7 +2920,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, * on the hash. rcu_read_lock is too dangerous here. */ preempt_disable_notrace(); - hlist_for_each_entry_rcu_notrace(entry, hhd, node) { + hlist_for_each_entry_rcu(entry, hhd, node) { if (entry->ip == ip) entry->ops->func(ip, parent_ip, &entry->data); } diff --git a/trunk/kernel/trace/ring_buffer.c b/trunk/kernel/trace/ring_buffer.c index e444ff88f0a4..b59aea2c48c2 100644 --- a/trunk/kernel/trace/ring_buffer.c +++ b/trunk/kernel/trace/ring_buffer.c @@ -620,9 +620,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, if (cpu == RING_BUFFER_ALL_CPUS) work = &buffer->irq_work; else { - if (!cpumask_test_cpu(cpu, buffer->cpumask)) - return -EINVAL; - cpu_buffer = buffer->buffers[cpu]; work = &cpu_buffer->irq_work; } diff --git a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c index e71a8be4a6ee..ae6fa2d1cdf7 100644 --- a/trunk/kernel/trace/trace.c +++ b/trunk/kernel/trace/trace.c @@ -652,6 +652,8 @@ static struct { ARCH_TRACE_CLOCKS }; +int trace_clock_id; + /* * trace_parser_get_init - gets the buffer for trace parser */ @@ -841,15 +843,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN); max_data->pid = tsk->pid; - /* - * If tsk == current, then use current_uid(), as that does not use - * RCU. The irq tracer can be called out of RCU scope. - */ - if (tsk == current) - max_data->uid = current_uid(); - else - max_data->uid = task_uid(tsk); - + max_data->uid = task_uid(tsk); max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; max_data->policy = tsk->policy; max_data->rt_priority = tsk->rt_priority; @@ -2824,7 +2818,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot) iter->iter_flags |= TRACE_FILE_ANNOTATE; /* Output in nanoseconds only if we are using a clock in nanoseconds. */ - if (trace_clocks[tr->clock_id].in_ns) + if (trace_clocks[trace_clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; /* stop the trace while dumping if we are not opening "snapshot" */ @@ -3823,7 +3817,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) iter->iter_flags |= TRACE_FILE_LAT_FMT; /* Output in nanoseconds only if we are using a clock in nanoseconds. 
*/ - if (trace_clocks[tr->clock_id].in_ns) + if (trace_clocks[trace_clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; iter->cpu_file = tc->cpu; @@ -5093,7 +5087,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf, cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "bytes: %ld\n", cnt); - if (trace_clocks[tr->clock_id].in_ns) { + if (trace_clocks[trace_clock_id].in_ns) { /* local or global for trace_clock */ t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); usec_rem = do_div(t, USEC_PER_SEC); @@ -6222,15 +6216,10 @@ __init static int tracer_alloc_buffers(void) trace_init_cmdlines(); - /* - * register_tracer() might reference current_trace, so it - * needs to be set before we register anything. This is - * just a bootstrap of current_trace anyway. - */ - global_trace.current_trace = &nop_trace; - register_tracer(&nop_trace); + global_trace.current_trace = &nop_trace; + /* All seems OK, enable tracing */ tracing_disabled = 0; diff --git a/trunk/kernel/trace/trace.h b/trunk/kernel/trace/trace.h index 20572ed88c5c..711ca7d3e7f1 100644 --- a/trunk/kernel/trace/trace.h +++ b/trunk/kernel/trace/trace.h @@ -700,6 +700,8 @@ enum print_line_t print_trace_line(struct trace_iterator *iter); extern unsigned long trace_flags; +extern int trace_clock_id; + /* Standard output formatting function used for function return traces */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER diff --git a/trunk/kernel/trace/trace_events.c b/trunk/kernel/trace/trace_events.c index 27963e2bf4bf..7a0cf68027cc 100644 --- a/trunk/kernel/trace/trace_events.c +++ b/trunk/kernel/trace/trace_events.c @@ -2072,10 +2072,8 @@ event_enable_func(struct ftrace_hash *hash, out_reg: /* Don't let event modules unload while probe registered */ ret = try_module_get(file->event_call->mod); - if (!ret) { - ret = -EBUSY; + if (!ret) goto out_free; - } ret = __ftrace_event_enable_disable(file, 1, 1); if (ret < 0) diff --git a/trunk/kernel/trace/trace_events_filter.c b/trunk/kernel/trace/trace_events_filter.c index e1b653f7e1ca..a6361178de5a 100644 --- a/trunk/kernel/trace/trace_events_filter.c +++ b/trunk/kernel/trace/trace_events_filter.c @@ -750,11 +750,7 @@ static int filter_set_pred(struct event_filter *filter, static void __free_preds(struct event_filter *filter) { - int i; - if (filter->preds) { - for (i = 0; i < filter->n_preds; i++) - kfree(filter->preds[i].ops); kfree(filter->preds); filter->preds = NULL; } diff --git a/trunk/kernel/trace/trace_kprobe.c b/trunk/kernel/trace/trace_kprobe.c index 9f46e98ba8f2..636d45fe69b3 100644 --- a/trunk/kernel/trace/trace_kprobe.c +++ b/trunk/kernel/trace/trace_kprobe.c @@ -35,7 +35,7 @@ struct trace_probe { const char *symbol; /* symbol name */ struct ftrace_event_class class; struct ftrace_event_call call; - struct ftrace_event_file * __rcu *files; + struct ftrace_event_file **files; ssize_t size; /* trace entry size */ unsigned int nr_args; struct probe_arg args[]; @@ -185,14 +185,9 @@ static struct trace_probe *find_trace_probe(const char *event, static int trace_probe_nr_files(struct trace_probe *tp) { - struct ftrace_event_file **file; + struct ftrace_event_file **file = tp->files; int ret = 0; - /* - * Since all tp->files updater is protected by probe_enable_lock, - * we don't need to lock an rcu_read_lock. 
- */ - file = rcu_dereference_raw(tp->files); if (file) while (*(file++)) ret++; @@ -214,10 +209,9 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) mutex_lock(&probe_enable_lock); if (file) { - struct ftrace_event_file **new, **old; + struct ftrace_event_file **new, **old = tp->files; int n = trace_probe_nr_files(tp); - old = rcu_dereference_raw(tp->files); /* 1 is for new one and 1 is for stopper */ new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *), GFP_KERNEL); @@ -257,17 +251,11 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) static int trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file) { - struct ftrace_event_file **files; int i; - /* - * Since all tp->files updater is protected by probe_enable_lock, - * we don't need to lock an rcu_read_lock. - */ - files = rcu_dereference_raw(tp->files); - if (files) { - for (i = 0; files[i]; i++) - if (files[i] == file) + if (tp->files) { + for (i = 0; tp->files[i]; i++) + if (tp->files[i] == file) return i; } @@ -286,11 +274,10 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) mutex_lock(&probe_enable_lock); if (file) { - struct ftrace_event_file **new, **old; + struct ftrace_event_file **new, **old = tp->files; int n = trace_probe_nr_files(tp); int i, j; - old = rcu_dereference_raw(tp->files); if (n == 0 || trace_probe_file_index(tp, file) < 0) { ret = -EINVAL; goto out_unlock; @@ -885,16 +872,9 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs, static __kprobes void kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs) { - /* - * Note: preempt is already disabled around the kprobe handler. - * However, we still need an smp_read_barrier_depends() corresponding - * to smp_wmb() in rcu_assign_pointer() to access the pointer. - */ - struct ftrace_event_file **file = rcu_dereference_raw(tp->files); - - if (unlikely(!file)) - return; + struct ftrace_event_file **file = tp->files; + /* Note: preempt is already disabled around the kprobe handler */ while (*file) { __kprobe_trace_func(tp, regs, *file); file++; @@ -945,16 +925,9 @@ static __kprobes void kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, struct pt_regs *regs) { - /* - * Note: preempt is already disabled around the kprobe handler. - * However, we still need an smp_read_barrier_depends() corresponding - * to smp_wmb() in rcu_assign_pointer() to access the pointer. 
- */ - struct ftrace_event_file **file = rcu_dereference_raw(tp->files); - - if (unlikely(!file)) - return; + struct ftrace_event_file **file = tp->files; + /* Note: preempt is already disabled around the kprobe handler */ while (*file) { __kretprobe_trace_func(tp, ri, regs, *file); file++; @@ -962,7 +935,7 @@ kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, } /* Event entry printers */ -static enum print_line_t +enum print_line_t print_kprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event) { @@ -998,7 +971,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags, return TRACE_TYPE_PARTIAL_LINE; } -static enum print_line_t +enum print_line_t print_kretprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event) { diff --git a/trunk/kernel/trace/trace_selftest.c b/trunk/kernel/trace/trace_selftest.c index 2901e3b88590..55e2cf66967b 100644 --- a/trunk/kernel/trace/trace_selftest.c +++ b/trunk/kernel/trace/trace_selftest.c @@ -1159,7 +1159,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) /* stop the tracing. */ tracing_stop(); /* check the trace buffer */ - ret = trace_test_buffer(&tr->trace_buffer, &count); + ret = trace_test_buffer(tr, &count); trace->reset(tr); tracing_start(); diff --git a/trunk/kernel/wait.c b/trunk/kernel/wait.c index ce0daa320a26..6698e0c04ead 100644 --- a/trunk/kernel/wait.c +++ b/trunk/kernel/wait.c @@ -287,91 +287,3 @@ wait_queue_head_t *bit_waitqueue(void *word, int bit) return &zone->wait_table[hash_long(val, zone->wait_table_bits)]; } EXPORT_SYMBOL(bit_waitqueue); - -/* - * Manipulate the atomic_t address to produce a better bit waitqueue table hash - * index (we're keying off bit -1, but that would produce a horrible hash - * value). - */ -static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p) -{ - if (BITS_PER_LONG == 64) { - unsigned long q = (unsigned long)p; - return bit_waitqueue((void *)(q & ~1), q & 1); - } - return bit_waitqueue(p, 0); -} - -static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync, - void *arg) -{ - struct wait_bit_key *key = arg; - struct wait_bit_queue *wait_bit - = container_of(wait, struct wait_bit_queue, wait); - atomic_t *val = key->flags; - - if (wait_bit->key.flags != key->flags || - wait_bit->key.bit_nr != key->bit_nr || - atomic_read(val) != 0) - return 0; - return autoremove_wake_function(wait, mode, sync, key); -} - -/* - * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting, - * the actions of __wait_on_atomic_t() are permitted return codes. Nonzero - * return codes halt waiting and return. 
- */ -static __sched -int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q, - int (*action)(atomic_t *), unsigned mode) -{ - atomic_t *val; - int ret = 0; - - do { - prepare_to_wait(wq, &q->wait, mode); - val = q->key.flags; - if (atomic_read(val) == 0) - ret = (*action)(val); - } while (!ret && atomic_read(val) != 0); - finish_wait(wq, &q->wait); - return ret; -} - -#define DEFINE_WAIT_ATOMIC_T(name, p) \ - struct wait_bit_queue name = { \ - .key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p), \ - .wait = { \ - .private = current, \ - .func = wake_atomic_t_function, \ - .task_list = \ - LIST_HEAD_INIT((name).wait.task_list), \ - }, \ - } - -__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *), - unsigned mode) -{ - wait_queue_head_t *wq = atomic_t_waitqueue(p); - DEFINE_WAIT_ATOMIC_T(wait, p); - - return __wait_on_atomic_t(wq, &wait, action, mode); -} -EXPORT_SYMBOL(out_of_line_wait_on_atomic_t); - -/** - * wake_up_atomic_t - Wake up a waiter on a atomic_t - * @word: The word being waited on, a kernel virtual address - * @bit: The bit of the word being waited on - * - * Wake up anyone waiting for the atomic_t to go to zero. - * - * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t - * check is done by the waiter's wake function, not the by the waker itself). - */ -void wake_up_atomic_t(atomic_t *p) -{ - __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR); -} -EXPORT_SYMBOL(wake_up_atomic_t); diff --git a/trunk/kernel/workqueue.c b/trunk/kernel/workqueue.c index ee8e29a2320c..4aa9f5bc6b2d 100644 --- a/trunk/kernel/workqueue.c +++ b/trunk/kernel/workqueue.c @@ -296,7 +296,7 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER); static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; struct workqueue_struct *system_wq __read_mostly; -EXPORT_SYMBOL(system_wq); +EXPORT_SYMBOL_GPL(system_wq); struct workqueue_struct *system_highpri_wq __read_mostly; EXPORT_SYMBOL_GPL(system_highpri_wq); struct workqueue_struct *system_long_wq __read_mostly; @@ -1411,7 +1411,7 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq, local_irq_restore(flags); return ret; } -EXPORT_SYMBOL(queue_work_on); +EXPORT_SYMBOL_GPL(queue_work_on); void delayed_work_timer_fn(unsigned long __data) { @@ -1485,7 +1485,7 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, local_irq_restore(flags); return ret; } -EXPORT_SYMBOL(queue_delayed_work_on); +EXPORT_SYMBOL_GPL(queue_delayed_work_on); /** * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU @@ -2059,7 +2059,6 @@ static bool manage_workers(struct worker *worker) if (unlikely(!mutex_trylock(&pool->manager_mutex))) { spin_unlock_irq(&pool->lock); mutex_lock(&pool->manager_mutex); - spin_lock_irq(&pool->lock); ret = true; } @@ -4312,12 +4311,6 @@ bool current_is_workqueue_rescuer(void) * no synchronization around this function and the test result is * unreliable and only useful as advisory hints or for debugging. * - * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU. - * Note that both per-cpu and unbound workqueues may be associated with - * multiple pool_workqueues which have separate congested states. A - * workqueue being congested on one CPU doesn't mean the workqueue is also - * contested on other CPUs / NUMA nodes. - * * RETURNS: * %true if congested, %false otherwise. 
*/ @@ -4328,9 +4321,6 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) rcu_read_lock_sched(); - if (cpu == WORK_CPU_UNBOUND) - cpu = smp_processor_id(); - if (!(wq->flags & WQ_UNBOUND)) pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); else @@ -4905,8 +4895,7 @@ static void __init wq_numa_init(void) BUG_ON(!tbl); for_each_node(node) - BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, - node_online(node) ? node : NUMA_NO_NODE)); + BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node)); for_each_possible_cpu(cpu) { node = cpu_to_node(cpu); diff --git a/trunk/lib/Makefile b/trunk/lib/Makefile index c55a037a354e..e9c52e1b853a 100644 --- a/trunk/lib/Makefile +++ b/trunk/lib/Makefile @@ -23,7 +23,7 @@ lib-y += kobject.o klist.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ - gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o \ + gcd.o lcm.o list_sort.o uuid.o flex_array.o \ bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o obj-y += string_helpers.o obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o diff --git a/trunk/lib/iovec.c b/trunk/lib/iovec.c deleted file mode 100644 index 454baa88bf27..000000000000 --- a/trunk/lib/iovec.c +++ /dev/null @@ -1,53 +0,0 @@ -#include -#include -#include - -/* - * Copy iovec to kernel. Returns -EFAULT on error. - * - * Note: this modifies the original iovec. - */ - -int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len) -{ - while (len > 0) { - if (iov->iov_len) { - int copy = min_t(unsigned int, len, iov->iov_len); - if (copy_from_user(kdata, iov->iov_base, copy)) - return -EFAULT; - len -= copy; - kdata += copy; - iov->iov_base += copy; - iov->iov_len -= copy; - } - iov++; - } - - return 0; -} -EXPORT_SYMBOL(memcpy_fromiovec); - -/* - * Copy kernel to iovec. Returns -EFAULT on error. - * - * Note: this modifies the original iovec. - */ - -int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len) -{ - while (len > 0) { - if (iov->iov_len) { - int copy = min_t(unsigned int, iov->iov_len, len); - if (copy_to_user(iov->iov_base, kdata, copy)) - return -EFAULT; - kdata += copy; - len -= copy; - iov->iov_len -= copy; - iov->iov_base += copy; - } - iov++; - } - - return 0; -} -EXPORT_SYMBOL(memcpy_toiovec); diff --git a/trunk/lib/klist.c b/trunk/lib/klist.c index 358a368a2947..0874e41609a6 100644 --- a/trunk/lib/klist.c +++ b/trunk/lib/klist.c @@ -193,10 +193,10 @@ static void klist_release(struct kref *kref) if (waiter->node != n) continue; - list_del(&waiter->list); waiter->woken = 1; mb(); wake_up_process(waiter->process); + list_del(&waiter->list); } spin_unlock(&klist_remove_lock); knode_set_klist(n, NULL); diff --git a/trunk/lib/mpi/longlong.h b/trunk/lib/mpi/longlong.h index d411355f238e..095ab157a521 100644 --- a/trunk/lib/mpi/longlong.h +++ b/trunk/lib/mpi/longlong.h @@ -318,8 +318,7 @@ extern UDItype __udiv_qrnnd(); "rM" ((USItype)(bh)), \ "rM" ((USItype)(al)), \ "rM" ((USItype)(bl))) -#if 0 && defined(_PA_RISC1_1) -/* xmpyu uses floating point register which is not allowed in Linux kernel. 
*/ +#if defined(_PA_RISC1_1) #define umul_ppmm(wh, wl, u, v) \ do { \ union {UDItype __ll; \ @@ -338,7 +337,7 @@ do { \ #define UMUL_TIME 40 #define UDIV_TIME 80 #endif -#if 0 /* #ifndef LONGLONG_STANDALONE */ +#ifndef LONGLONG_STANDALONE #define udiv_qrnnd(q, r, n1, n0, d) \ do { USItype __r; \ (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \ diff --git a/trunk/lib/mpi/mpicoder.c b/trunk/lib/mpi/mpicoder.c index 4cc6442733f4..5f9c44cdf1f5 100644 --- a/trunk/lib/mpi/mpicoder.c +++ b/trunk/lib/mpi/mpicoder.c @@ -37,7 +37,7 @@ MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes) mpi_limb_t a; MPI val = NULL; - while (nbytes > 0 && buffer[0] == 0) { + while (nbytes >= 0 && buffer[0] == 0) { buffer++; nbytes--; } diff --git a/trunk/mm/frontswap.c b/trunk/mm/frontswap.c index 1b24bdcb3197..538367ef1372 100644 --- a/trunk/mm/frontswap.c +++ b/trunk/mm/frontswap.c @@ -319,7 +319,7 @@ void __frontswap_invalidate_area(unsigned type) return; frontswap_ops->invalidate_area(type); atomic_set(&sis->frontswap_pages, 0); - bitmap_zero(sis->frontswap_map, sis->max); + memset(sis->frontswap_map, 0, sis->max / sizeof(long)); } clear_bit(type, need_init); } diff --git a/trunk/mm/huge_memory.c b/trunk/mm/huge_memory.c index 362c329b83fe..03a89a2f464b 100644 --- a/trunk/mm/huge_memory.c +++ b/trunk/mm/huge_memory.c @@ -2325,12 +2325,7 @@ static void collapse_huge_page(struct mm_struct *mm, pte_unmap(pte); spin_lock(&mm->page_table_lock); BUG_ON(!pmd_none(*pmd)); - /* - * We can only use set_pmd_at when establishing - * hugepmds and never for establishing regular pmds that - * points to regular pagetables. Use pmd_populate for that - */ - pmd_populate(mm, pmd, pmd_pgtable(_pmd)); + set_pmd_at(mm, address, pmd, _pmd); spin_unlock(&mm->page_table_lock); anon_vma_unlock_write(vma->anon_vma); goto out; diff --git a/trunk/mm/hugetlb.c b/trunk/mm/hugetlb.c index e2bfbf73a551..f8feeeca6686 100644 --- a/trunk/mm/hugetlb.c +++ b/trunk/mm/hugetlb.c @@ -2839,7 +2839,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (ptep) { entry = huge_ptep_get(ptep); if (unlikely(is_hugetlb_entry_migration(entry))) { - migration_entry_wait_huge(mm, ptep); + migration_entry_wait(mm, (pmd_t *)ptep, address); return 0; } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) return VM_FAULT_HWPOISON_LARGE | diff --git a/trunk/mm/memcontrol.c b/trunk/mm/memcontrol.c index 194721839cf5..cb1c9dedf9b6 100644 --- a/trunk/mm/memcontrol.c +++ b/trunk/mm/memcontrol.c @@ -1199,6 +1199,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, mz = mem_cgroup_zoneinfo(root, nid, zid); iter = &mz->reclaim_iter[reclaim->priority]; + last_visited = iter->last_visited; if (prev && reclaim->generation != iter->generation) { iter->last_visited = NULL; goto out_unlock; @@ -1217,12 +1218,13 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, * is alive. 
*/ dead_count = atomic_read(&root->dead_count); - if (dead_count == iter->last_dead_count) { - smp_rmb(); - last_visited = iter->last_visited; - if (last_visited && - !css_tryget(&last_visited->css)) + smp_rmb(); + last_visited = iter->last_visited; + if (last_visited) { + if ((dead_count != iter->last_dead_count) || + !css_tryget(&last_visited->css)) { last_visited = NULL; + } } } @@ -3139,6 +3141,8 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups) return -ENOMEM; } + INIT_WORK(&s->memcg_params->destroy, + kmem_cache_destroy_work_func); s->memcg_params->is_root_cache = true; /* @@ -4104,6 +4108,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype, if (mem_cgroup_disabled()) return NULL; + VM_BUG_ON(PageSwapCache(page)); + if (PageTransHuge(page)) { nr_pages <<= compound_order(page); VM_BUG_ON(!PageTransHuge(page)); @@ -4199,18 +4205,6 @@ void mem_cgroup_uncharge_page(struct page *page) if (page_mapped(page)) return; VM_BUG_ON(page->mapping && !PageAnon(page)); - /* - * If the page is in swap cache, uncharge should be deferred - * to the swap path, which also properly accounts swap usage - * and handles memcg lifetime. - * - * Note that this check is not stable and reclaim may add the - * page to swap cache at any time after this. However, if the - * page is not in swap cache by the time page->mapcount hits - * 0, there won't be any page table references to the swap - * slot, and reclaim will free it and not actually write the - * page to disk. - */ if (PageSwapCache(page)) return; __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false); diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c index 61a262b08e53..6dc1882fbd72 100644 --- a/trunk/mm/memory.c +++ b/trunk/mm/memory.c @@ -220,6 +220,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) tlb->start = -1UL; tlb->end = 0; tlb->need_flush = 0; + tlb->fast_mode = (num_possible_cpus() == 1); tlb->local.next = NULL; tlb->local.nr = 0; tlb->local.max = ARRAY_SIZE(tlb->__pages); @@ -243,6 +244,9 @@ void tlb_flush_mmu(struct mmu_gather *tlb) tlb_table_flush(tlb); #endif + if (tlb_fast_mode(tlb)) + return; + for (batch = &tlb->local; batch; batch = batch->next) { free_pages_and_swap_cache(batch->pages, batch->nr); batch->nr = 0; @@ -284,6 +288,11 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) VM_BUG_ON(!tlb->need_flush); + if (tlb_fast_mode(tlb)) { + free_page_and_swap_cache(page); + return 1; /* avoid calling tlb_flush_mmu() */ + } + batch = tlb->active; batch->pages[batch->nr++] = page; if (batch->nr == batch->max) { diff --git a/trunk/mm/memory_hotplug.c b/trunk/mm/memory_hotplug.c index 1ad92b46753e..a221fac1f47d 100644 --- a/trunk/mm/memory_hotplug.c +++ b/trunk/mm/memory_hotplug.c @@ -720,12 +720,9 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn, start = phys_start_pfn << PAGE_SHIFT; size = nr_pages * PAGE_SIZE; ret = release_mem_region_adjustable(&iomem_resource, start, size); - if (ret) { - resource_size_t endres = start + size - 1; - - pr_warn("Unable to release resource <%pa-%pa> (%d)\n", - &start, &endres, ret); - } + if (ret) + pr_warn("Unable to release resource <%016llx-%016llx> (%d)\n", + start, start + size - 1, ret); sections_to_remove = nr_pages / PAGES_PER_SECTION; for (i = 0; i < sections_to_remove; i++) { diff --git a/trunk/mm/migrate.c b/trunk/mm/migrate.c index 6f0c24438bba..27ed22579fd9 100644 --- a/trunk/mm/migrate.c +++ b/trunk/mm/migrate.c @@ -165,7 +165,7 @@ static int 
remove_migration_pte(struct page *new, struct vm_area_struct *vma, pte = arch_make_huge_pte(pte, vma, new, 0); } #endif - flush_dcache_page(new); + flush_cache_page(vma, addr, pte_pfn(pte)); set_pte_at(mm, addr, ptep, pte); if (PageHuge(new)) { @@ -200,14 +200,15 @@ static void remove_migration_ptes(struct page *old, struct page *new) * get to the page and wait until migration is finished. * When we return from this function the fault will be retried. */ -static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, - spinlock_t *ptl) +void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, + unsigned long address) { - pte_t pte; + pte_t *ptep, pte; + spinlock_t *ptl; swp_entry_t entry; struct page *page; - spin_lock(ptl); + ptep = pte_offset_map_lock(mm, pmd, address, &ptl); pte = *ptep; if (!is_swap_pte(pte)) goto out; @@ -235,20 +236,6 @@ static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, pte_unmap_unlock(ptep, ptl); } -void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, - unsigned long address) -{ - spinlock_t *ptl = pte_lockptr(mm, pmd); - pte_t *ptep = pte_offset_map(pmd, address); - __migration_entry_wait(mm, ptep, ptl); -} - -void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte) -{ - spinlock_t *ptl = &(mm)->page_table_lock; - __migration_entry_wait(mm, pte, ptl); -} - #ifdef CONFIG_BLOCK /* Returns true if all buffers are successfully locked */ static bool buffer_migrate_lock_buffers(struct buffer_head *head, diff --git a/trunk/mm/mmu_notifier.c b/trunk/mm/mmu_notifier.c index 6725ff183374..be04122fb277 100644 --- a/trunk/mm/mmu_notifier.c +++ b/trunk/mm/mmu_notifier.c @@ -40,44 +40,48 @@ void __mmu_notifier_release(struct mm_struct *mm) int id; /* - * SRCU here will block mmu_notifier_unregister until - * ->release returns. + * srcu_read_lock() here will block synchronize_srcu() in + * mmu_notifier_unregister() until all registered + * ->release() callouts this function makes have + * returned. */ id = srcu_read_lock(&srcu); - hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) - /* - * If ->release runs before mmu_notifier_unregister it must be - * handled, as it's the only way for the driver to flush all - * existing sptes and stop the driver from establishing any more - * sptes before all the pages in the mm are freed. - */ - if (mn->ops->release) - mn->ops->release(mn, mm); - srcu_read_unlock(&srcu, id); - spin_lock(&mm->mmu_notifier_mm->lock); while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) { mn = hlist_entry(mm->mmu_notifier_mm->list.first, struct mmu_notifier, hlist); + /* - * We arrived before mmu_notifier_unregister so - * mmu_notifier_unregister will do nothing other than to wait - * for ->release to finish and for mmu_notifier_unregister to - * return. + * Unlink. This will prevent mmu_notifier_unregister() + * from also making the ->release() callout. */ hlist_del_init_rcu(&mn->hlist); + spin_unlock(&mm->mmu_notifier_mm->lock); + + /* + * Clear sptes. (see 'release' description in mmu_notifier.h) + */ + if (mn->ops->release) + mn->ops->release(mn, mm); + + spin_lock(&mm->mmu_notifier_mm->lock); } spin_unlock(&mm->mmu_notifier_mm->lock); /* - * synchronize_srcu here prevents mmu_notifier_release from returning to - * exit_mmap (which would proceed with freeing all pages in the mm) - * until the ->release method returns, if it was invoked by - * mmu_notifier_unregister. - * - * The mmu_notifier_mm can't go away from under us because one mm_count - * is held by exit_mmap. 
+ * All callouts to ->release() which we have done are complete. + * Allow synchronize_srcu() in mmu_notifier_unregister() to complete + */ + srcu_read_unlock(&srcu, id); + + /* + * mmu_notifier_unregister() may have unlinked a notifier and may + * still be calling out to it. Additionally, other notifiers + * may have been active via vmtruncate() et. al. Block here + * to ensure that all notifier callouts for this mm have been + * completed and the sptes are really cleaned up before returning + * to exit_mmap(). */ synchronize_srcu(&srcu); } @@ -288,34 +292,31 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm) { BUG_ON(atomic_read(&mm->mm_count) <= 0); + spin_lock(&mm->mmu_notifier_mm->lock); if (!hlist_unhashed(&mn->hlist)) { - /* - * SRCU here will force exit_mmap to wait for ->release to - * finish before freeing the pages. - */ int id; - id = srcu_read_lock(&srcu); /* - * exit_mmap will block in mmu_notifier_release to guarantee - * that ->release is called before freeing the pages. + * Ensure we synchronize up with __mmu_notifier_release(). */ + id = srcu_read_lock(&srcu); + + hlist_del_rcu(&mn->hlist); + spin_unlock(&mm->mmu_notifier_mm->lock); + if (mn->ops->release) mn->ops->release(mn, mm); - srcu_read_unlock(&srcu, id); - spin_lock(&mm->mmu_notifier_mm->lock); /* - * Can not use list_del_rcu() since __mmu_notifier_release - * can delete it before we hold the lock. + * Allow __mmu_notifier_release() to complete. */ - hlist_del_init_rcu(&mn->hlist); + srcu_read_unlock(&srcu, id); + } else spin_unlock(&mm->mmu_notifier_mm->lock); - } /* - * Wait for any running method to finish, of course including - * ->release if it was run by mmu_notifier_relase instead of us. + * Wait for any running method to finish, including ->release() if it + * was run by __mmu_notifier_release() instead of us. */ synchronize_srcu(&srcu); diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c index c3edb624fccf..98cbdf6e5532 100644 --- a/trunk/mm/page_alloc.c +++ b/trunk/mm/page_alloc.c @@ -1628,7 +1628,6 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark, long min = mark; long lowmem_reserve = z->lowmem_reserve[classzone_idx]; int o; - long free_cma = 0; free_pages -= (1 << order) - 1; if (alloc_flags & ALLOC_HIGH) @@ -1638,10 +1637,9 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark, #ifdef CONFIG_CMA /* If allocation can't use CMA areas don't use free CMA pages */ if (!(alloc_flags & ALLOC_CMA)) - free_cma = zone_page_state(z, NR_FREE_CMA_PAGES); + free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES); #endif - - if (free_pages - free_cma <= min + lowmem_reserve) + if (free_pages <= min + lowmem_reserve) return false; for (o = 0; o < order; o++) { /* At the next order, this order's pages become unavailable */ @@ -5160,7 +5158,7 @@ unsigned long free_reserved_area(unsigned long start, unsigned long end, for (pages = 0; pos < end; pos += PAGE_SIZE, pages++) { if (poison) memset((void *)pos, poison, PAGE_SIZE); - free_reserved_page(virt_to_page((void *)pos)); + free_reserved_page(virt_to_page(pos)); } if (pages && s) diff --git a/trunk/mm/pagewalk.c b/trunk/mm/pagewalk.c index 5da2cbcfdbb5..35aa294656cd 100644 --- a/trunk/mm/pagewalk.c +++ b/trunk/mm/pagewalk.c @@ -127,7 +127,28 @@ static int walk_hugetlb_range(struct vm_area_struct *vma, return 0; } +static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk) +{ + struct vm_area_struct *vma; + + /* We don't need vma lookup at all. 
*/ + if (!walk->hugetlb_entry) + return NULL; + + VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem)); + vma = find_vma(walk->mm, addr); + if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma)) + return vma; + + return NULL; +} + #else /* CONFIG_HUGETLB_PAGE */ +static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk) +{ + return NULL; +} + static int walk_hugetlb_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct mm_walk *walk) @@ -177,53 +198,30 @@ int walk_page_range(unsigned long addr, unsigned long end, if (!walk->mm) return -EINVAL; - VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem)); - pgd = pgd_offset(walk->mm, addr); do { - struct vm_area_struct *vma = NULL; + struct vm_area_struct *vma; next = pgd_addr_end(addr, end); /* - * This function was not intended to be vma based. - * But there are vma special cases to be handled: - * - hugetlb vma's - * - VM_PFNMAP vma's + * handle hugetlb vma individually because pagetable walk for + * the hugetlb page is dependent on the architecture and + * we can't handled it in the same manner as non-huge pages. */ - vma = find_vma(walk->mm, addr); + vma = hugetlb_vma(addr, walk); if (vma) { - /* - * There are no page structures backing a VM_PFNMAP - * range, so do not allow split_huge_page_pmd(). - */ - if ((vma->vm_start <= addr) && - (vma->vm_flags & VM_PFNMAP)) { + if (vma->vm_end < next) next = vma->vm_end; - pgd = pgd_offset(walk->mm, next); - continue; - } /* - * Handle hugetlb vma individually because pagetable - * walk for the hugetlb page is dependent on the - * architecture and we can't handled it in the same - * manner as non-huge pages. + * Hugepage is very tightly coupled with vma, so + * walk through hugetlb entries within a given vma. */ - if (walk->hugetlb_entry && (vma->vm_start <= addr) && - is_vm_hugetlb_page(vma)) { - if (vma->vm_end < next) - next = vma->vm_end; - /* - * Hugepage is very tightly coupled with vma, - * so walk through hugetlb entries within a - * given vma. - */ - err = walk_hugetlb_range(vma, addr, next, walk); - if (err) - break; - pgd = pgd_offset(walk->mm, next); - continue; - } + err = walk_hugetlb_range(vma, addr, next, walk); + if (err) + break; + pgd = pgd_offset(walk->mm, next); + continue; } if (pgd_none_or_clear_bad(pgd)) { diff --git a/trunk/mm/readahead.c b/trunk/mm/readahead.c index 829a77c62834..daed28dd5830 100644 --- a/trunk/mm/readahead.c +++ b/trunk/mm/readahead.c @@ -48,7 +48,7 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping, if (!trylock_page(page)) BUG(); page->mapping = mapping; - do_invalidatepage(page, 0, PAGE_CACHE_SIZE); + do_invalidatepage(page, 0); page->mapping = NULL; unlock_page(page); } diff --git a/trunk/mm/slab_common.c b/trunk/mm/slab_common.c index 2d414508e9ec..ff3218a0f5e1 100644 --- a/trunk/mm/slab_common.c +++ b/trunk/mm/slab_common.c @@ -373,10 +373,8 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags) { int index; - if (size > KMALLOC_MAX_SIZE) { - WARN_ON_ONCE(!(flags & __GFP_NOWARN)); + if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE)) return NULL; - } if (size <= 192) { if (!size) diff --git a/trunk/mm/swap_state.c b/trunk/mm/swap_state.c index f24ab0dff554..b3d40dcf3624 100644 --- a/trunk/mm/swap_state.c +++ b/trunk/mm/swap_state.c @@ -336,24 +336,8 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, * Swap entry may have been freed since our caller observed it. 
*/ err = swapcache_prepare(entry); - if (err == -EEXIST) { + if (err == -EEXIST) { /* seems racy */ radix_tree_preload_end(); - /* - * We might race against get_swap_page() and stumble - * across a SWAP_HAS_CACHE swap_map entry whose page - * has not been brought into the swapcache yet, while - * the other end is scheduled away waiting on discard - * I/O completion at scan_swap_map(). - * - * In order to avoid turning this transitory state - * into a permanent loop around this -EEXIST case - * if !CONFIG_PREEMPT and the I/O completion happens - * to be waiting on the CPU waitqueue where we are now - * busy looping, we just conditionally invoke the - * scheduler here, if there are some more important - * tasks to run. - */ - cond_resched(); continue; } if (err) { /* swp entry is obsolete ? */ diff --git a/trunk/mm/swapfile.c b/trunk/mm/swapfile.c index 746af55b8455..6c340d908b27 100644 --- a/trunk/mm/swapfile.c +++ b/trunk/mm/swapfile.c @@ -2116,7 +2116,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) } /* frontswap enabled? set up bit-per-page map for frontswap */ if (frontswap_enabled) - frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long)); + frontswap_map = vzalloc(maxpages / sizeof(long)); if (p->bdev) { if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { diff --git a/trunk/mm/truncate.c b/trunk/mm/truncate.c index e2e8a8a7eb9d..c75b736e54b7 100644 --- a/trunk/mm/truncate.c +++ b/trunk/mm/truncate.c @@ -26,8 +26,7 @@ /** * do_invalidatepage - invalidate part or all of a page * @page: the page which is affected - * @offset: start of the range to invalidate - * @length: length of the range to invalidate + * @offset: the index of the truncation point * * do_invalidatepage() is called when all or part of the page has become * invalidated by a truncate operation. @@ -38,18 +37,24 @@ * point. Because the caller is about to free (and possibly reuse) those * blocks on-disk. */ -void do_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +void do_invalidatepage(struct page *page, unsigned long offset) { - void (*invalidatepage)(struct page *, unsigned int, unsigned int); - + void (*invalidatepage)(struct page *, unsigned long); invalidatepage = page->mapping->a_ops->invalidatepage; #ifdef CONFIG_BLOCK if (!invalidatepage) invalidatepage = block_invalidatepage; #endif if (invalidatepage) - (*invalidatepage)(page, offset, length); + (*invalidatepage)(page, offset); +} + +static inline void truncate_partial_page(struct page *page, unsigned partial) +{ + zero_user_segment(page, partial, PAGE_CACHE_SIZE); + cleancache_invalidate_page(page->mapping, page); + if (page_has_private(page)) + do_invalidatepage(page, partial); } /* @@ -98,7 +103,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page) return -EIO; if (page_has_private(page)) - do_invalidatepage(page, 0, PAGE_CACHE_SIZE); + do_invalidatepage(page, 0); cancel_dirty_page(page, PAGE_CACHE_SIZE); @@ -180,11 +185,11 @@ int invalidate_inode_page(struct page *page) * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets * @mapping: mapping to truncate * @lstart: offset from which to truncate - * @lend: offset to which to truncate (inclusive) + * @lend: offset to which to truncate * * Truncate the page cache, removing the pages that are between - * specified offsets (and zeroing out partial pages - * if lstart or lend + 1 is not page aligned). 
+ * specified offsets (and zeroing out partial page + * (if lstart is not page aligned)). * * Truncate takes two passes - the first pass is nonblocking. It will not * block on page locks and it will not block on writeback. The second pass @@ -195,58 +200,35 @@ int invalidate_inode_page(struct page *page) * We pass down the cache-hot hint to the page freeing code. Even if the * mapping is large, it is probably the case that the final pages are the most * recently touched, and freeing happens in ascending file offset order. - * - * Note that since ->invalidatepage() accepts range to invalidate - * truncate_inode_pages_range is able to handle cases where lend + 1 is not - * page aligned properly. */ void truncate_inode_pages_range(struct address_space *mapping, loff_t lstart, loff_t lend) { - pgoff_t start; /* inclusive */ - pgoff_t end; /* exclusive */ - unsigned int partial_start; /* inclusive */ - unsigned int partial_end; /* exclusive */ - struct pagevec pvec; - pgoff_t index; - int i; + const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; + const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1); + struct pagevec pvec; + pgoff_t index; + pgoff_t end; + int i; cleancache_invalidate_inode(mapping); if (mapping->nrpages == 0) return; - /* Offsets within partial pages */ - partial_start = lstart & (PAGE_CACHE_SIZE - 1); - partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1); - - /* - * 'start' and 'end' always covers the range of pages to be fully - * truncated. Partial pages are covered with 'partial_start' at the - * start of the range and 'partial_end' at the end of the range. - * Note that 'end' is exclusive while 'lend' is inclusive. - */ - start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - if (lend == -1) - /* - * lend == -1 indicates end-of-file so we have to set 'end' - * to the highest possible pgoff_t and since the type is - * unsigned we're using -1. 
- */ - end = -1; - else - end = (lend + 1) >> PAGE_CACHE_SHIFT; + BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1)); + end = (lend >> PAGE_CACHE_SHIFT); pagevec_init(&pvec, 0); index = start; - while (index < end && pagevec_lookup(&pvec, mapping, index, - min(end - index, (pgoff_t)PAGEVEC_SIZE))) { + while (index <= end && pagevec_lookup(&pvec, mapping, index, + min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) { mem_cgroup_uncharge_start(); for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; /* We rely upon deletion not changing page->index */ index = page->index; - if (index >= end) + if (index > end) break; if (!trylock_page(page)) @@ -265,56 +247,27 @@ void truncate_inode_pages_range(struct address_space *mapping, index++; } - if (partial_start) { + if (partial) { struct page *page = find_lock_page(mapping, start - 1); if (page) { - unsigned int top = PAGE_CACHE_SIZE; - if (start > end) { - /* Truncation within a single page */ - top = partial_end; - partial_end = 0; - } wait_on_page_writeback(page); - zero_user_segment(page, partial_start, top); - cleancache_invalidate_page(mapping, page); - if (page_has_private(page)) - do_invalidatepage(page, partial_start, - top - partial_start); + truncate_partial_page(page, partial); unlock_page(page); page_cache_release(page); } } - if (partial_end) { - struct page *page = find_lock_page(mapping, end); - if (page) { - wait_on_page_writeback(page); - zero_user_segment(page, 0, partial_end); - cleancache_invalidate_page(mapping, page); - if (page_has_private(page)) - do_invalidatepage(page, 0, - partial_end); - unlock_page(page); - page_cache_release(page); - } - } - /* - * If the truncation happened within a single page no pages - * will be released, just zeroed, so we can bail out now. - */ - if (start >= end) - return; index = start; for ( ; ; ) { cond_resched(); if (!pagevec_lookup(&pvec, mapping, index, - min(end - index, (pgoff_t)PAGEVEC_SIZE))) { + min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) { if (index == start) break; index = start; continue; } - if (index == start && pvec.pages[0]->index >= end) { + if (index == start && pvec.pages[0]->index > end) { pagevec_release(&pvec); break; } @@ -324,7 +277,7 @@ void truncate_inode_pages_range(struct address_space *mapping, /* We rely upon deletion not changing page->index */ index = page->index; - if (index >= end) + if (index > end) break; lock_page(page); @@ -645,8 +598,10 @@ void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend) * This rounding is currently just for example: unmap_mapping_range * expands its hole outwards, whereas we want it to contract the hole * inwards. However, existing callers of truncate_pagecache_range are - * doing their own page rounding first. Note that unmap_mapping_range - * allows holelen 0 for all, and we allow lend -1 for end of file. + * doing their own page rounding first; and truncate_inode_pages_range + * currently BUGs if lend is not pagealigned-1 (it handles partial + * page at start of hole, but not partial page at end of hole). Note + * unmap_mapping_range allows holelen 0 for all, and we allow lend -1. 
*/ /* diff --git a/trunk/net/802/mrp.c b/trunk/net/802/mrp.c index 1eb05d80b07b..e085bcc754f6 100644 --- a/trunk/net/802/mrp.c +++ b/trunk/net/802/mrp.c @@ -871,10 +871,10 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl) */ del_timer_sync(&app->join_timer); - spin_lock_bh(&app->lock); + spin_lock(&app->lock); mrp_mad_event(app, MRP_EVENT_TX); mrp_pdu_queue(app); - spin_unlock_bh(&app->lock); + spin_unlock(&app->lock); mrp_queue_xmit(app); diff --git a/trunk/net/9p/client.c b/trunk/net/9p/client.c index addc116cecf0..8eb75425e6e6 100644 --- a/trunk/net/9p/client.c +++ b/trunk/net/9p/client.c @@ -562,19 +562,36 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req, if (!p9_is_proto_dotl(c)) { /* Error is reported in string format */ - int len; - /* 7 = header size for RERROR; */ - int inline_len = in_hdrlen - 7; + uint16_t len; + /* 7 = header size for RERROR, 2 is the size of string len; */ + int inline_len = in_hdrlen - (7 + 2); - len = req->rc->size - req->rc->offset; - if (len > (P9_ZC_HDR_SZ - 7)) { - err = -EFAULT; + /* Read the size of error string */ + err = p9pdu_readf(req->rc, c->proto_version, "w", &len); + if (err) + goto out_err; + + ename = kmalloc(len + 1, GFP_NOFS); + if (!ename) { + err = -ENOMEM; goto out_err; } + if (len <= inline_len) { + /* We have error in protocol buffer itself */ + if (pdu_read(req->rc, ename, len)) { + err = -EFAULT; + goto out_free; - ename = &req->rc->sdata[req->rc->offset]; - if (len > inline_len) { - /* We have error in external buffer */ + } + } else { + /* + * Part of the data is in user space buffer. + */ + if (pdu_read(req->rc, ename, inline_len)) { + err = -EFAULT; + goto out_free; + + } if (kern_buf) { memcpy(ename + inline_len, uidata, len - inline_len); @@ -583,19 +600,19 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req, uidata, len - inline_len); if (err) { err = -EFAULT; - goto out_err; + goto out_free; } } } - ename = NULL; - err = p9pdu_readf(req->rc, c->proto_version, "s?d", - &ename, &ecode); - if (err) - goto out_err; - - if (p9_is_proto_dotu(c)) + ename[len] = 0; + if (p9_is_proto_dotu(c)) { + /* For dotu we also have error code */ + err = p9pdu_readf(req->rc, + c->proto_version, "d", &ecode); + if (err) + goto out_free; err = -ecode; - + } if (!err || !IS_ERR_VALUE(err)) { err = p9_errstr2errno(ename, strlen(ename)); @@ -611,6 +628,8 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req, } return err; +out_free: + kfree(ename); out_err: p9_debug(P9_DEBUG_ERROR, "couldn't parse error%d\n", err); return err; diff --git a/trunk/net/batman-adv/bat_iv_ogm.c b/trunk/net/batman-adv/bat_iv_ogm.c index f680ee101878..071f288b77a8 100644 --- a/trunk/net/batman-adv/bat_iv_ogm.c +++ b/trunk/net/batman-adv/bat_iv_ogm.c @@ -29,21 +29,6 @@ #include "bat_algo.h" #include "network-coding.h" -/** - * batadv_dup_status - duplicate status - * @BATADV_NO_DUP: the packet is a duplicate - * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the - * neighbor) - * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor - * @BATADV_PROTECTED: originator is currently protected (after reboot) - */ -enum batadv_dup_status { - BATADV_NO_DUP = 0, - BATADV_ORIG_DUP, - BATADV_NEIGH_DUP, - BATADV_PROTECTED, -}; - static struct batadv_neigh_node * batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface, const uint8_t *neigh_addr, @@ -665,7 +650,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, const struct batadv_ogm_packet 
*batadv_ogm_packet, struct batadv_hard_iface *if_incoming, const unsigned char *tt_buff, - enum batadv_dup_status dup_status) + int is_duplicate) { struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; struct batadv_neigh_node *router = NULL; @@ -691,7 +676,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, continue; } - if (dup_status != BATADV_NO_DUP) + if (is_duplicate) continue; spin_lock_bh(&tmp_neigh_node->lq_update_lock); @@ -733,7 +718,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv); spin_unlock_bh(&neigh_node->lq_update_lock); - if (dup_status == BATADV_NO_DUP) { + if (!is_duplicate) { orig_node->last_ttl = batadv_ogm_packet->header.ttl; neigh_node->last_ttl = batadv_ogm_packet->header.ttl; } @@ -917,16 +902,15 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, return ret; } -/** - * batadv_iv_ogm_update_seqnos - process a batman packet for all interfaces, - * adjust the sequence number and find out whether it is a duplicate - * @ethhdr: ethernet header of the packet - * @batadv_ogm_packet: OGM packet to be considered - * @if_incoming: interface on which the OGM packet was received - * - * Returns duplicate status as enum batadv_dup_status +/* processes a batman packet for all interfaces, adjusts the sequence number and + * finds out whether it is a duplicate. + * returns: + * 1 the packet is a duplicate + * 0 the packet has not yet been received + * -1 the packet is old and has been received while the seqno window + * was protected. Caller should drop it. */ -static enum batadv_dup_status +static int batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, const struct batadv_ogm_packet *batadv_ogm_packet, const struct batadv_hard_iface *if_incoming) @@ -934,18 +918,17 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_orig_node *orig_node; struct batadv_neigh_node *tmp_neigh_node; - int is_dup; + int is_duplicate = 0; int32_t seq_diff; int need_update = 0; - int set_mark; - enum batadv_dup_status ret = BATADV_NO_DUP; + int set_mark, ret = -1; uint32_t seqno = ntohl(batadv_ogm_packet->seqno); uint8_t *neigh_addr; uint8_t packet_count; orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig); if (!orig_node) - return BATADV_NO_DUP; + return 0; spin_lock_bh(&orig_node->ogm_cnt_lock); seq_diff = seqno - orig_node->last_real_seqno; @@ -953,29 +936,22 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, /* signalize caller that the packet is to be dropped. */ if (!hlist_empty(&orig_node->neigh_list) && batadv_window_protected(bat_priv, seq_diff, - &orig_node->batman_seqno_reset)) { - ret = BATADV_PROTECTED; + &orig_node->batman_seqno_reset)) goto out; - } rcu_read_lock(); hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) { - neigh_addr = tmp_neigh_node->addr; - is_dup = batadv_test_bit(tmp_neigh_node->real_bits, - orig_node->last_real_seqno, - seqno); + is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits, + orig_node->last_real_seqno, + seqno); + neigh_addr = tmp_neigh_node->addr; if (batadv_compare_eth(neigh_addr, ethhdr->h_source) && - tmp_neigh_node->if_incoming == if_incoming) { + tmp_neigh_node->if_incoming == if_incoming) set_mark = 1; - if (is_dup) - ret = BATADV_NEIGH_DUP; - } else { + else set_mark = 0; - if (is_dup && (ret != BATADV_NEIGH_DUP)) - ret = BATADV_ORIG_DUP; - } /* if the window moved, set the update flag. 
*/ need_update |= batadv_bit_get_packet(bat_priv, @@ -995,6 +971,8 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, orig_node->last_real_seqno = seqno; } + ret = is_duplicate; + out: spin_unlock_bh(&orig_node->ogm_cnt_lock); batadv_orig_node_free_ref(orig_node); @@ -1016,8 +994,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr, int is_broadcast = 0, is_bidirect; bool is_single_hop_neigh = false; bool is_from_best_next_hop = false; - int sameseq, similar_ttl; - enum batadv_dup_status dup_status; + int is_duplicate, sameseq, simlar_ttl; uint32_t if_incoming_seqno; uint8_t *prev_sender; @@ -1161,10 +1138,10 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr, if (!orig_node) return; - dup_status = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet, - if_incoming); + is_duplicate = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet, + if_incoming); - if (dup_status == BATADV_PROTECTED) { + if (is_duplicate == -1) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: packet within seqno protection time (sender: %pM)\n", ethhdr->h_source); @@ -1234,12 +1211,11 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr, * seqno and similar ttl as the non-duplicate */ sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno); - similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl; - if (is_bidirect && ((dup_status == BATADV_NO_DUP) || - (sameseq && similar_ttl))) + simlar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl; + if (is_bidirect && (!is_duplicate || (sameseq && simlar_ttl))) batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr, batadv_ogm_packet, if_incoming, - tt_buff, dup_status); + tt_buff, is_duplicate); /* is single hop (direct) neighbor */ if (is_single_hop_neigh) { @@ -1260,7 +1236,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr, goto out_neigh; } - if (dup_status == BATADV_NEIGH_DUP) { + if (is_duplicate) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: duplicate packet received\n"); goto out_neigh; diff --git a/trunk/net/batman-adv/bridge_loop_avoidance.c b/trunk/net/batman-adv/bridge_loop_avoidance.c index de27b3175cfd..379061c72549 100644 --- a/trunk/net/batman-adv/bridge_loop_avoidance.c +++ b/trunk/net/batman-adv/bridge_loop_avoidance.c @@ -1067,10 +1067,6 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN)); bat_priv->bla.claim_dest.group = group; - /* purge everything when bridge loop avoidance is turned off */ - if (!atomic_read(&bat_priv->bridge_loop_avoidance)) - oldif = NULL; - if (!oldif) { batadv_bla_purge_claims(bat_priv, NULL, 1); batadv_bla_purge_backbone_gw(bat_priv, 1); diff --git a/trunk/net/batman-adv/distributed-arp-table.c b/trunk/net/batman-adv/distributed-arp-table.c index 239992021b1d..8e15d966d9b0 100644 --- a/trunk/net/batman-adv/distributed-arp-table.c +++ b/trunk/net/batman-adv/distributed-arp-table.c @@ -837,19 +837,6 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst); if (dat_entry) { - /* If the ARP request is destined for a local client the local - * client will answer itself. DAT would only generate a - * duplicate packet. - * - * Moreover, if the soft-interface is enslaved into a bridge, an - * additional DAT answer may trigger kernel warnings about - * a packet coming from the wrong port. 
- */ - if (batadv_is_my_client(bat_priv, dat_entry->mac_addr)) { - ret = true; - goto out; - } - skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src, bat_priv->soft_iface, ip_dst, hw_src, dat_entry->mac_addr, hw_src); diff --git a/trunk/net/batman-adv/main.c b/trunk/net/batman-adv/main.c index 51aafd669cbb..3e30a0f1b908 100644 --- a/trunk/net/batman-adv/main.c +++ b/trunk/net/batman-adv/main.c @@ -163,25 +163,16 @@ void batadv_mesh_free(struct net_device *soft_iface) batadv_vis_quit(bat_priv); batadv_gw_node_purge(bat_priv); + batadv_originator_free(bat_priv); batadv_nc_free(bat_priv); - batadv_dat_free(bat_priv); - batadv_bla_free(bat_priv); - /* Free the TT and the originator tables only after having terminated - * all the other depending components which may use these structures for - * their purposes. - */ batadv_tt_free(bat_priv); - /* Since the originator table clean up routine is accessing the TT - * tables as well, it has to be invoked after the TT tables have been - * freed and marked as empty. This ensures that no cleanup RCU callbacks - * accessing the TT data are scheduled for later execution. - */ - batadv_originator_free(bat_priv); + batadv_bla_free(bat_priv); + + batadv_dat_free(bat_priv); free_percpu(bat_priv->bat_counters); - bat_priv->bat_counters = NULL; atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); } @@ -484,7 +475,7 @@ static int batadv_param_set_ra(const char *val, const struct kernel_param *kp) char *algo_name = (char *)val; size_t name_len = strlen(algo_name); - if (name_len > 0 && algo_name[name_len - 1] == '\n') + if (algo_name[name_len - 1] == '\n') algo_name[name_len - 1] = '\0'; bat_algo_ops = batadv_algo_get(algo_name); diff --git a/trunk/net/batman-adv/network-coding.c b/trunk/net/batman-adv/network-coding.c index e84629ece9b7..f7c54305a918 100644 --- a/trunk/net/batman-adv/network-coding.c +++ b/trunk/net/batman-adv/network-coding.c @@ -1514,7 +1514,6 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, struct ethhdr *ethhdr, ethhdr_tmp; uint8_t *orig_dest, ttl, ttvn; unsigned int coding_len; - int err; /* Save headers temporarily */ memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp)); @@ -1569,11 +1568,8 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, coding_len); /* Resize decoded skb if decoded with larger packet */ - if (nc_packet->skb->len > coding_len + h_size) { - err = pskb_trim_rcsum(skb, coding_len + h_size); - if (err) - return NULL; - } + if (nc_packet->skb->len > coding_len + h_size) + pskb_trim_rcsum(skb, coding_len + h_size); /* Create decoded unicast packet */ unicast_packet = (struct batadv_unicast_packet *)skb->data; diff --git a/trunk/net/batman-adv/originator.c b/trunk/net/batman-adv/originator.c index fad1a2093e15..2f3452546636 100644 --- a/trunk/net/batman-adv/originator.c +++ b/trunk/net/batman-adv/originator.c @@ -156,28 +156,12 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu) kfree(orig_node); } -/** - * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly - * schedule an rcu callback for freeing it - * @orig_node: the orig node to free - */ void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node) { if (atomic_dec_and_test(&orig_node->refcount)) call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu); } -/** - * batadv_orig_node_free_ref_now - decrement the orig node refcounter and - * possibly free it (without rcu callback) - * @orig_node: the orig node to free - */ -void 
batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node) -{ - if (atomic_dec_and_test(&orig_node->refcount)) - batadv_orig_node_free_rcu(&orig_node->rcu); -} - void batadv_originator_free(struct batadv_priv *bat_priv) { struct batadv_hashtable *hash = bat_priv->orig_hash; diff --git a/trunk/net/batman-adv/originator.h b/trunk/net/batman-adv/originator.h index 734e5a3d8a5b..7df48fa7669d 100644 --- a/trunk/net/batman-adv/originator.h +++ b/trunk/net/batman-adv/originator.h @@ -26,7 +26,6 @@ int batadv_originator_init(struct batadv_priv *bat_priv); void batadv_originator_free(struct batadv_priv *bat_priv); void batadv_purge_orig_ref(struct batadv_priv *bat_priv); void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node); -void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node); struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv, const uint8_t *addr); struct batadv_neigh_node * diff --git a/trunk/net/batman-adv/soft-interface.c b/trunk/net/batman-adv/soft-interface.c index 819dfb006cdf..6f20d339e33a 100644 --- a/trunk/net/batman-adv/soft-interface.c +++ b/trunk/net/batman-adv/soft-interface.c @@ -505,7 +505,6 @@ static int batadv_softif_init_late(struct net_device *dev) batadv_debugfs_del_meshif(dev); free_bat_counters: free_percpu(bat_priv->bat_counters); - bat_priv->bat_counters = NULL; return ret; } diff --git a/trunk/net/batman-adv/sysfs.c b/trunk/net/batman-adv/sysfs.c index 929e304dacb2..15a22efa9a67 100644 --- a/trunk/net/batman-adv/sysfs.c +++ b/trunk/net/batman-adv/sysfs.c @@ -582,7 +582,10 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj, (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0)) goto out; - rtnl_lock(); + if (!rtnl_trylock()) { + ret = -ERESTARTSYS; + goto out; + } if (status_tmp == BATADV_IF_NOT_IN_USE) { batadv_hardif_disable_interface(hard_iface, diff --git a/trunk/net/batman-adv/translation-table.c b/trunk/net/batman-adv/translation-table.c index 9e8748575845..5e89deeb9542 100644 --- a/trunk/net/batman-adv/translation-table.c +++ b/trunk/net/batman-adv/translation-table.c @@ -144,12 +144,7 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu) struct batadv_tt_orig_list_entry *orig_entry; orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu); - - /* We are in an rcu callback here, therefore we cannot use - * batadv_orig_node_free_ref() and its call_rcu(): - * An rcu_barrier() wouldn't wait for that to finish - */ - batadv_orig_node_free_ref_now(orig_entry->orig_node); + batadv_orig_node_free_ref(orig_entry->orig_node); kfree(orig_entry); } diff --git a/trunk/net/bluetooth/hci_core.c b/trunk/net/bluetooth/hci_core.c index ace5e55fe5a3..33843c5c4939 100644 --- a/trunk/net/bluetooth/hci_core.c +++ b/trunk/net/bluetooth/hci_core.c @@ -341,6 +341,7 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt) static void bredr_setup(struct hci_request *req) { + struct hci_cp_delete_stored_link_key cp; __le16 param; __u8 flt_type; @@ -364,6 +365,10 @@ static void bredr_setup(struct hci_request *req) param = __constant_cpu_to_le16(0x7d00); hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); + bacpy(&cp.bdaddr, BDADDR_ANY); + cp.delete_all = 0x01; + hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp); + /* Read page scan parameters */ if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) { hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL); @@ -597,16 +602,6 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt) struct hci_dev
*hdev = req->hdev; u8 p; - /* Only send HCI_Delete_Stored_Link_Key if it is supported */ - if (hdev->commands[6] & 0x80) { - struct hci_cp_delete_stored_link_key cp; - - bacpy(&cp.bdaddr, BDADDR_ANY); - cp.delete_all = 0x01; - hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, - sizeof(cp), &cp); - } - if (hdev->commands[5] & 0x10) hci_setup_link_policy(req); @@ -1560,15 +1555,11 @@ static const struct rfkill_ops hci_rfkill_ops = { static void hci_power_on(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); - int err; BT_DBG("%s", hdev->name); - err = hci_dev_open(hdev->id); - if (err < 0) { - mgmt_set_powered_failed(hdev, err); + if (hci_dev_open(hdev->id) < 0) return; - } if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) queue_delayed_work(hdev->req_workqueue, &hdev->power_off, diff --git a/trunk/net/bluetooth/l2cap_core.c b/trunk/net/bluetooth/l2cap_core.c index 68843a28a7af..a76d1ac0321b 100644 --- a/trunk/net/bluetooth/l2cap_core.c +++ b/trunk/net/bluetooth/l2cap_core.c @@ -2852,9 +2852,6 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u", conn, code, ident, dlen); - if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE) - return NULL; - len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; count = min_t(unsigned int, conn->mtu, len); @@ -3680,14 +3677,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) } static inline int l2cap_command_rej(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, u16 cmd_len, - u8 *data) + struct l2cap_cmd_hdr *cmd, u8 *data) { struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data; - if (cmd_len < sizeof(*rej)) - return -EPROTO; - if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD) return 0; @@ -3836,14 +3829,11 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn, } static int l2cap_connect_req(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) + struct l2cap_cmd_hdr *cmd, u8 *data) { struct hci_dev *hdev = conn->hcon->hdev; struct hci_conn *hcon = conn->hcon; - if (cmd_len < sizeof(struct l2cap_conn_req)) - return -EPROTO; - hci_dev_lock(hdev); if (test_bit(HCI_MGMT, &hdev->dev_flags) && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags)) @@ -3857,8 +3847,7 @@ static int l2cap_connect_req(struct l2cap_conn *conn, } static int l2cap_connect_create_rsp(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, u16 cmd_len, - u8 *data) + struct l2cap_cmd_hdr *cmd, u8 *data) { struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data; u16 scid, dcid, result, status; @@ -3866,9 +3855,6 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn, u8 req[128]; int err; - if (cmd_len < sizeof(*rsp)) - return -EPROTO; - scid = __le16_to_cpu(rsp->scid); dcid = __le16_to_cpu(rsp->dcid); result = __le16_to_cpu(rsp->result); @@ -3966,9 +3952,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_chan *chan; int len, err = 0; - if (cmd_len < sizeof(*req)) - return -EPROTO; - dcid = __le16_to_cpu(req->dcid); flags = __le16_to_cpu(req->flags); @@ -3992,7 +3975,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, /* Reject if config buffer is too small. 
*/ len = cmd_len - sizeof(*req); - if (chan->conf_len + len > sizeof(chan->conf_req)) { + if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) { l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(chan, rsp, L2CAP_CONF_REJECT, flags), rsp); @@ -4070,18 +4053,14 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, } static inline int l2cap_config_rsp(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, u16 cmd_len, - u8 *data) + struct l2cap_cmd_hdr *cmd, u8 *data) { struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; u16 scid, flags, result; struct l2cap_chan *chan; - int len = cmd_len - sizeof(*rsp); + int len = le16_to_cpu(cmd->len) - sizeof(*rsp); int err = 0; - if (cmd_len < sizeof(*rsp)) - return -EPROTO; - scid = __le16_to_cpu(rsp->scid); flags = __le16_to_cpu(rsp->flags); result = __le16_to_cpu(rsp->result); @@ -4182,8 +4161,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, } static inline int l2cap_disconnect_req(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, u16 cmd_len, - u8 *data) + struct l2cap_cmd_hdr *cmd, u8 *data) { struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data; struct l2cap_disconn_rsp rsp; @@ -4191,9 +4169,6 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_chan *chan; struct sock *sk; - if (cmd_len != sizeof(*req)) - return -EPROTO; - scid = __le16_to_cpu(req->scid); dcid = __le16_to_cpu(req->dcid); @@ -4233,16 +4208,12 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, } static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, u16 cmd_len, - u8 *data) + struct l2cap_cmd_hdr *cmd, u8 *data) { struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data; u16 dcid, scid; struct l2cap_chan *chan; - if (cmd_len != sizeof(*rsp)) - return -EPROTO; - scid = __le16_to_cpu(rsp->scid); dcid = __le16_to_cpu(rsp->dcid); @@ -4272,15 +4243,11 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, } static inline int l2cap_information_req(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, u16 cmd_len, - u8 *data) + struct l2cap_cmd_hdr *cmd, u8 *data) { struct l2cap_info_req *req = (struct l2cap_info_req *) data; u16 type; - if (cmd_len != sizeof(*req)) - return -EPROTO; - type = __le16_to_cpu(req->type); BT_DBG("type 0x%4.4x", type); @@ -4327,15 +4294,11 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, } static inline int l2cap_information_rsp(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, u16 cmd_len, - u8 *data) + struct l2cap_cmd_hdr *cmd, u8 *data) { struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data; u16 type, result; - if (cmd_len < sizeof(*rsp)) - return -EPROTO; - type = __le16_to_cpu(rsp->type); result = __le16_to_cpu(rsp->result); @@ -5201,16 +5164,16 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, switch (cmd->code) { case L2CAP_COMMAND_REJ: - l2cap_command_rej(conn, cmd, cmd_len, data); + l2cap_command_rej(conn, cmd, data); break; case L2CAP_CONN_REQ: - err = l2cap_connect_req(conn, cmd, cmd_len, data); + err = l2cap_connect_req(conn, cmd, data); break; case L2CAP_CONN_RSP: case L2CAP_CREATE_CHAN_RSP: - err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data); + err = l2cap_connect_create_rsp(conn, cmd, data); break; case L2CAP_CONF_REQ: @@ -5218,15 +5181,15 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, break; case L2CAP_CONF_RSP: - err = l2cap_config_rsp(conn, cmd, cmd_len, data); + err = l2cap_config_rsp(conn, 
cmd, data); break; case L2CAP_DISCONN_REQ: - err = l2cap_disconnect_req(conn, cmd, cmd_len, data); + err = l2cap_disconnect_req(conn, cmd, data); break; case L2CAP_DISCONN_RSP: - err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data); + err = l2cap_disconnect_rsp(conn, cmd, data); break; case L2CAP_ECHO_REQ: @@ -5237,11 +5200,11 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, break; case L2CAP_INFO_REQ: - err = l2cap_information_req(conn, cmd, cmd_len, data); + err = l2cap_information_req(conn, cmd, data); break; case L2CAP_INFO_RSP: - err = l2cap_information_rsp(conn, cmd, cmd_len, data); + err = l2cap_information_rsp(conn, cmd, data); break; case L2CAP_CREATE_CHAN_REQ: diff --git a/trunk/net/bluetooth/mgmt.c b/trunk/net/bluetooth/mgmt.c index f8ecbc70293d..35fef22703e9 100644 --- a/trunk/net/bluetooth/mgmt.c +++ b/trunk/net/bluetooth/mgmt.c @@ -2700,7 +2700,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev, break; case DISCOV_TYPE_LE: - if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { + if (!lmp_host_le_capable(hdev)) { err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, MGMT_STATUS_NOT_SUPPORTED); mgmt_pending_remove(cmd); @@ -3418,27 +3418,6 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered) return err; } -int mgmt_set_powered_failed(struct hci_dev *hdev, int err) -{ - struct pending_cmd *cmd; - u8 status; - - cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); - if (!cmd) - return -ENOENT; - - if (err == -ERFKILL) - status = MGMT_STATUS_RFKILLED; - else - status = MGMT_STATUS_FAILED; - - err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status); - - mgmt_pending_remove(cmd); - - return err; -} - int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable) { struct cmd_lookup match = { NULL, hdev }; diff --git a/trunk/net/bluetooth/smp.c b/trunk/net/bluetooth/smp.c index b5562abdd6e0..b2296d3857a0 100644 --- a/trunk/net/bluetooth/smp.c +++ b/trunk/net/bluetooth/smp.c @@ -770,7 +770,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level); - if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) + if (!lmp_host_le_capable(hcon->hdev)) return 1; if (sec_level == BT_SECURITY_LOW) @@ -851,7 +851,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) __u8 reason; int err = 0; - if (!test_bit(HCI_LE_ENABLED, &conn->hcon->hdev->dev_flags)) { + if (!lmp_host_le_capable(conn->hcon->hdev)) { err = -ENOTSUPP; reason = SMP_PAIRING_NOTSUPP; goto done; diff --git a/trunk/net/bridge/br_multicast.c b/trunk/net/bridge/br_multicast.c index d6448e35e027..81f2389f78eb 100644 --- a/trunk/net/bridge/br_multicast.c +++ b/trunk/net/bridge/br_multicast.c @@ -465,9 +465,8 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, skb_set_transport_header(skb, skb->len); mldq = (struct mld_msg *) icmp6_hdr(skb); - interval = ipv6_addr_any(group) ? - br->multicast_query_response_interval : - br->multicast_last_member_interval; + interval = ipv6_addr_any(group) ? 
br->multicast_last_member_interval : + br->multicast_query_response_interval; mldq->mld_type = ICMPV6_MGM_QUERY; mldq->mld_code = 0; diff --git a/trunk/net/bridge/netfilter/ebt_log.c b/trunk/net/bridge/netfilter/ebt_log.c index 19c37a4929bc..9878eb8204c5 100644 --- a/trunk/net/bridge/netfilter/ebt_log.c +++ b/trunk/net/bridge/netfilter/ebt_log.c @@ -72,12 +72,13 @@ print_ports(const struct sk_buff *skb, uint8_t protocol, int offset) } static void -ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum, - const struct sk_buff *skb, const struct net_device *in, - const struct net_device *out, const struct nf_loginfo *loginfo, - const char *prefix) +ebt_log_packet(u_int8_t pf, unsigned int hooknum, + const struct sk_buff *skb, const struct net_device *in, + const struct net_device *out, const struct nf_loginfo *loginfo, + const char *prefix) { unsigned int bitmask; + struct net *net = dev_net(in ? in : out); /* FIXME: Disabled from containers until syslog ns is supported */ if (!net_eq(net, &init_net)) @@ -190,7 +191,7 @@ ebt_log_tg(struct sk_buff *skb, const struct xt_action_param *par) nf_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb, par->in, par->out, &li, "%s", info->prefix); else - ebt_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb, par->in, + ebt_log_packet(NFPROTO_BRIDGE, par->hooknum, skb, par->in, par->out, &li, info->prefix); return EBT_CONTINUE; } diff --git a/trunk/net/bridge/netfilter/ebt_ulog.c b/trunk/net/bridge/netfilter/ebt_ulog.c index df0364aa12d5..fc1905c51417 100644 --- a/trunk/net/bridge/netfilter/ebt_ulog.c +++ b/trunk/net/bridge/netfilter/ebt_ulog.c @@ -131,16 +131,14 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size) return skb; } -static void ebt_ulog_packet(struct net *net, unsigned int hooknr, - const struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - const struct ebt_ulog_info *uloginfo, - const char *prefix) +static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + const struct ebt_ulog_info *uloginfo, const char *prefix) { ebt_ulog_packet_msg_t *pm; size_t size, copy_len; struct nlmsghdr *nlh; + struct net *net = dev_net(in ? in : out); struct ebt_ulog_net *ebt = ebt_ulog_pernet(net); unsigned int group = uloginfo->nlgroup; ebt_ulog_buff_t *ub = &ebt->ulog_buffers[group]; @@ -235,7 +233,7 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr, } /* this function is registered with the netfilter core */ -static void ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum, +static void ebt_log_packet(u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *li, const char *prefix) @@ -254,15 +252,13 @@ static void ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum, strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix)); } - ebt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix); + ebt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix); } static unsigned int ebt_ulog_tg(struct sk_buff *skb, const struct xt_action_param *par) { - struct net *net = dev_net(par->in ? 
par->in : par->out); - - ebt_ulog_packet(net, par->hooknum, skb, par->in, par->out, + ebt_ulog_packet(par->hooknum, skb, par->in, par->out, par->targinfo, NULL); return EBT_CONTINUE; } diff --git a/trunk/net/ceph/osd_client.c b/trunk/net/ceph/osd_client.c index 3a246a6cab47..a3395fdfbd4f 100644 --- a/trunk/net/ceph/osd_client.c +++ b/trunk/net/ceph/osd_client.c @@ -1204,7 +1204,6 @@ void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc, mutex_lock(&osdc->request_mutex); if (req->r_linger) { __unregister_linger_request(osdc, req); - req->r_linger = 0; ceph_osdc_put_request(req); } mutex_unlock(&osdc->request_mutex); @@ -1675,13 +1674,13 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend) __register_request(osdc, req); __unregister_linger_request(osdc, req); } - reset_changed_osds(osdc); mutex_unlock(&osdc->request_mutex); if (needmap) { dout("%d requests for down osds, need new map\n", needmap); ceph_monc_request_next_osdmap(&osdc->client->monc); } + reset_changed_osds(osdc); } @@ -2121,9 +2120,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, down_read(&osdc->map_sem); mutex_lock(&osdc->request_mutex); __register_request(osdc, req); - req->r_sent = 0; - req->r_got_reply = 0; - req->r_completed = 0; + WARN_ON(req->r_sent); rc = __map_request(osdc, req, 0); if (rc < 0) { if (nofail) { diff --git a/trunk/net/compat.c b/trunk/net/compat.c index f0a1ba6c8086..79ae88485001 100644 --- a/trunk/net/compat.c +++ b/trunk/net/compat.c @@ -734,25 +734,19 @@ static unsigned char nas[21] = { asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) { - if (flags & MSG_CMSG_COMPAT) - return -EINVAL; - return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); + return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); } asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags) { - if (flags & MSG_CMSG_COMPAT) - return -EINVAL; return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT); } asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) { - if (flags & MSG_CMSG_COMPAT) - return -EINVAL; - return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); + return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); } asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags) @@ -774,9 +768,6 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, int datagrams; struct timespec ktspec; - if (flags & MSG_CMSG_COMPAT) - return -EINVAL; - if (COMPAT_USE_64BIT_TIME) return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT, diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c index faebb398fb46..fc1e289397f5 100644 --- a/trunk/net/core/dev.c +++ b/trunk/net/core/dev.c @@ -791,40 +791,6 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex) } EXPORT_SYMBOL(dev_get_by_index); -/** - * netdev_get_name - get a netdevice name, knowing its ifindex. - * @net: network namespace - * @name: a pointer to the buffer where the name will be stored. - * @ifindex: the ifindex of the interface to get the name from. - * - * The use of raw_seqcount_begin() and cond_resched() before - * retrying is required as we want to give the writers a chance - * to complete when CONFIG_PREEMPT is not set. 
- */ -int netdev_get_name(struct net *net, char *name, int ifindex) -{ - struct net_device *dev; - unsigned int seq; - -retry: - seq = raw_seqcount_begin(&devnet_rename_seq); - rcu_read_lock(); - dev = dev_get_by_index_rcu(net, ifindex); - if (!dev) { - rcu_read_unlock(); - return -ENODEV; - } - - strcpy(name, dev->name); - rcu_read_unlock(); - if (read_seqcount_retry(&devnet_rename_seq, seq)) { - cond_resched(); - goto retry; - } - - return 0; -} - /** * dev_getbyhwaddr_rcu - find a device by its hardware address * @net: the applicable net namespace diff --git a/trunk/net/core/dev_addr_lists.c b/trunk/net/core/dev_addr_lists.c index 6cda4e2c2132..c013f38482a1 100644 --- a/trunk/net/core/dev_addr_lists.c +++ b/trunk/net/core/dev_addr_lists.c @@ -39,7 +39,6 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list, ha->refcount = 1; ha->global_use = global; ha->synced = sync; - ha->sync_cnt = 0; list_add_tail_rcu(&ha->list, &list->list); list->count++; @@ -67,7 +66,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, } if (sync) { if (ha->synced) - return -EEXIST; + return 0; else ha->synced = true; } @@ -140,13 +139,10 @@ static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list, err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type, false, true); - if (err && err != -EEXIST) + if (err) return err; - - if (!err) { - ha->sync_cnt++; - ha->refcount++; - } + ha->sync_cnt++; + ha->refcount++; return 0; } @@ -163,8 +159,7 @@ static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list, if (err) return; ha->sync_cnt--; - /* address on from list is not marked synced */ - __hw_addr_del_entry(from_list, ha, false, false); + __hw_addr_del_entry(from_list, ha, false, true); } static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list, @@ -801,7 +796,7 @@ int dev_mc_sync_multiple(struct net_device *to, struct net_device *from) return -EINVAL; netif_addr_lock_nested(to); - err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len); + err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len); if (!err) __dev_set_rx_mode(to); netif_addr_unlock(to); diff --git a/trunk/net/core/dev_ioctl.c b/trunk/net/core/dev_ioctl.c index 5b7d0e1d0664..6cc0481faade 100644 --- a/trunk/net/core/dev_ioctl.c +++ b/trunk/net/core/dev_ioctl.c @@ -19,8 +19,9 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg) { + struct net_device *dev; struct ifreq ifr; - int error; + unsigned seq; /* * Fetch the caller's info block. 
@@ -29,9 +30,19 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg) if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) return -EFAULT; - error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex); - if (error) - return error; +retry: + seq = read_seqcount_begin(&devnet_rename_seq); + rcu_read_lock(); + dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex); + if (!dev) { + rcu_read_unlock(); + return -ENODEV; + } + + strcpy(ifr.ifr_name, dev->name); + rcu_read_unlock(); + if (read_seqcount_retry(&devnet_rename_seq, seq)) + goto retry; if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) return -EFAULT; diff --git a/trunk/net/core/ethtool.c b/trunk/net/core/ethtool.c index ce91766eeca9..22efdaa76ebf 100644 --- a/trunk/net/core/ethtool.c +++ b/trunk/net/core/ethtool.c @@ -60,10 +60,10 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6", [NETIF_F_HIGHDMA_BIT] = "highdma", [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist", - [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-hw-insert", + [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-ctag-hw-insert", - [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-hw-parse", - [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-filter", + [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-ctag-hw-parse", + [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-ctag-filter", [NETIF_F_HW_VLAN_STAG_TX_BIT] = "tx-vlan-stag-hw-insert", [NETIF_F_HW_VLAN_STAG_RX_BIT] = "rx-vlan-stag-hw-parse", [NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter", diff --git a/trunk/net/core/filter.c b/trunk/net/core/filter.c index 6438f29ff266..dad2a178f9f8 100644 --- a/trunk/net/core/filter.c +++ b/trunk/net/core/filter.c @@ -778,7 +778,7 @@ int sk_detach_filter(struct sock *sk) } EXPORT_SYMBOL_GPL(sk_detach_filter); -void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to) +static void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to) { static const u16 decodes[] = { [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K, diff --git a/trunk/net/core/iovec.c b/trunk/net/core/iovec.c index de178e462682..7e7aeb01de45 100644 --- a/trunk/net/core/iovec.c +++ b/trunk/net/core/iovec.c @@ -73,6 +73,31 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a return err; } +/* + * Copy kernel to iovec. Returns -EFAULT on error. + * + * Note: this modifies the original iovec. + */ + +int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len) +{ + while (len > 0) { + if (iov->iov_len) { + int copy = min_t(unsigned int, iov->iov_len, len); + if (copy_to_user(iov->iov_base, kdata, copy)) + return -EFAULT; + kdata += copy; + len -= copy; + iov->iov_len -= copy; + iov->iov_base += copy; + } + iov++; + } + + return 0; +} +EXPORT_SYMBOL(memcpy_toiovec); + /* * Copy kernel to iovec. Returns -EFAULT on error. */ @@ -99,6 +124,31 @@ int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata, } EXPORT_SYMBOL(memcpy_toiovecend); +/* + * Copy iovec to kernel. Returns -EFAULT on error. + * + * Note: this modifies the original iovec. + */ + +int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len) +{ + while (len > 0) { + if (iov->iov_len) { + int copy = min_t(unsigned int, len, iov->iov_len); + if (copy_from_user(kdata, iov->iov_base, copy)) + return -EFAULT; + len -= copy; + kdata += copy; + iov->iov_base += copy; + iov->iov_len -= copy; + } + iov++; + } + + return 0; +} +EXPORT_SYMBOL(memcpy_fromiovec); + /* * Copy iovec from kernel. Returns -EFAULT on error. 
*/ diff --git a/trunk/net/core/skbuff.c b/trunk/net/core/skbuff.c index 1c1738cc4538..af9185d0be6a 100644 --- a/trunk/net/core/skbuff.c +++ b/trunk/net/core/skbuff.c @@ -195,7 +195,7 @@ struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node) * the tail pointer in struct sk_buff! */ memset(skb, 0, offsetof(struct sk_buff, tail)); - skb->head = NULL; + skb->data = NULL; skb->truesize = sizeof(struct sk_buff); atomic_set(&skb->users, 1); @@ -483,8 +483,15 @@ EXPORT_SYMBOL(skb_add_rx_frag); static void skb_drop_list(struct sk_buff **listp) { - kfree_skb_list(*listp); + struct sk_buff *list = *listp; + *listp = NULL; + + do { + struct sk_buff *this = list; + list = list->next; + kfree_skb(this); + } while (list); } static inline void skb_drop_fraglist(struct sk_buff *skb) @@ -604,7 +611,7 @@ static void skb_release_head_state(struct sk_buff *skb) static void skb_release_all(struct sk_buff *skb) { skb_release_head_state(skb); - if (likely(skb->head)) + if (likely(skb->data)) skb_release_data(skb); } @@ -644,17 +651,6 @@ void kfree_skb(struct sk_buff *skb) } EXPORT_SYMBOL(kfree_skb); -void kfree_skb_list(struct sk_buff *segs) -{ - while (segs) { - struct sk_buff *next = segs->next; - - kfree_skb(segs); - segs = next; - } -} -EXPORT_SYMBOL(kfree_skb_list); - /** * skb_tx_error - report an sk_buff xmit error * @skb: buffer that triggered an error diff --git a/trunk/net/core/sock.c b/trunk/net/core/sock.c index d6d024cfaaaf..d4f4cea726e7 100644 --- a/trunk/net/core/sock.c +++ b/trunk/net/core/sock.c @@ -210,7 +210,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = { "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" , - "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_MAX" + "sk_lock-AF_NFC" , "sk_lock-AF_MAX" }; static const char *const af_family_slock_key_strings[AF_MAX+1] = { "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , @@ -226,7 +226,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = { "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" , - "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_MAX" + "slock-AF_NFC" , "slock-AF_MAX" }; static const char *const af_family_clock_key_strings[AF_MAX+1] = { "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , @@ -242,7 +242,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = { "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" , - "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_MAX" + "clock-AF_NFC" , "clock-AF_MAX" }; /* @@ -571,7 +571,9 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval, int ret = -ENOPROTOOPT; #ifdef CONFIG_NETDEVICES struct net *net = sock_net(sk); + struct net_device *dev; char devname[IFNAMSIZ]; + unsigned seq; if (sk->sk_bound_dev_if == 0) { len = 0; @@ -582,9 +584,20 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval, if (len < IFNAMSIZ) goto out; - ret = netdev_get_name(net, devname, sk->sk_bound_dev_if); - if (ret) +retry: + seq = read_seqcount_begin(&devnet_rename_seq); + rcu_read_lock(); + dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); + ret = -ENODEV; + if (!dev) { + rcu_read_unlock(); goto out; + } + + strcpy(devname, dev->name); + rcu_read_unlock(); 
+ if (read_seqcount_retry(&devnet_rename_seq, seq)) + goto retry; len = strlen(devname) + 1; @@ -1204,6 +1217,18 @@ static void sock_copy(struct sock *nsk, const struct sock *osk) #endif } +/* + * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes + * un-modified. Special care is taken when initializing object to zero. + */ +static inline void sk_prot_clear_nulls(struct sock *sk, int size) +{ + if (offsetof(struct sock, sk_node.next) != 0) + memset(sk, 0, offsetof(struct sock, sk_node.next)); + memset(&sk->sk_node.pprev, 0, + size - offsetof(struct sock, sk_node.pprev)); +} + void sk_prot_clear_portaddr_nulls(struct sock *sk, int size) { unsigned long nulls1, nulls2; diff --git a/trunk/net/core/sock_diag.c b/trunk/net/core/sock_diag.c index a0e9cf6379de..d5bef0b0f639 100644 --- a/trunk/net/core/sock_diag.c +++ b/trunk/net/core/sock_diag.c @@ -73,13 +73,8 @@ int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk, goto out; } - if (filter) { - struct sock_filter *fb = (struct sock_filter *)nla_data(attr); - int i; - - for (i = 0; i < filter->len; i++, fb++) - sk_decode_filter(&filter->insns[i], fb); - } + if (filter) + memcpy(nla_data(attr), filter->insns, len); out: rcu_read_unlock(); diff --git a/trunk/net/ipv4/gre.c b/trunk/net/ipv4/gre.c index 7856d1651d05..b2e805af9b87 100644 --- a/trunk/net/ipv4/gre.c +++ b/trunk/net/ipv4/gre.c @@ -178,7 +178,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, err = __skb_linearize(skb); if (err) { - kfree_skb_list(segs); + kfree_skb(segs); segs = ERR_PTR(err); goto out; } diff --git a/trunk/net/ipv4/ip_gre.c b/trunk/net/ipv4/ip_gre.c index 2a83591492dd..c625e4dad4b0 100644 --- a/trunk/net/ipv4/ip_gre.c +++ b/trunk/net/ipv4/ip_gre.c @@ -235,7 +235,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info) */ struct net *net = dev_net(skb->dev); struct ip_tunnel_net *itn; - const struct iphdr *iph; + const struct iphdr *iph = (const struct iphdr *)skb->data; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct ip_tunnel *t; @@ -281,7 +281,6 @@ static void ipgre_err(struct sk_buff *skb, u32 info) else itn = net_generic(net, ipgre_net_id); - iph = (const struct iphdr *)skb->data; t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi.flags, iph->daddr, iph->saddr, tpi.key); diff --git a/trunk/net/ipv4/ip_output.c b/trunk/net/ipv4/ip_output.c index 4bcabf3ab4ca..147abf5275aa 100644 --- a/trunk/net/ipv4/ip_output.c +++ b/trunk/net/ipv4/ip_output.c @@ -84,7 +84,7 @@ int sysctl_ip_default_ttl __read_mostly = IPDEFTTL; EXPORT_SYMBOL(sysctl_ip_default_ttl); /* Generate a checksum for an outgoing IP datagram. */ -void ip_send_check(struct iphdr *iph) +__inline__ void ip_send_check(struct iphdr *iph) { iph->check = 0; iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); diff --git a/trunk/net/ipv4/ip_tunnel.c b/trunk/net/ipv4/ip_tunnel.c index 7fa8f08fa7ae..e4147ec1665a 100644 --- a/trunk/net/ipv4/ip_tunnel.c +++ b/trunk/net/ipv4/ip_tunnel.c @@ -503,7 +503,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, inner_iph = (const struct iphdr *)skb_inner_network_header(skb); - memset(IPCB(skb), 0, sizeof(*IPCB(skb))); dst = tnl_params->daddr; if (dst == 0) { /* NBMA tunnel */ @@ -659,6 +658,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, skb_dst_drop(skb); skb_dst_set(skb, &rt->dst); + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); /* Push down and install the IP header. 
*/ skb_push(skb, sizeof(struct iphdr)); @@ -853,7 +853,7 @@ void ip_tunnel_dellink(struct net_device *dev, struct list_head *head) } EXPORT_SYMBOL_GPL(ip_tunnel_dellink); -int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, +int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, struct rtnl_link_ops *ops, char *devname) { struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id); @@ -899,7 +899,7 @@ static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head) unregister_netdevice_queue(itn->fb_tunnel_dev, head); } -void ip_tunnel_delete_net(struct ip_tunnel_net *itn) +void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn) { LIST_HEAD(list); diff --git a/trunk/net/ipv4/ip_vti.c b/trunk/net/ipv4/ip_vti.c index c118f6b576bb..9d2bdb2c1d3f 100644 --- a/trunk/net/ipv4/ip_vti.c +++ b/trunk/net/ipv4/ip_vti.c @@ -361,7 +361,8 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) tunnel->err_count = 0; } - memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | + IPSKB_REROUTED); skb_dst_drop(skb); skb_dst_set(skb, &rt->dst); nf_reset(skb); diff --git a/trunk/net/ipv4/netfilter/ipt_ULOG.c b/trunk/net/ipv4/netfilter/ipt_ULOG.c index 32b0e978c8e0..f8a222cb6448 100644 --- a/trunk/net/ipv4/netfilter/ipt_ULOG.c +++ b/trunk/net/ipv4/netfilter/ipt_ULOG.c @@ -125,16 +125,15 @@ static void ulog_send(struct ulog_net *ulog, unsigned int nlgroupnum) /* timer function to flush queue in flushtimeout time */ static void ulog_timer(unsigned long data) { - unsigned int groupnum = *((unsigned int *)data); struct ulog_net *ulog = container_of((void *)data, struct ulog_net, - nlgroup[groupnum]); + nlgroup[*(unsigned int *)data]); pr_debug("timer function called, calling ulog_send\n"); /* lock to protect against somebody modifying our structure * from ipt_ulog_target at the same time */ spin_lock_bh(&ulog->lock); - ulog_send(ulog, groupnum); + ulog_send(ulog, data); spin_unlock_bh(&ulog->lock); } @@ -163,8 +162,7 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size) return skb; } -static void ipt_ulog_packet(struct net *net, - unsigned int hooknum, +static void ipt_ulog_packet(unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, @@ -176,6 +174,7 @@ static void ipt_ulog_packet(struct net *net, size_t size, copy_len; struct nlmsghdr *nlh; struct timeval tv; + struct net *net = dev_net(in ? in : out); struct ulog_net *ulog = ulog_pernet(net); /* ffs == find first bit set, necessary because userspace @@ -232,10 +231,8 @@ static void ipt_ulog_packet(struct net *net, put_unaligned(tv.tv_usec, &pm->timestamp_usec); put_unaligned(skb->mark, &pm->mark); pm->hook = hooknum; - if (prefix != NULL) { - strncpy(pm->prefix, prefix, sizeof(pm->prefix) - 1); - pm->prefix[sizeof(pm->prefix) - 1] = '\0'; - } + if (prefix != NULL) + strncpy(pm->prefix, prefix, sizeof(pm->prefix)); else if (loginfo->prefix[0] != '\0') strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix)); else @@ -294,15 +291,12 @@ static void ipt_ulog_packet(struct net *net, static unsigned int ulog_tg(struct sk_buff *skb, const struct xt_action_param *par) { - struct net *net = dev_net(par->in ? 
par->in : par->out); - - ipt_ulog_packet(net, par->hooknum, skb, par->in, par->out, + ipt_ulog_packet(par->hooknum, skb, par->in, par->out, par->targinfo, NULL); return XT_CONTINUE; } -static void ipt_logfn(struct net *net, - u_int8_t pf, +static void ipt_logfn(u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, @@ -324,7 +318,7 @@ static void ipt_logfn(struct net *net, strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix)); } - ipt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix); + ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix); } static int ulog_tg_check(const struct xt_tgchk_param *par) @@ -408,11 +402,8 @@ static int __net_init ulog_tg_net_init(struct net *net) spin_lock_init(&ulog->lock); /* initialize ulog_buffers */ - for (i = 0; i < ULOG_MAXNLGROUPS; i++) { - ulog->nlgroup[i] = i; - setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer, - (unsigned long)&ulog->nlgroup[i]); - } + for (i = 0; i < ULOG_MAXNLGROUPS; i++) + setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer, i); ulog->nflognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg); if (!ulog->nflognl) diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c index d35bbf0cf404..550781a17b34 100644 --- a/trunk/net/ipv4/route.c +++ b/trunk/net/ipv4/route.c @@ -737,15 +737,10 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf { struct rtable *rt; struct flowi4 fl4; - const struct iphdr *iph = (const struct iphdr *) skb->data; - int oif = skb->dev->ifindex; - u8 tos = RT_TOS(iph->tos); - u8 prot = iph->protocol; - u32 mark = skb->mark; rt = (struct rtable *) dst; - __build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0); + ip_rt_build_flow_key(&fl4, sk, skb); __ip_do_redirect(rt, skb, &fl4, true); } diff --git a/trunk/net/ipv4/tcp.c b/trunk/net/ipv4/tcp.c index ab450c099aa4..dcb116dde216 100644 --- a/trunk/net/ipv4/tcp.c +++ b/trunk/net/ipv4/tcp.c @@ -2887,7 +2887,6 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, unsigned int mss; struct sk_buff *gso_skb = skb; __sum16 newcheck; - bool ooo_okay, copy_destructor; if (!pskb_may_pull(skb, sizeof(*th))) goto out; @@ -2928,18 +2927,10 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, goto out; } - copy_destructor = gso_skb->destructor == tcp_wfree; - ooo_okay = gso_skb->ooo_okay; - /* All segments but the first should have ooo_okay cleared */ - skb->ooo_okay = 0; - segs = skb_segment(skb, features); if (IS_ERR(segs)) goto out; - /* Only first segment might have ooo_okay set */ - segs->ooo_okay = ooo_okay; - delta = htonl(oldlen + (thlen + mss)); skb = segs; @@ -2959,17 +2950,6 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, thlen, skb->csum)); seq += mss; - if (copy_destructor) { - skb->destructor = gso_skb->destructor; - skb->sk = gso_skb->sk; - /* {tcp|sock}_wfree() use exact truesize accounting : - * sum(skb->truesize) MUST be exactly be gso_skb->truesize - * So we account mss bytes of 'true size' for each segment. - * The last segment will contain the remaining. 
- */ - skb->truesize = mss; - gso_skb->truesize -= mss; - } skb = skb->next; th = tcp_hdr(skb); @@ -2982,7 +2962,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, * is freed at TX completion, and not right now when gso_skb * is freed by GSO engine */ - if (copy_destructor) { + if (gso_skb->destructor == tcp_wfree) { swap(gso_skb->sk, skb->sk); swap(gso_skb->destructor, skb->destructor); swap(gso_skb->truesize, skb->truesize); @@ -3289,11 +3269,8 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, for (i = 0; i < shi->nr_frags; ++i) { const struct skb_frag_struct *f = &shi->frags[i]; - unsigned int offset = f->page_offset; - struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT); - - sg_set_page(&sg, page, skb_frag_size(f), - offset_in_page(offset)); + struct page *page = skb_frag_page(f); + sg_set_page(&sg, page, skb_frag_size(f), f->page_offset); if (crypto_hash_update(desc, &sg, skb_frag_size(f))) return 1; } diff --git a/trunk/net/ipv4/tcp_input.c b/trunk/net/ipv4/tcp_input.c index 9c6225780bd5..08bbe6096528 100644 --- a/trunk/net/ipv4/tcp_input.c +++ b/trunk/net/ipv4/tcp_input.c @@ -2743,8 +2743,8 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack) * tcp_xmit_retransmit_queue(). */ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, - int prior_sacked, int prior_packets, - bool is_dupack, int flag) + int prior_sacked, bool is_dupack, + int flag) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); @@ -2804,8 +2804,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, tcp_add_reno_sack(sk); } else do_lost = tcp_try_undo_partial(sk, pkts_acked); - newly_acked_sacked = prior_packets - tp->packets_out + - tp->sacked_out - prior_sacked; + newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; break; case TCP_CA_Loss: tcp_process_loss(sk, flag, is_dupack); @@ -2819,8 +2818,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, if (is_dupack) tcp_add_reno_sack(sk); } - newly_acked_sacked = prior_packets - tp->packets_out + - tp->sacked_out - prior_sacked; + newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; if (icsk->icsk_ca_state <= TCP_CA_Disorder) tcp_try_undo_dsack(sk); @@ -3332,10 +3330,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) bool is_dupack = false; u32 prior_in_flight; u32 prior_fackets; - int prior_packets = tp->packets_out; + int prior_packets; int prior_sacked = tp->sacked_out; int pkts_acked = 0; - int previous_packets_out = 0; /* If the ack is older than previous acks * then we can probably ignore it. @@ -3406,14 +3403,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) sk->sk_err_soft = 0; icsk->icsk_probes_out = 0; tp->rcv_tstamp = tcp_time_stamp; + prior_packets = tp->packets_out; if (!prior_packets) goto no_queue; /* See if we can take anything off of the retransmit queue. */ - previous_packets_out = tp->packets_out; flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); - pkts_acked = previous_packets_out - tp->packets_out; + pkts_acked = prior_packets - tp->packets_out; if (tcp_ack_is_dubious(sk, flag)) { /* Advance CWND, if state allows this. 
*/ @@ -3421,7 +3418,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_cong_avoid(sk, ack, prior_in_flight); is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, - prior_packets, is_dupack, flag); + is_dupack, flag); } else { if (flag & FLAG_DATA_ACKED) tcp_cong_avoid(sk, ack, prior_in_flight); @@ -3444,7 +3441,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) /* If data was DSACKed, see if we can undo a cwnd reduction. */ if (flag & FLAG_DSACKING_ACK) tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, - prior_packets, is_dupack, flag); + is_dupack, flag); /* If this ack opens up a zero window, clear backoff. It was * being used to time the probes, and is probably far higher than * it needs to be for normal retransmission. @@ -3467,7 +3464,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if (TCP_SKB_CB(skb)->sacked) { flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, - prior_packets, is_dupack, flag); + is_dupack, flag); } SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); diff --git a/trunk/net/ipv4/tcp_ipv4.c b/trunk/net/ipv4/tcp_ipv4.c index 7999fc55c83b..719652305a29 100644 --- a/trunk/net/ipv4/tcp_ipv4.c +++ b/trunk/net/ipv4/tcp_ipv4.c @@ -1003,7 +1003,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_info *md5sig; - key = tcp_md5_do_lookup(sk, addr, family); + key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET); if (key) { /* Pre-existing entry - just update that one. */ memcpy(key->key, newkey, newkeylen); @@ -1048,7 +1048,7 @@ int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family) struct tcp_md5sig_key *key; struct tcp_md5sig_info *md5sig; - key = tcp_md5_do_lookup(sk, addr, family); + key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET); if (!key) return -ENOENT; hlist_del_rcu(&key->node); diff --git a/trunk/net/ipv4/tcp_output.c b/trunk/net/ipv4/tcp_output.c index ec335fabd5cc..536d40929ba6 100644 --- a/trunk/net/ipv4/tcp_output.c +++ b/trunk/net/ipv4/tcp_output.c @@ -874,13 +874,11 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, &md5); tcp_header_size = tcp_options_size + sizeof(struct tcphdr); - if (tcp_packets_in_flight(tp) == 0) + if (tcp_packets_in_flight(tp) == 0) { tcp_ca_event(sk, CA_EVENT_TX_START); - - /* if no packet is in qdisc/device queue, then allow XPS to select - * another queue. 
- */ - skb->ooo_okay = sk_wmem_alloc_get(sk) == 0; + skb->ooo_okay = 1; + } else + skb->ooo_okay = 0; skb_push(skb, tcp_header_size); skb_reset_transport_header(skb); diff --git a/trunk/net/ipv6/addrconf.c b/trunk/net/ipv6/addrconf.c index 4ab4c38958c6..d1ab6ab29a55 100644 --- a/trunk/net/ipv6/addrconf.c +++ b/trunk/net/ipv6/addrconf.c @@ -1487,7 +1487,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev) } int ipv6_chk_addr(struct net *net, const struct in6_addr *addr, - const struct net_device *dev, int strict) + struct net_device *dev, int strict) { struct inet6_ifaddr *ifp; unsigned int hash = inet6_addr_hash(addr); @@ -2655,16 +2655,11 @@ static void init_loopback(struct net_device *dev) if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) continue; - if (sp_ifa->rt) - continue; - sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); /* Failure cases are ignored */ - if (!IS_ERR(sp_rt)) { - sp_ifa->rt = sp_rt; + if (!IS_ERR(sp_rt)) ip6_ins_rt(sp_rt); - } } read_unlock_bh(&idev->lock); } @@ -4306,7 +4301,6 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) struct inet6_ifaddr *ifp; struct net_device *dev = idev->dev; bool update_rs = false; - struct in6_addr ll_addr; if (token == NULL) return -EINVAL; @@ -4326,9 +4320,11 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) write_unlock_bh(&idev->lock); - if (!idev->dead && (idev->if_flags & IF_READY) && - !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE | - IFA_F_OPTIMISTIC)) { + if (!idev->dead && (idev->if_flags & IF_READY)) { + struct in6_addr ll_addr; + + ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE | + IFA_F_OPTIMISTIC); /* If we're not ready, then normal ifup will take care * of this. Otherwise, we need to request our rs here. diff --git a/trunk/net/ipv6/ip6_gre.c b/trunk/net/ipv6/ip6_gre.c index ecd60733e5e2..d3ddd8400354 100644 --- a/trunk/net/ipv6/ip6_gre.c +++ b/trunk/net/ipv6/ip6_gre.c @@ -1081,7 +1081,6 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev, } if (t == NULL) t = netdev_priv(dev); - memset(&p, 0, sizeof(p)); ip6gre_tnl_parm_to_user(&p, &t->parms); if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) err = -EFAULT; @@ -1129,7 +1128,6 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev, if (t) { err = 0; - memset(&p, 0, sizeof(p)); ip6gre_tnl_parm_to_user(&p, &t->parms); if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) err = -EFAULT; diff --git a/trunk/net/ipv6/ip6_output.c b/trunk/net/ipv6/ip6_output.c index d5d20cde8d92..d2eedf192330 100644 --- a/trunk/net/ipv6/ip6_output.c +++ b/trunk/net/ipv6/ip6_output.c @@ -381,8 +381,9 @@ int ip6_forward(struct sk_buff *skb) * cannot be fragmented, because there is no warranty * that different fragments will go along one path. --ANK */ - if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) { - if (ip6_call_ra_chain(skb, ntohs(opt->ra))) + if (opt->ra) { + u8 *ptr = skb_network_header(skb) + opt->ra; + if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3])) return 0; } @@ -821,17 +822,11 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk, const struct flowi6 *fl6) { struct ipv6_pinfo *np = inet6_sk(sk); - struct rt6_info *rt; + struct rt6_info *rt = (struct rt6_info *)dst; if (!dst) goto out; - if (dst->ops->family != AF_INET6) { - dst_release(dst); - return NULL; - } - - rt = (struct rt6_info *)dst; /* Yes, checking route validity in not connected * case is not very simple. 
Take into account, * that we do not support routing by source, TOS, @@ -1152,7 +1147,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, if (WARN_ON(np->cork.opt)) return -EINVAL; - np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation); + np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation); if (unlikely(np->cork.opt == NULL)) return -ENOBUFS; diff --git a/trunk/net/ipv6/ndisc.c b/trunk/net/ipv6/ndisc.c index ca4ffcc287f1..2712ab22a174 100644 --- a/trunk/net/ipv6/ndisc.c +++ b/trunk/net/ipv6/ndisc.c @@ -1493,7 +1493,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) */ if (ha) - ndisc_fill_addr_option(buff, ND_OPT_TARGET_LL_ADDR, ha); + ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR, ha); /* * build redirect option and copy skb over to the new packet. diff --git a/trunk/net/ipv6/netfilter.c b/trunk/net/ipv6/netfilter.c index 95f3f1da0d7f..72836f40b730 100644 --- a/trunk/net/ipv6/netfilter.c +++ b/trunk/net/ipv6/netfilter.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -187,10 +186,6 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook, return csum; }; -static const struct nf_ipv6_ops ipv6ops = { - .chk_addr = ipv6_chk_addr, -}; - static const struct nf_afinfo nf_ip6_afinfo = { .family = AF_INET6, .checksum = nf_ip6_checksum, @@ -203,7 +198,6 @@ static const struct nf_afinfo nf_ip6_afinfo = { int __init ipv6_netfilter_init(void) { - RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops); return nf_register_afinfo(&nf_ip6_afinfo); } @@ -212,6 +206,5 @@ int __init ipv6_netfilter_init(void) */ void ipv6_netfilter_fini(void) { - RCU_INIT_POINTER(nf_ipv6_ops, NULL); nf_unregister_afinfo(&nf_ip6_afinfo); } diff --git a/trunk/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/trunk/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index c9b6a6e6a1e8..97bcf2bae857 100644 --- a/trunk/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/trunk/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -204,7 +204,7 @@ static unsigned int __ipv6_conntrack_in(struct net *net, if (ct != NULL && !nf_ct_is_untracked(ct)) { help = nfct_help(ct); if ((help && help->helper) || !nf_ct_is_confirmed(ct)) { - nf_conntrack_get_reasm(reasm); + nf_conntrack_get_reasm(skb); NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm, (struct net_device *)in, (struct net_device *)out, diff --git a/trunk/net/ipv6/proc.c b/trunk/net/ipv6/proc.c index 51c3285b5d9b..f3c1ff4357ff 100644 --- a/trunk/net/ipv6/proc.c +++ b/trunk/net/ipv6/proc.c @@ -90,7 +90,7 @@ static const struct snmp_mib snmp6_ipstats_list[] = { SNMP_MIB_ITEM("Ip6OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS), SNMP_MIB_ITEM("Ip6InBcastOctets", IPSTATS_MIB_INBCASTOCTETS), SNMP_MIB_ITEM("Ip6OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS), - /* IPSTATS_MIB_CSUMERRORS is not relevant in IPv6 (no checksum) */ + SNMP_MIB_ITEM("InCsumErrors", IPSTATS_MIB_CSUMERRORS), SNMP_MIB_SENTINEL }; diff --git a/trunk/net/ipv6/tcp_ipv6.c b/trunk/net/ipv6/tcp_ipv6.c index 0a17ed9eaf39..71167069b394 100644 --- a/trunk/net/ipv6/tcp_ipv6.c +++ b/trunk/net/ipv6/tcp_ipv6.c @@ -1890,17 +1890,6 @@ void tcp6_proc_exit(struct net *net) } #endif -static void tcp_v6_clear_sk(struct sock *sk, int size) -{ - struct inet_sock *inet = inet_sk(sk); - - /* we do not want to clear pinet6 field, because of RCU lookups */ - sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6)); - - size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6); - memset(&inet->pinet6 + 1, 0, size); -} - struct proto 
tcpv6_prot = { .name = "TCPv6", .owner = THIS_MODULE, @@ -1944,7 +1933,6 @@ struct proto tcpv6_prot = { #ifdef CONFIG_MEMCG_KMEM .proto_cgroup = tcp_proto_cgroup, #endif - .clear_sk = tcp_v6_clear_sk, }; static const struct inet6_protocol tcpv6_protocol = { diff --git a/trunk/net/ipv6/udp.c b/trunk/net/ipv6/udp.c index 42923b14dfa6..d4defdd44937 100644 --- a/trunk/net/ipv6/udp.c +++ b/trunk/net/ipv6/udp.c @@ -1432,17 +1432,6 @@ void udp6_proc_exit(struct net *net) { } #endif /* CONFIG_PROC_FS */ -void udp_v6_clear_sk(struct sock *sk, int size) -{ - struct inet_sock *inet = inet_sk(sk); - - /* we do not want to clear pinet6 field, because of RCU lookups */ - sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6)); - - size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6); - memset(&inet->pinet6 + 1, 0, size); -} - /* ------------------------------------------------------------------------ */ struct proto udpv6_prot = { @@ -1473,7 +1462,7 @@ struct proto udpv6_prot = { .compat_setsockopt = compat_udpv6_setsockopt, .compat_getsockopt = compat_udpv6_getsockopt, #endif - .clear_sk = udp_v6_clear_sk, + .clear_sk = sk_prot_clear_portaddr_nulls, }; static struct inet_protosw udpv6_protosw = { diff --git a/trunk/net/ipv6/udp_impl.h b/trunk/net/ipv6/udp_impl.h index 4691ed50a928..d7571046bfc4 100644 --- a/trunk/net/ipv6/udp_impl.h +++ b/trunk/net/ipv6/udp_impl.h @@ -31,8 +31,6 @@ extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); extern void udpv6_destroy_sock(struct sock *sk); -extern void udp_v6_clear_sk(struct sock *sk, int size); - #ifdef CONFIG_PROC_FS extern int udp6_seq_show(struct seq_file *seq, void *v); #endif diff --git a/trunk/net/ipv6/udp_offload.c b/trunk/net/ipv6/udp_offload.c index d3cfaf9c7a08..3bb3a891a424 100644 --- a/trunk/net/ipv6/udp_offload.c +++ b/trunk/net/ipv6/udp_offload.c @@ -46,12 +46,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, unsigned int mss; unsigned int unfrag_ip6hlen, unfrag_len; struct frag_hdr *fptr; - u8 *packet_start, *prevhdr; + u8 *mac_start, *prevhdr; u8 nexthdr; u8 frag_hdr_sz = sizeof(struct frag_hdr); int offset; __wsum csum; - int tnl_hlen; mss = skb_shinfo(skb)->gso_size; if (unlikely(skb->len <= mss)) @@ -84,11 +83,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. */ - tnl_hlen = skb_tnl_header_len(skb); - if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) { - if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz)) - goto out; - } + if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && + pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) + goto out; /* Find the unfragmentable header and shift it left by frag_hdr_sz * bytes to insert fragment header. 
@@ -96,12 +93,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); nexthdr = *prevhdr; *prevhdr = NEXTHDR_FRAGMENT; - unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) + - unfrag_ip6hlen + tnl_hlen; - packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset; - memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len); + unfrag_len = skb_network_header(skb) - skb_mac_header(skb) + + unfrag_ip6hlen; + mac_start = skb_mac_header(skb); + memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len); - SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz; skb->mac_header -= frag_hdr_sz; skb->network_header -= frag_hdr_sz; diff --git a/trunk/net/ipv6/udplite.c b/trunk/net/ipv6/udplite.c index dfcc4be46898..1d08e21d9f69 100644 --- a/trunk/net/ipv6/udplite.c +++ b/trunk/net/ipv6/udplite.c @@ -56,7 +56,7 @@ struct proto udplitev6_prot = { .compat_setsockopt = compat_udpv6_setsockopt, .compat_getsockopt = compat_udpv6_getsockopt, #endif - .clear_sk = udp_v6_clear_sk, + .clear_sk = sk_prot_clear_portaddr_nulls, }; static struct inet_protosw udplite6_protosw = { diff --git a/trunk/net/ipv6/xfrm6_policy.c b/trunk/net/ipv6/xfrm6_policy.c index 23ed03d786c8..4ef7bdb65440 100644 --- a/trunk/net/ipv6/xfrm6_policy.c +++ b/trunk/net/ipv6/xfrm6_policy.c @@ -103,10 +103,8 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, dev_hold(dev); xdst->u.rt6.rt6i_idev = in6_dev_get(dev); - if (!xdst->u.rt6.rt6i_idev) { - dev_put(dev); + if (!xdst->u.rt6.rt6i_idev) return -ENODEV; - } rt6_transfer_peer(&xdst->u.rt6, rt); diff --git a/trunk/net/irda/irlap_frame.c b/trunk/net/irda/irlap_frame.c index 9ea0c933b9ff..8c004161a843 100644 --- a/trunk/net/irda/irlap_frame.c +++ b/trunk/net/irda/irlap_frame.c @@ -544,7 +544,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self, /* * We now have some discovery info to deliver! 
*/ - discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC); + discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC); if (!discovery) { IRDA_WARNING("%s: unable to malloc!\n", __func__); return; diff --git a/trunk/net/key/af_key.c b/trunk/net/key/af_key.c index 9da862070dd8..5b1e5af25713 100644 --- a/trunk/net/key/af_key.c +++ b/trunk/net/key/af_key.c @@ -1710,7 +1710,6 @@ static int key_notify_sa_flush(const struct km_event *c) hdr->sadb_msg_version = PF_KEY_V2; hdr->sadb_msg_errno = (uint8_t) 0; hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); - hdr->sadb_msg_reserved = 0; pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); @@ -2367,8 +2366,6 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa out: xfrm_pol_put(xp); - if (err == 0) - xfrm_garbage_collect(net); return err; } @@ -2618,8 +2615,6 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_ out: xfrm_pol_put(xp); - if (delete && err == 0) - xfrm_garbage_collect(net); return err; } @@ -2700,7 +2695,6 @@ static int key_notify_policy_flush(const struct km_event *c) hdr->sadb_msg_errno = (uint8_t) 0; hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); - hdr->sadb_msg_reserved = 0; pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); return 0; diff --git a/trunk/net/l2tp/l2tp_ppp.c b/trunk/net/l2tp/l2tp_ppp.c index 8dec6876dc50..637a341c1e2d 100644 --- a/trunk/net/l2tp/l2tp_ppp.c +++ b/trunk/net/l2tp/l2tp_ppp.c @@ -346,19 +346,19 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh skb_put(skb, 2); /* Copy user data into skb */ - error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov, - total_len); + error = memcpy_fromiovec(skb->data, m->msg_iov, total_len); if (error < 0) { kfree_skb(skb); goto error_put_sess_tun; } + skb_put(skb, total_len); l2tp_xmit_skb(session, skb, session->hdr_len); sock_put(ps->tunnel_sock); sock_put(sk); - return total_len; + return error; error_put_sess_tun: sock_put(ps->tunnel_sock); diff --git a/trunk/net/mac80211/cfg.c b/trunk/net/mac80211/cfg.c index 4fdb306e42e0..1a89c80e6407 100644 --- a/trunk/net/mac80211/cfg.c +++ b/trunk/net/mac80211/cfg.c @@ -1057,12 +1057,6 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); - if (sdata->wdev.cac_started) { - cancel_delayed_work_sync(&sdata->dfs_cac_timer_work); - cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_ABORTED, - GFP_KERNEL); - } - drv_stop_ap(sdata->local, sdata); /* free all potentially still buffered bcast frames */ diff --git a/trunk/net/mac80211/ieee80211_i.h b/trunk/net/mac80211/ieee80211_i.h index 9ca8e3278cc0..158e6eb188d3 100644 --- a/trunk/net/mac80211/ieee80211_i.h +++ b/trunk/net/mac80211/ieee80211_i.h @@ -1267,7 +1267,6 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata); void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata); void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata, __le16 fc, bool acked); -void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); /* IBSS code */ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); @@ -1497,11 +1496,10 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, ieee80211_tx_skb_tid(sdata, skb, 7); } -u32 ieee802_11_parse_elems_crc(const u8 *start, size_t 
len, bool action, +u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action, struct ieee802_11_elems *elems, u64 filter, u32 crc); -static inline void ieee802_11_parse_elems(const u8 *start, size_t len, - bool action, +static inline void ieee802_11_parse_elems(u8 *start, size_t len, bool action, struct ieee802_11_elems *elems) { ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0); diff --git a/trunk/net/mac80211/iface.c b/trunk/net/mac80211/iface.c index 98d20c0f6fed..60f1ce5e5e52 100644 --- a/trunk/net/mac80211/iface.c +++ b/trunk/net/mac80211/iface.c @@ -159,10 +159,9 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr) +static int ieee80211_verify_mac(struct ieee80211_local *local, u8 *addr) { - struct ieee80211_local *local = sdata->local; - struct ieee80211_sub_if_data *iter; + struct ieee80211_sub_if_data *sdata; u64 new, mask, tmp; u8 *m; int ret = 0; @@ -182,14 +181,11 @@ static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr) mutex_lock(&local->iflist_mtx); - list_for_each_entry(iter, &local->interfaces, list) { - if (iter == sdata) - continue; - - if (iter->vif.type == NL80211_IFTYPE_MONITOR) + list_for_each_entry(sdata, &local->interfaces, list) { + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) continue; - m = iter->vif.addr; + m = sdata->vif.addr; tmp = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); @@ -213,7 +209,7 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr) if (ieee80211_sdata_running(sdata)) return -EBUSY; - ret = ieee80211_verify_mac(sdata, sa->sa_data); + ret = ieee80211_verify_mac(sdata->local, sa->sa_data); if (ret) return ret; @@ -478,9 +474,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) master->control_port_protocol; sdata->control_port_no_encrypt = master->control_port_no_encrypt; - sdata->vif.cab_queue = master->vif.cab_queue; - memcpy(sdata->vif.hw_queue, master->vif.hw_queue, - sizeof(sdata->vif.hw_queue)); break; } case NL80211_IFTYPE_AP: @@ -660,11 +653,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) ieee80211_recalc_ps(local, -1); - if (sdata->vif.type == NL80211_IFTYPE_MONITOR || - sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { - /* XXX: for AP_VLAN, actually track AP queues */ - netif_tx_start_all_queues(dev); - } else if (dev) { + if (dev) { unsigned long flags; int n_acs = IEEE80211_NUM_ACS; int ac; @@ -1490,17 +1479,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local, break; } - /* - * Pick address of existing interface in case user changed - * MAC address manually, default to perm_addr. - */ m = local->hw.wiphy->perm_addr; - list_for_each_entry(sdata, &local->interfaces, list) { - if (sdata->vif.type == NL80211_IFTYPE_MONITOR) - continue; - m = sdata->vif.addr; - break; - } start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); @@ -1717,15 +1696,6 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) ASSERT_RTNL(); - /* - * Close all AP_VLAN interfaces first, as otherwise they - * might be closed while the AP interface they belong to - * is closed, causing unregister_netdevice_many() to crash. 
- */ - list_for_each_entry(sdata, &local->interfaces, list) - if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) - dev_close(sdata->dev); - mutex_lock(&local->iflist_mtx); list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { list_del(&sdata->list); diff --git a/trunk/net/mac80211/mlme.c b/trunk/net/mac80211/mlme.c index 741448b30825..29620bfc7a69 100644 --- a/trunk/net/mac80211/mlme.c +++ b/trunk/net/mac80211/mlme.c @@ -1015,8 +1015,7 @@ static void ieee80211_chswitch_timer(unsigned long data) static void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, - u64 timestamp, struct ieee802_11_elems *elems, - bool beacon) + u64 timestamp, struct ieee802_11_elems *elems) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; @@ -1033,7 +1032,6 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, struct cfg80211_chan_def new_vht_chandef = {}; const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie; - const struct ieee80211_ht_operation *ht_oper; int secondary_channel_offset = -1; ASSERT_MGD_MTX(ifmgd); @@ -1050,14 +1048,11 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, sec_chan_offs = elems->sec_chan_offs; wide_bw_chansw_ie = elems->wide_bw_chansw_ie; - ht_oper = elems->ht_operation; if (ifmgd->flags & (IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_40MHZ)) { sec_chan_offs = NULL; wide_bw_chansw_ie = NULL; - /* only used for bandwidth here */ - ht_oper = NULL; } if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT) @@ -1099,20 +1094,10 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, return; } - if (!beacon && sec_chan_offs) { + if (sec_chan_offs) { secondary_channel_offset = sec_chan_offs->sec_chan_offs; - } else if (beacon && ht_oper) { - secondary_channel_offset = - ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET; } else if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) { - /* - * If it's not a beacon, HT is enabled and the IE not present, - * it's 20 MHz, 802.11-2012 8.5.2.6: - * This element [the Secondary Channel Offset Element] is - * present when switching to a 40 MHz channel. It may be - * present when switching to a 20 MHz channel (in which - * case the secondary channel offset is set to SCN). - */ + /* if HT is enabled and the IE not present, it's still HT */ secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; } @@ -2522,11 +2507,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, u16 capab_info, aid; struct ieee802_11_elems elems; struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; - const struct cfg80211_bss_ies *bss_ies = NULL; - struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; u32 changed = 0; int err; - bool ret; /* AssocResp and ReassocResp have identical structure */ @@ -2557,69 +2539,6 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, ifmgd->aid = aid; - /* - * Some APs are erroneously not including some information in their - * (re)association response frames. Try to recover by using the data - * from the beacon or probe response. This seems to afflict mobile - * 2G/3G/4G wifi routers, reported models include the "Onda PN51T", - * "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device. 
- */ - if ((assoc_data->wmm && !elems.wmm_param) || - (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && - (!elems.ht_cap_elem || !elems.ht_operation)) || - (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && - (!elems.vht_cap_elem || !elems.vht_operation))) { - const struct cfg80211_bss_ies *ies; - struct ieee802_11_elems bss_elems; - - rcu_read_lock(); - ies = rcu_dereference(cbss->ies); - if (ies) - bss_ies = kmemdup(ies, sizeof(*ies) + ies->len, - GFP_ATOMIC); - rcu_read_unlock(); - if (!bss_ies) - return false; - - ieee802_11_parse_elems(bss_ies->data, bss_ies->len, - false, &bss_elems); - if (assoc_data->wmm && - !elems.wmm_param && bss_elems.wmm_param) { - elems.wmm_param = bss_elems.wmm_param; - sdata_info(sdata, - "AP bug: WMM param missing from AssocResp\n"); - } - - /* - * Also check if we requested HT/VHT, otherwise the AP doesn't - * have to include the IEs in the (re)association response. - */ - if (!elems.ht_cap_elem && bss_elems.ht_cap_elem && - !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) { - elems.ht_cap_elem = bss_elems.ht_cap_elem; - sdata_info(sdata, - "AP bug: HT capability missing from AssocResp\n"); - } - if (!elems.ht_operation && bss_elems.ht_operation && - !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) { - elems.ht_operation = bss_elems.ht_operation; - sdata_info(sdata, - "AP bug: HT operation missing from AssocResp\n"); - } - if (!elems.vht_cap_elem && bss_elems.vht_cap_elem && - !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) { - elems.vht_cap_elem = bss_elems.vht_cap_elem; - sdata_info(sdata, - "AP bug: VHT capa missing from AssocResp\n"); - } - if (!elems.vht_operation && bss_elems.vht_operation && - !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) { - elems.vht_operation = bss_elems.vht_operation; - sdata_info(sdata, - "AP bug: VHT operation missing from AssocResp\n"); - } - } - /* * We previously checked these in the beacon/probe response, so * they should be present here. This is just a safety net. 
@@ -2627,17 +2546,15 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) { sdata_info(sdata, - "HT AP is missing WMM params or HT capability/operation\n"); - ret = false; - goto out; + "HT AP is missing WMM params or HT capability/operation in AssocResp\n"); + return false; } if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && (!elems.vht_cap_elem || !elems.vht_operation)) { sdata_info(sdata, - "VHT AP is missing VHT capability/operation\n"); - ret = false; - goto out; + "VHT AP is missing VHT capability/operation in AssocResp\n"); + return false; } mutex_lock(&sdata->local->sta_mtx); @@ -2648,8 +2565,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, sta = sta_info_get(sdata, cbss->bssid); if (WARN_ON(!sta)) { mutex_unlock(&sdata->local->sta_mtx); - ret = false; - goto out; + return false; } sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)]; @@ -2702,8 +2618,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, sta->sta.addr); WARN_ON(__sta_info_destroy(sta)); mutex_unlock(&sdata->local->sta_mtx); - ret = false; - goto out; + return false; } mutex_unlock(&sdata->local->sta_mtx); @@ -2743,10 +2658,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt); ieee80211_sta_reset_beacon_monitor(sdata); - ret = true; - out: - kfree(bss_ies); - return ret; + return true; } static enum rx_mgmt_action __must_check @@ -2884,8 +2796,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, mutex_unlock(&local->iflist_mtx); } - ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, - elems, true); + ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, elems); } @@ -3299,7 +3210,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, - &elems, false); + &elems); } else if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) { ies_len = skb->len - offsetof(struct ieee80211_mgmt, @@ -3321,7 +3232,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, - &elems, false); + &elems); } break; } @@ -3394,6 +3305,10 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) if (WARN_ON_ONCE(!auth_data)) return -EINVAL; + if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) + tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS | + IEEE80211_TX_INTFL_MLME_CONN_TX; + auth_data->tries++; if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) { @@ -3427,10 +3342,6 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) auth_data->expected_transaction = trans; } - if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) - tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS | - IEEE80211_TX_INTFL_MLME_CONN_TX; - ieee80211_send_auth(sdata, trans, auth_data->algorithm, status, auth_data->data, auth_data->data_len, auth_data->bss->bssid, @@ -3454,12 +3365,12 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) * will not answer to direct packet in unassociated state. 
*/ ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1], - NULL, 0, (u32) -1, true, 0, + NULL, 0, (u32) -1, true, tx_flags, auth_data->bss->channel, false); rcu_read_unlock(); } - if (tx_flags == 0) { + if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) { auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; ifmgd->auth_data->timeout_started = true; run_again(ifmgd, auth_data->timeout); @@ -3712,31 +3623,6 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) } } -#ifdef CONFIG_PM -void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) -{ - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - - mutex_lock(&ifmgd->mtx); - if (!ifmgd->associated) { - mutex_unlock(&ifmgd->mtx); - return; - } - - if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) { - sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME; - mlme_dbg(sdata, "driver requested disconnect after resume\n"); - ieee80211_sta_connection_lost(sdata, - ifmgd->associated->bssid, - WLAN_REASON_UNSPECIFIED, - true); - mutex_unlock(&ifmgd->mtx); - return; - } - mutex_unlock(&ifmgd->mtx); -} -#endif - /* interface setup */ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) { @@ -4443,7 +4329,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; bool tx = !req->local_state_change; - bool report_frame = false; + bool sent_frame = false; mutex_lock(&ifmgd->mtx); @@ -4460,7 +4346,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, ieee80211_destroy_auth_data(sdata, false); mutex_unlock(&ifmgd->mtx); - report_frame = true; + sent_frame = tx; goto out; } @@ -4468,12 +4354,12 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, ether_addr_equal(ifmgd->associated->bssid, req->bssid)) { ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, req->reason_code, tx, frame_buf); - report_frame = true; + sent_frame = tx; } mutex_unlock(&ifmgd->mtx); out: - if (report_frame) + if (sent_frame) __cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); diff --git a/trunk/net/mac80211/rate.c b/trunk/net/mac80211/rate.c index a02bef35b134..0d51877efdb7 100644 --- a/trunk/net/mac80211/rate.c +++ b/trunk/net/mac80211/rate.c @@ -615,7 +615,7 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata, if (rates[i].idx < 0) break; - rate_idx_match_mask(&rates[i], sband, chan_width, mask, + rate_idx_match_mask(&rates[i], sband, mask, chan_width, mcs_mask); } } @@ -688,15 +688,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, struct ieee80211_sta_rates *rates) { - struct ieee80211_sta_rates *old; + struct ieee80211_sta_rates *old = rcu_dereference(pubsta->rates); - /* - * mac80211 guarantees that this function will not be called - * concurrently, so the following RCU access is safe, even without - * extra locking. This can not be checked easily, so we just set - * the condition to true. - */ - old = rcu_dereference_protected(pubsta->rates, true); rcu_assign_pointer(pubsta->rates, rates); if (old) kfree_rcu(old, rcu_head); diff --git a/trunk/net/mac80211/rx.c b/trunk/net/mac80211/rx.c index 8e2952620256..c8447af76ead 100644 --- a/trunk/net/mac80211/rx.c +++ b/trunk/net/mac80211/rx.c @@ -3036,9 +3036,6 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, * and location updates. Note that mac80211 * itself never looks at these frames. 
*/ - if (!multicast && - !ether_addr_equal(sdata->vif.addr, hdr->addr1)) - return 0; if (ieee80211_is_public_action(hdr, skb->len)) return 1; if (!ieee80211_is_beacon(hdr->frame_control)) diff --git a/trunk/net/mac80211/tkip.c b/trunk/net/mac80211/tkip.c index 124b1fdc20d0..3ed801d90f1e 100644 --- a/trunk/net/mac80211/tkip.c +++ b/trunk/net/mac80211/tkip.c @@ -208,10 +208,10 @@ void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf, u32 iv32 = get_unaligned_le32(&data[4]); u16 iv16 = data[2] | (data[0] << 8); - spin_lock(&key->u.tkip.txlock); + spin_lock_bh(&key->u.tkip.txlock); ieee80211_compute_tkip_p1k(key, iv32); tkip_mixing_phase2(tk, ctx, iv16, p2k); - spin_unlock(&key->u.tkip.txlock); + spin_unlock_bh(&key->u.tkip.txlock); } EXPORT_SYMBOL(ieee80211_get_tkip_p2k); diff --git a/trunk/net/mac80211/util.c b/trunk/net/mac80211/util.c index 72e6292955bb..3f87fa468b1f 100644 --- a/trunk/net/mac80211/util.c +++ b/trunk/net/mac80211/util.c @@ -661,12 +661,12 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw, } EXPORT_SYMBOL(ieee80211_queue_delayed_work); -u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, +u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action, struct ieee802_11_elems *elems, u64 filter, u32 crc) { size_t left = len; - const u8 *pos = start; + u8 *pos = start; bool calc_crc = filter != 0; DECLARE_BITMAP(seen_elems, 256); const u8 *ie; @@ -1740,13 +1740,6 @@ int ieee80211_reconfig(struct ieee80211_local *local) mb(); local->resuming = false; - list_for_each_entry(sdata, &local->interfaces, list) { - if (!ieee80211_sdata_running(sdata)) - continue; - if (sdata->vif.type == NL80211_IFTYPE_STATION) - ieee80211_sta_restart(sdata); - } - mod_timer(&local->sta_cleanup, jiffies + 1); #else WARN_ON(1); diff --git a/trunk/net/netfilter/core.c b/trunk/net/netfilter/core.c index 857ca9f35177..07c865a31a3d 100644 --- a/trunk/net/netfilter/core.c +++ b/trunk/net/netfilter/core.c @@ -30,8 +30,6 @@ static DEFINE_MUTEX(afinfo_mutex); const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly; EXPORT_SYMBOL(nf_afinfo); -const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly; -EXPORT_SYMBOL_GPL(nf_ipv6_ops); int nf_register_afinfo(const struct nf_afinfo *afinfo) { diff --git a/trunk/net/netfilter/ipvs/ip_vs_core.c b/trunk/net/netfilter/ipvs/ip_vs_core.c index 23b8eb53a569..085b5880ab0d 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_core.c +++ b/trunk/net/netfilter/ipvs/ip_vs_core.c @@ -1001,32 +1001,6 @@ static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len) return th->rst; } -static inline bool is_new_conn(const struct sk_buff *skb, - struct ip_vs_iphdr *iph) -{ - switch (iph->protocol) { - case IPPROTO_TCP: { - struct tcphdr _tcph, *th; - - th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph); - if (th == NULL) - return false; - return th->syn; - } - case IPPROTO_SCTP: { - sctp_chunkhdr_t *sch, schunk; - - sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t), - sizeof(schunk), &schunk); - if (sch == NULL) - return false; - return sch->type == SCTP_CID_INIT; - } - default: - return false; - } -} - /* Handle response packets: rewrite addresses and send away... 
*/ static unsigned int @@ -1442,8 +1416,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) /* do the statistics and put it back */ ip_vs_in_stats(cp, skb); - if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol || - IPPROTO_SCTP == cih->protocol) + if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) offset += 2 * sizeof(__u16); verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph); @@ -1639,15 +1612,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) * Check if the packet belongs to an existing connection entry */ cp = pp->conn_in_get(af, skb, &iph, 0); - - if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp && cp->dest && - unlikely(!atomic_read(&cp->dest->weight)) && !iph.fragoffs && - is_new_conn(skb, &iph)) { - ip_vs_conn_expire_now(cp); - __ip_vs_conn_put(cp); - cp = NULL; - } - if (unlikely(!cp) && !iph.fragoffs) { /* No (second) fragments need to enter here, as nf_defrag_ipv6 * replayed fragment zero will already have created the cp diff --git a/trunk/net/netfilter/ipvs/ip_vs_ctl.c b/trunk/net/netfilter/ipvs/ip_vs_ctl.c index 9e6c2a075a4c..5b142fb16480 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_ctl.c +++ b/trunk/net/netfilter/ipvs/ip_vs_ctl.c @@ -2542,7 +2542,6 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get, struct ip_vs_dest *dest; struct ip_vs_dest_entry entry; - memset(&entry, 0, sizeof(entry)); list_for_each_entry(dest, &svc->destinations, n_list) { if (count >= get->num_dests) break; diff --git a/trunk/net/netfilter/ipvs/ip_vs_sh.c b/trunk/net/netfilter/ipvs/ip_vs_sh.c index a65edfe4b16c..0df269d7c99f 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_sh.c +++ b/trunk/net/netfilter/ipvs/ip_vs_sh.c @@ -67,8 +67,8 @@ struct ip_vs_sh_bucket { #define IP_VS_SH_TAB_MASK (IP_VS_SH_TAB_SIZE - 1) struct ip_vs_sh_state { - struct rcu_head rcu_head; struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE]; + struct rcu_head rcu_head; }; /* diff --git a/trunk/net/netfilter/nf_conntrack_labels.c b/trunk/net/netfilter/nf_conntrack_labels.c index 355d2ef08094..8fe2e99428b7 100644 --- a/trunk/net/netfilter/nf_conntrack_labels.c +++ b/trunk/net/netfilter/nf_conntrack_labels.c @@ -45,7 +45,7 @@ int nf_connlabel_set(struct nf_conn *ct, u16 bit) if (test_bit(bit, labels->bits)) return 0; - if (!test_and_set_bit(bit, labels->bits)) + if (test_and_set_bit(bit, labels->bits)) nf_conntrack_event_cache(IPCT_LABEL, ct); return 0; diff --git a/trunk/net/netfilter/nf_conntrack_netlink.c b/trunk/net/netfilter/nf_conntrack_netlink.c index ecf065f94032..6d0f8a17c5b7 100644 --- a/trunk/net/netfilter/nf_conntrack_netlink.c +++ b/trunk/net/netfilter/nf_conntrack_netlink.c @@ -1825,7 +1825,6 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, nf_conntrack_eventmask_report((1 << IPCT_REPLY) | (1 << IPCT_ASSURED) | (1 << IPCT_HELPER) | - (1 << IPCT_LABEL) | (1 << IPCT_PROTOINFO) | (1 << IPCT_NATSEQADJ) | (1 << IPCT_MARK), diff --git a/trunk/net/netfilter/nf_log.c b/trunk/net/netfilter/nf_log.c index 3b18dd1be7d9..388656d5a9ec 100644 --- a/trunk/net/netfilter/nf_log.c +++ b/trunk/net/netfilter/nf_log.c @@ -148,7 +148,7 @@ void nf_log_packet(struct net *net, va_start(args, fmt); vsnprintf(prefix, sizeof(prefix), fmt, args); va_end(args); - logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix); + logger->logfn(pf, hooknum, skb, in, out, loginfo, prefix); } rcu_read_unlock(); } @@ -368,20 +368,17 @@ static int __net_init nf_log_net_init(struct net *net) return 0; out_sysctl: -#ifdef CONFIG_PROC_FS /* 
For init_net: errors will trigger panic, don't unroll on error. */ if (!net_eq(net, &init_net)) remove_proc_entry("nf_log", net->nf.proc_netfilter); -#endif + return ret; } static void __net_exit nf_log_net_exit(struct net *net) { netfilter_log_sysctl_exit(net); -#ifdef CONFIG_PROC_FS remove_proc_entry("nf_log", net->nf.proc_netfilter); -#endif } static struct pernet_operations nf_log_net_ops = { diff --git a/trunk/net/netfilter/nf_nat_sip.c b/trunk/net/netfilter/nf_nat_sip.c index dac11f73868e..96ccdf78a29f 100644 --- a/trunk/net/netfilter/nf_nat_sip.c +++ b/trunk/net/netfilter/nf_nat_sip.c @@ -230,10 +230,9 @@ static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff, &ct->tuplehash[!dir].tuple.src.u3, false); if (!mangle_packet(skb, protoff, dataoff, dptr, datalen, - poff, plen, buffer, buflen)) { + poff, plen, buffer, buflen)) nf_ct_helper_log(skb, ct, "cannot mangle received"); return NF_DROP; - } } /* The rport= parameter (RFC 3581) contains the port number diff --git a/trunk/net/netfilter/nfnetlink_acct.c b/trunk/net/netfilter/nfnetlink_acct.c index c7b6d466a662..dc3fd5d44464 100644 --- a/trunk/net/netfilter/nfnetlink_acct.c +++ b/trunk/net/netfilter/nfnetlink_acct.c @@ -149,12 +149,9 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); list_for_each_entry_rcu(cur, &nfnl_acct_list, head) { - if (last) { - if (cur != last) - continue; + if (last && cur != last) + continue; - last = NULL; - } if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NFNL_MSG_TYPE(cb->nlh->nlmsg_type), diff --git a/trunk/net/netfilter/nfnetlink_cttimeout.c b/trunk/net/netfilter/nfnetlink_cttimeout.c index 65074dfb9383..701c88a20fea 100644 --- a/trunk/net/netfilter/nfnetlink_cttimeout.c +++ b/trunk/net/netfilter/nfnetlink_cttimeout.c @@ -220,12 +220,9 @@ ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); list_for_each_entry_rcu(cur, &cttimeout_list, head) { - if (last) { - if (cur != last) - continue; + if (last && cur != last) + continue; - last = NULL; - } if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NFNL_MSG_TYPE(cb->nlh->nlmsg_type), diff --git a/trunk/net/netfilter/nfnetlink_log.c b/trunk/net/netfilter/nfnetlink_log.c index 962e9792e317..faf1e9300d8a 100644 --- a/trunk/net/netfilter/nfnetlink_log.c +++ b/trunk/net/netfilter/nfnetlink_log.c @@ -602,8 +602,7 @@ static struct nf_loginfo default_loginfo = { /* log handler for internal netfilter logging api */ void -nfulnl_log_packet(struct net *net, - u_int8_t pf, +nfulnl_log_packet(u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, @@ -616,6 +615,7 @@ nfulnl_log_packet(struct net *net, const struct nf_loginfo *li; unsigned int qthreshold; unsigned int plen; + struct net *net = dev_net(in ? 
in : out); struct nfnl_log_net *log = nfnl_log_pernet(net); if (li_user && li_user->type == NF_LOG_TYPE_ULOG) @@ -1045,9 +1045,7 @@ static int __net_init nfnl_log_net_init(struct net *net) static void __net_exit nfnl_log_net_exit(struct net *net) { -#ifdef CONFIG_PROC_FS remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter); -#endif } static struct pernet_operations nfnl_log_net_ops = { diff --git a/trunk/net/netfilter/nfnetlink_queue_core.c b/trunk/net/netfilter/nfnetlink_queue_core.c index 5352b2d2d5bf..2e0e835baf72 100644 --- a/trunk/net/netfilter/nfnetlink_queue_core.c +++ b/trunk/net/netfilter/nfnetlink_queue_core.c @@ -637,6 +637,9 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) if (queue->copy_mode == NFQNL_COPY_NONE) return -EINVAL; + if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(entry->skb)) + return __nfqnl_enqueue_packet(net, queue, entry); + skb = entry->skb; switch (entry->pf) { @@ -648,9 +651,6 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) break; } - if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb)) - return __nfqnl_enqueue_packet(net, queue, entry); - nf_bridge_adjust_skb_data(skb); segs = skb_gso_segment(skb, 0); /* Does not use PTR_ERR to limit the number of error codes that can be @@ -1285,9 +1285,7 @@ static int __net_init nfnl_queue_net_init(struct net *net) static void __net_exit nfnl_queue_net_exit(struct net *net) { -#ifdef CONFIG_PROC_FS remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter); -#endif } static struct pernet_operations nfnl_queue_net_ops = { diff --git a/trunk/net/netfilter/xt_LOG.c b/trunk/net/netfilter/xt_LOG.c index 5ab24843370a..fe573f6c9e91 100644 --- a/trunk/net/netfilter/xt_LOG.c +++ b/trunk/net/netfilter/xt_LOG.c @@ -466,8 +466,7 @@ log_packet_common(struct sbuff *m, static void -ipt_log_packet(struct net *net, - u_int8_t pf, +ipt_log_packet(u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, @@ -476,6 +475,7 @@ ipt_log_packet(struct net *net, const char *prefix) { struct sbuff *m; + struct net *net = dev_net(in ? in : out); /* FIXME: Disabled from containers until syslog ns is supported */ if (!net_eq(net, &init_net)) @@ -737,7 +737,7 @@ static void dump_ipv6_packet(struct sbuff *m, dump_sk_uid_gid(m, skb->sk); /* Max length: 16 "MARK=0xFFFFFFFF " */ - if (recurse && skb->mark) + if (!recurse && skb->mark) sb_add(m, "MARK=0x%x ", skb->mark); } @@ -797,8 +797,7 @@ static void dump_ipv6_mac_header(struct sbuff *m, } static void -ip6t_log_packet(struct net *net, - u_int8_t pf, +ip6t_log_packet(u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, @@ -807,6 +806,7 @@ ip6t_log_packet(struct net *net, const char *prefix) { struct sbuff *m; + struct net *net = dev_net(in ? in : out); /* FIXME: Disabled from containers until syslog ns is supported */ if (!net_eq(net, &init_net)) @@ -833,18 +833,17 @@ log_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_log_info *loginfo = par->targinfo; struct nf_loginfo li; - struct net *net = dev_net(par->in ? 
par->in : par->out); li.type = NF_LOG_TYPE_LOG; li.u.log.level = loginfo->level; li.u.log.logflags = loginfo->logflags; if (par->family == NFPROTO_IPV4) - ipt_log_packet(net, NFPROTO_IPV4, par->hooknum, skb, par->in, + ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in, par->out, &li, loginfo->prefix); #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) else if (par->family == NFPROTO_IPV6) - ip6t_log_packet(net, NFPROTO_IPV6, par->hooknum, skb, par->in, + ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in, par->out, &li, loginfo->prefix); #endif else diff --git a/trunk/net/netfilter/xt_NFLOG.c b/trunk/net/netfilter/xt_NFLOG.c index fb7497c928a0..a17dd0f589b2 100644 --- a/trunk/net/netfilter/xt_NFLOG.c +++ b/trunk/net/netfilter/xt_NFLOG.c @@ -26,14 +26,13 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_nflog_info *info = par->targinfo; struct nf_loginfo li; - struct net *net = dev_net(par->in ? par->in : par->out); li.type = NF_LOG_TYPE_ULOG; li.u.ulog.copy_len = info->len; li.u.ulog.group = info->group; li.u.ulog.qthreshold = info->threshold; - nfulnl_log_packet(net, par->family, par->hooknum, skb, par->in, + nfulnl_log_packet(par->family, par->hooknum, skb, par->in, par->out, &li, info->prefix); return XT_CONTINUE; } diff --git a/trunk/net/netfilter/xt_TCPMSS.c b/trunk/net/netfilter/xt_TCPMSS.c index 7011c71646f0..a75240f0d42b 100644 --- a/trunk/net/netfilter/xt_TCPMSS.c +++ b/trunk/net/netfilter/xt_TCPMSS.c @@ -45,22 +45,17 @@ optlen(const u_int8_t *opt, unsigned int offset) static int tcpmss_mangle_packet(struct sk_buff *skb, - const struct xt_action_param *par, + const struct xt_tcpmss_info *info, unsigned int in_mtu, unsigned int tcphoff, unsigned int minlen) { - const struct xt_tcpmss_info *info = par->targinfo; struct tcphdr *tcph; unsigned int tcplen, i; __be16 oldval; u16 newmss; u8 *opt; - /* This is a fragment, no TCP header is available */ - if (par->fragoff != 0) - return XT_CONTINUE; - if (!skb_make_writable(skb, skb->len)) return -1; @@ -130,18 +125,6 @@ tcpmss_mangle_packet(struct sk_buff *skb, skb_put(skb, TCPOLEN_MSS); - /* - * IPv4: RFC 1122 states "If an MSS option is not received at - * connection setup, TCP MUST assume a default send MSS of 536". 
- * IPv6: RFC 2460 states IPv6 has a minimum MTU of 1280 and a minimum - * length IPv6 header of 60, ergo the default MSS value is 1220 - * Since no MSS was provided, we must use the default values - */ - if (par->family == NFPROTO_IPV4) - newmss = min(newmss, (u16)536); - else - newmss = min(newmss, (u16)1220); - opt = (u_int8_t *)tcph + sizeof(struct tcphdr); memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr)); @@ -199,7 +182,7 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par) __be16 newlen; int ret; - ret = tcpmss_mangle_packet(skb, par, + ret = tcpmss_mangle_packet(skb, par->targinfo, tcpmss_reverse_mtu(skb, PF_INET), iph->ihl * 4, sizeof(*iph) + sizeof(struct tcphdr)); @@ -228,7 +211,7 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par) tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off); if (tcphoff < 0) return NF_DROP; - ret = tcpmss_mangle_packet(skb, par, + ret = tcpmss_mangle_packet(skb, par->targinfo, tcpmss_reverse_mtu(skb, PF_INET6), tcphoff, sizeof(*ipv6h) + sizeof(struct tcphdr)); diff --git a/trunk/net/netfilter/xt_TCPOPTSTRIP.c b/trunk/net/netfilter/xt_TCPOPTSTRIP.c index b68fa191710f..25fd1c4e1eec 100644 --- a/trunk/net/netfilter/xt_TCPOPTSTRIP.c +++ b/trunk/net/netfilter/xt_TCPOPTSTRIP.c @@ -30,31 +30,18 @@ static inline unsigned int optlen(const u_int8_t *opt, unsigned int offset) static unsigned int tcpoptstrip_mangle_packet(struct sk_buff *skb, - const struct xt_action_param *par, + const struct xt_tcpoptstrip_target_info *info, unsigned int tcphoff, unsigned int minlen) { - const struct xt_tcpoptstrip_target_info *info = par->targinfo; unsigned int optl, i, j; struct tcphdr *tcph; u_int16_t n, o; u_int8_t *opt; - int len; - - /* This is a fragment, no TCP header is available */ - if (par->fragoff != 0) - return XT_CONTINUE; if (!skb_make_writable(skb, skb->len)) return NF_DROP; - len = skb->len - tcphoff; - if (len < (int)sizeof(struct tcphdr)) - return NF_DROP; - tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); - if (tcph->doff * 4 > len) - return NF_DROP; - opt = (u_int8_t *)tcph; /* @@ -89,7 +76,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, static unsigned int tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par) { - return tcpoptstrip_mangle_packet(skb, par, ip_hdrlen(skb), + return tcpoptstrip_mangle_packet(skb, par->targinfo, ip_hdrlen(skb), sizeof(struct iphdr) + sizeof(struct tcphdr)); } @@ -107,7 +94,7 @@ tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par) if (tcphoff < 0) return NF_DROP; - return tcpoptstrip_mangle_packet(skb, par, tcphoff, + return tcpoptstrip_mangle_packet(skb, par->targinfo, tcphoff, sizeof(*ipv6h) + sizeof(struct tcphdr)); } #endif diff --git a/trunk/net/netfilter/xt_addrtype.c b/trunk/net/netfilter/xt_addrtype.c index 68ff29f60867..49c5ff7f6dd6 100644 --- a/trunk/net/netfilter/xt_addrtype.c +++ b/trunk/net/netfilter/xt_addrtype.c @@ -22,7 +22,6 @@ #include #endif -#include #include #include @@ -34,12 +33,12 @@ MODULE_ALIAS("ip6t_addrtype"); #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) static u32 match_lookup_rt6(struct net *net, const struct net_device *dev, - const struct in6_addr *addr, u16 mask) + const struct in6_addr *addr) { const struct nf_afinfo *afinfo; struct flowi6 flow; struct rt6_info *rt; - u32 ret = 0; + u32 ret; int route_err; memset(&flow, 0, sizeof(flow)); @@ -50,19 +49,12 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev, rcu_read_lock(); afinfo = nf_get_afinfo(NFPROTO_IPV6); 
- if (afinfo != NULL) { - const struct nf_ipv6_ops *v6ops; - - if (dev && (mask & XT_ADDRTYPE_LOCAL)) { - v6ops = nf_get_ipv6_ops(); - if (v6ops && v6ops->chk_addr(net, addr, dev, true)) - ret = XT_ADDRTYPE_LOCAL; - } + if (afinfo != NULL) route_err = afinfo->route(net, (struct dst_entry **)&rt, - flowi6_to_flowi(&flow), false); - } else { + flowi6_to_flowi(&flow), !!dev); + else route_err = 1; - } + rcu_read_unlock(); if (route_err) @@ -70,12 +62,15 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev, if (rt->rt6i_flags & RTF_REJECT) ret = XT_ADDRTYPE_UNREACHABLE; + else + ret = 0; - if (dev == NULL && rt->rt6i_flags & RTF_LOCAL) + if (rt->rt6i_flags & RTF_LOCAL) ret |= XT_ADDRTYPE_LOCAL; if (rt->rt6i_flags & RTF_ANYCAST) ret |= XT_ADDRTYPE_ANYCAST; + dst_release(&rt->dst); return ret; } @@ -95,7 +90,7 @@ static bool match_type6(struct net *net, const struct net_device *dev, if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST | XT_ADDRTYPE_UNREACHABLE) & mask) - return !!(mask & match_lookup_rt6(net, dev, addr, mask)); + return !!(mask & match_lookup_rt6(net, dev, addr)); return true; } diff --git a/trunk/net/netlabel/netlabel_domainhash.c b/trunk/net/netlabel/netlabel_domainhash.c index 6bb1d42f0fac..d8d424337550 100644 --- a/trunk/net/netlabel/netlabel_domainhash.c +++ b/trunk/net/netlabel/netlabel_domainhash.c @@ -245,71 +245,6 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry, } } -/** - * netlbl_domhsh_validate - Validate a new domain mapping entry - * @entry: the entry to validate - * - * This function validates the new domain mapping entry to ensure that it is - * a valid entry. Returns zero on success, negative values on failure. - * - */ -static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry) -{ - struct netlbl_af4list *iter4; - struct netlbl_domaddr4_map *map4; -#if IS_ENABLED(CONFIG_IPV6) - struct netlbl_af6list *iter6; - struct netlbl_domaddr6_map *map6; -#endif /* IPv6 */ - - if (entry == NULL) - return -EINVAL; - - switch (entry->type) { - case NETLBL_NLTYPE_UNLABELED: - if (entry->type_def.cipsov4 != NULL || - entry->type_def.addrsel != NULL) - return -EINVAL; - break; - case NETLBL_NLTYPE_CIPSOV4: - if (entry->type_def.cipsov4 == NULL) - return -EINVAL; - break; - case NETLBL_NLTYPE_ADDRSELECT: - netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) { - map4 = netlbl_domhsh_addr4_entry(iter4); - switch (map4->type) { - case NETLBL_NLTYPE_UNLABELED: - if (map4->type_def.cipsov4 != NULL) - return -EINVAL; - break; - case NETLBL_NLTYPE_CIPSOV4: - if (map4->type_def.cipsov4 == NULL) - return -EINVAL; - break; - default: - return -EINVAL; - } - } -#if IS_ENABLED(CONFIG_IPV6) - netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) { - map6 = netlbl_domhsh_addr6_entry(iter6); - switch (map6->type) { - case NETLBL_NLTYPE_UNLABELED: - break; - default: - return -EINVAL; - } - } -#endif /* IPv6 */ - break; - default: - return -EINVAL; - } - - return 0; -} - /* * Domain Hash Table Functions */ @@ -376,10 +311,6 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, struct netlbl_af6list *tmp6; #endif /* IPv6 */ - ret_val = netlbl_domhsh_validate(entry); - if (ret_val != 0) - return ret_val; - /* XXX - we can remove this RCU read lock as the spinlock protects the * entire function, but before we do we need to fixup the * netlbl_af[4,6]list RCU functions to do "the right thing" with diff --git a/trunk/net/netlink/af_netlink.c b/trunk/net/netlink/af_netlink.c index 57ee84d21470..12ac6b47a35c 100644 --- 
a/trunk/net/netlink/af_netlink.c +++ b/trunk/net/netlink/af_netlink.c @@ -371,7 +371,7 @@ static int netlink_mmap(struct file *file, struct socket *sock, err = 0; out: mutex_unlock(&nlk->pg_vec_lock); - return err; + return 0; } static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr) @@ -747,7 +747,7 @@ static void netlink_skb_destructor(struct sk_buff *skb) atomic_dec(&ring->pending); sock_put(sk); - skb->head = NULL; + skb->data = NULL; } #endif if (skb->sk != NULL) diff --git a/trunk/net/nfc/Makefile b/trunk/net/nfc/Makefile index a76f4533cb6c..fb799deaed4f 100644 --- a/trunk/net/nfc/Makefile +++ b/trunk/net/nfc/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_NFC) += nfc.o obj-$(CONFIG_NFC_NCI) += nci/ obj-$(CONFIG_NFC_HCI) += hci/ +#obj-$(CONFIG_NFC_LLCP) += llcp/ nfc-objs := core.o netlink.o af_nfc.o rawsock.o llcp_core.o llcp_commands.o \ llcp_sock.o diff --git a/trunk/net/packet/af_packet.c b/trunk/net/packet/af_packet.c index 20a1bd0e6549..8ec1bca7f859 100644 --- a/trunk/net/packet/af_packet.c +++ b/trunk/net/packet/af_packet.c @@ -2851,11 +2851,12 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, return -EOPNOTSUPP; uaddr->sa_family = AF_PACKET; - memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); if (dev) - strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); + strncpy(uaddr->sa_data, dev->name, 14); + else + memset(uaddr->sa_data, 0, 14); rcu_read_unlock(); *uaddr_len = sizeof(*uaddr); diff --git a/trunk/net/sched/act_police.c b/trunk/net/sched/act_police.c index 189e3c5b3d09..823463adbd21 100644 --- a/trunk/net/sched/act_police.c +++ b/trunk/net/sched/act_police.c @@ -231,14 +231,14 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla, } if (R_tab) { police->rate_present = true; - psched_ratecfg_precompute(&police->rate, &R_tab->rate); + psched_ratecfg_precompute(&police->rate, R_tab->rate.rate); qdisc_put_rtab(R_tab); } else { police->rate_present = false; } if (P_tab) { police->peak_present = true; - psched_ratecfg_precompute(&police->peak, &P_tab->rate); + psched_ratecfg_precompute(&police->peak, P_tab->rate.rate); qdisc_put_rtab(P_tab); } else { police->peak_present = false; @@ -376,9 +376,9 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) }; if (police->rate_present) - psched_ratecfg_getrate(&opt.rate, &police->rate); + opt.rate.rate = psched_ratecfg_getrate(&police->rate); if (police->peak_present) - psched_ratecfg_getrate(&opt.peakrate, &police->peak); + opt.peakrate.rate = psched_ratecfg_getrate(&police->peak); if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt)) goto nla_put_failure; if (police->tcfp_result && diff --git a/trunk/net/sched/sch_api.c b/trunk/net/sched/sch_api.c index 281c1bded1f6..2b935e7cfe7b 100644 --- a/trunk/net/sched/sch_api.c +++ b/trunk/net/sched/sch_api.c @@ -291,18 +291,17 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta { struct qdisc_rate_table *rtab; - if (tab == NULL || r->rate == 0 || r->cell_log == 0 || - nla_len(tab) != TC_RTAB_SIZE) - return NULL; - for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) { - if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) && - !memcmp(&rtab->data, nla_data(tab), 1024)) { + if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) { rtab->refcnt++; return rtab; } } + if (tab == NULL || r->rate == 0 || r->cell_log == 0 || + nla_len(tab) != TC_RTAB_SIZE) + return NULL; + rtab = 
kmalloc(sizeof(*rtab), GFP_KERNEL); if (rtab) { rtab->rate = *r; diff --git a/trunk/net/sched/sch_generic.c b/trunk/net/sched/sch_generic.c index 20224086cc28..eac7e0ee23c1 100644 --- a/trunk/net/sched/sch_generic.c +++ b/trunk/net/sched/sch_generic.c @@ -898,16 +898,14 @@ void dev_shutdown(struct net_device *dev) WARN_ON(timer_pending(&dev->watchdog_timer)); } -void psched_ratecfg_precompute(struct psched_ratecfg *r, - const struct tc_ratespec *conf) +void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate) { u64 factor; u64 mult; int shift; - memset(r, 0, sizeof(*r)); - r->overhead = conf->overhead; - r->rate_bps = (u64)conf->rate << 3; + r->rate_bps = (u64)rate << 3; + r->shift = 0; r->mult = 1; /* * Calibrate mult, shift so that token counting is accurate diff --git a/trunk/net/sched/sch_htb.c b/trunk/net/sched/sch_htb.c index adaedd79389c..79b1876b6cd2 100644 --- a/trunk/net/sched/sch_htb.c +++ b/trunk/net/sched/sch_htb.c @@ -109,7 +109,7 @@ struct htb_class { } un; struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */ struct rb_node pq_node; /* node for event queue */ - s64 pq_key; + psched_time_t pq_key; int prio_activity; /* for which prios are we active */ enum htb_cmode cmode; /* current mode of the class */ @@ -121,10 +121,10 @@ struct htb_class { /* token bucket parameters */ struct psched_ratecfg rate; struct psched_ratecfg ceil; - s64 buffer, cbuffer; /* token bucket depth/rate */ - s64 mbuffer; /* max wait time */ - s64 tokens, ctokens; /* current number of tokens */ - s64 t_c; /* checkpoint time */ + s64 buffer, cbuffer; /* token bucket depth/rate */ + psched_tdiff_t mbuffer; /* max wait time */ + s64 tokens, ctokens; /* current number of tokens */ + psched_time_t t_c; /* checkpoint time */ }; struct htb_sched { @@ -141,15 +141,15 @@ struct htb_sched { struct rb_root wait_pq[TC_HTB_MAXDEPTH]; /* time of nearest event per level (row) */ - s64 near_ev_cache[TC_HTB_MAXDEPTH]; + psched_time_t near_ev_cache[TC_HTB_MAXDEPTH]; int defcls; /* class where unclassified flows go to */ /* filters for qdisc itself */ struct tcf_proto *filter_list; - int rate2quantum; /* quant = rate / rate2quantum */ - s64 now; /* cached dequeue time */ + int rate2quantum; /* quant = rate / rate2quantum */ + psched_time_t now; /* cached dequeue time */ struct qdisc_watchdog watchdog; /* non shaped skbs; let them go directly thru */ @@ -664,8 +664,8 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, * next pending event (0 for no event in pq, q->now for too many events). * Note: Applied are events whose have cl->pq_key <= q->now. */ -static s64 htb_do_events(struct htb_sched *q, int level, - unsigned long start) +static psched_time_t htb_do_events(struct htb_sched *q, int level, + unsigned long start) { /* don't run for longer than 2 jiffies; 2 is used instead of * 1 to simplify things when jiffy is going to be incremented @@ -857,7 +857,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) struct sk_buff *skb; struct htb_sched *q = qdisc_priv(sch); int level; - s64 next_event; + psched_time_t next_event; unsigned long start_at; /* try to dequeue direct packets as high prio (!) 
to minimize cpu work */ @@ -880,7 +880,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) for (level = 0; level < TC_HTB_MAXDEPTH; level++) { /* common case optimization - skip event handler quickly */ int m; - s64 event; + psched_time_t event; if (q->now >= q->near_ev_cache[level]) { event = htb_do_events(q, level, start_at); @@ -1090,9 +1090,9 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, memset(&opt, 0, sizeof(opt)); - psched_ratecfg_getrate(&opt.rate, &cl->rate); + opt.rate.rate = psched_ratecfg_getrate(&cl->rate); opt.buffer = PSCHED_NS2TICKS(cl->buffer); - psched_ratecfg_getrate(&opt.ceil, &cl->ceil); + opt.ceil.rate = psched_ratecfg_getrate(&cl->ceil); opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer); opt.quantum = cl->quantum; opt.prio = cl->prio; @@ -1117,8 +1117,8 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) if (!cl->level && cl->un.leaf.q) cl->qstats.qlen = cl->un.leaf.q->q.qlen; - cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens); - cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens); + cl->xstats.tokens = cl->tokens; + cl->xstats.ctokens = cl->ctokens; if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 || @@ -1200,7 +1200,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl, parent->un.leaf.q = new_q ? new_q : &noop_qdisc; parent->tokens = parent->buffer; parent->ctokens = parent->cbuffer; - parent->t_c = ktime_to_ns(ktime_get()); + parent->t_c = psched_get_time(); parent->cmode = HTB_CAN_SEND; } @@ -1417,8 +1417,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, /* set class to be in HTB_CAN_SEND state */ cl->tokens = PSCHED_TICKS2NS(hopt->buffer); cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer); - cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */ - cl->t_c = ktime_to_ns(ktime_get()); + cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC; /* 1min */ + cl->t_c = psched_get_time(); cl->cmode = HTB_CAN_SEND; /* attach to the hash list and parent's family */ @@ -1459,8 +1459,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, cl->prio = TC_HTB_NUMPRIO - 1; } - psched_ratecfg_precompute(&cl->rate, &hopt->rate); - psched_ratecfg_precompute(&cl->ceil, &hopt->ceil); + psched_ratecfg_precompute(&cl->rate, hopt->rate.rate); + psched_ratecfg_precompute(&cl->ceil, hopt->ceil.rate); cl->buffer = PSCHED_TICKS2NS(hopt->buffer); cl->cbuffer = PSCHED_TICKS2NS(hopt->buffer); diff --git a/trunk/net/sched/sch_tbf.c b/trunk/net/sched/sch_tbf.c index e478d316602b..c8388f3c3426 100644 --- a/trunk/net/sched/sch_tbf.c +++ b/trunk/net/sched/sch_tbf.c @@ -298,9 +298,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) q->tokens = q->buffer; q->ptokens = q->mtu; - psched_ratecfg_precompute(&q->rate, &rtab->rate); + psched_ratecfg_precompute(&q->rate, rtab->rate.rate); if (ptab) { - psched_ratecfg_precompute(&q->peak, &ptab->rate); + psched_ratecfg_precompute(&q->peak, ptab->rate.rate); q->peak_present = true; } else { q->peak_present = false; @@ -350,9 +350,9 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) goto nla_put_failure; opt.limit = q->limit; - psched_ratecfg_getrate(&opt.rate, &q->rate); + opt.rate.rate = psched_ratecfg_getrate(&q->rate); if (q->peak_present) - psched_ratecfg_getrate(&opt.peakrate, &q->peak); + opt.peakrate.rate = psched_ratecfg_getrate(&q->peak); else memset(&opt.peakrate, 0, sizeof(opt.peakrate)); opt.mtu = PSCHED_NS2TICKS(q->mtu); diff --git a/trunk/net/sctp/outqueue.c b/trunk/net/sctp/outqueue.c index 
be35e2dbcc9a..32a4625fef77 100644 --- a/trunk/net/sctp/outqueue.c +++ b/trunk/net/sctp/outqueue.c @@ -206,8 +206,6 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary, */ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) { - memset(q, 0, sizeof(struct sctp_outq)); - q->asoc = asoc; INIT_LIST_HEAD(&q->out_chunk_list); INIT_LIST_HEAD(&q->control_chunk_list); @@ -215,7 +213,11 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) INIT_LIST_HEAD(&q->sacked); INIT_LIST_HEAD(&q->abandoned); + q->fast_rtx = 0; + q->outstanding_bytes = 0; q->empty = 1; + q->cork = 0; + q->out_qlen = 0; } /* Free the outqueue structure and any related pending chunks. diff --git a/trunk/net/sctp/socket.c b/trunk/net/sctp/socket.c index 6abb1caf9836..f631c5ff4dbf 100644 --- a/trunk/net/sctp/socket.c +++ b/trunk/net/sctp/socket.c @@ -4003,12 +4003,6 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk) /* Release our hold on the endpoint. */ sp = sctp_sk(sk); - /* This could happen during socket init, thus we bail out - * early, since the rest of the below is not setup either. - */ - if (sp->ep == NULL) - return; - if (sp->do_auto_asconf) { sp->do_auto_asconf = 0; list_del(&sp->auto_asconf_list); diff --git a/trunk/net/socket.c b/trunk/net/socket.c index 4ca1526db756..6b94633ca61d 100644 --- a/trunk/net/socket.c +++ b/trunk/net/socket.c @@ -1956,7 +1956,7 @@ struct used_address { unsigned int name_len; }; -static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, +static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, struct msghdr *msg_sys, unsigned int flags, struct used_address *used_address) { @@ -2071,30 +2071,22 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * BSD sendmsg interface */ -long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags) +SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) { int fput_needed, err; struct msghdr msg_sys; - struct socket *sock; + struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed); - sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; - err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL); + err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL); fput_light(sock->file, fput_needed); out: return err; } -SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) -{ - if (flags & MSG_CMSG_COMPAT) - return -EINVAL; - return __sys_sendmsg(fd, msg, flags); -} - /* * Linux sendmmsg interface */ @@ -2125,16 +2117,15 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, while (datagrams < vlen) { if (MSG_CMSG_COMPAT & flags) { - err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry, - &msg_sys, flags, &used_address); + err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry, + &msg_sys, flags, &used_address); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { - err = ___sys_sendmsg(sock, - (struct msghdr __user *)entry, - &msg_sys, flags, &used_address); + err = __sys_sendmsg(sock, (struct msghdr __user *)entry, + &msg_sys, flags, &used_address); if (err < 0) break; err = put_user(err, &entry->msg_len); @@ -2158,12 +2149,10 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags) { - if (flags & MSG_CMSG_COMPAT) - return -EINVAL; return __sys_sendmmsg(fd, 
mmsg, vlen, flags); } -static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, +static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, struct msghdr *msg_sys, unsigned int flags, int nosec) { struct compat_msghdr __user *msg_compat = @@ -2255,31 +2244,23 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, * BSD recvmsg interface */ -long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags) +SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, + unsigned int, flags) { int fput_needed, err; struct msghdr msg_sys; - struct socket *sock; + struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed); - sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; - err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0); + err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0); fput_light(sock->file, fput_needed); out: return err; } -SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, - unsigned int, flags) -{ - if (flags & MSG_CMSG_COMPAT) - return -EINVAL; - return __sys_recvmsg(fd, msg, flags); -} - /* * Linux recvmmsg interface */ @@ -2317,18 +2298,17 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, * No need to ask LSM for more than the first datagram. */ if (MSG_CMSG_COMPAT & flags) { - err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry, - &msg_sys, flags & ~MSG_WAITFORONE, - datagrams); + err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry, + &msg_sys, flags & ~MSG_WAITFORONE, + datagrams); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { - err = ___sys_recvmsg(sock, - (struct msghdr __user *)entry, - &msg_sys, flags & ~MSG_WAITFORONE, - datagrams); + err = __sys_recvmsg(sock, (struct msghdr __user *)entry, + &msg_sys, flags & ~MSG_WAITFORONE, + datagrams); if (err < 0) break; err = put_user(err, &entry->msg_len); @@ -2395,9 +2375,6 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg, int datagrams; struct timespec timeout_sys; - if (flags & MSG_CMSG_COMPAT) - return -EINVAL; - if (!timeout) return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL); diff --git a/trunk/net/sunrpc/auth_gss/auth_gss.c b/trunk/net/sunrpc/auth_gss/auth_gss.c index fc2f78d6a9b4..7da6b457f66a 100644 --- a/trunk/net/sunrpc/auth_gss/auth_gss.c +++ b/trunk/net/sunrpc/auth_gss/auth_gss.c @@ -52,8 +52,6 @@ #include #include -#include "../netns.h" - static const struct rpc_authops authgss_ops; static const struct rpc_credops gss_credops; @@ -87,6 +85,8 @@ struct gss_auth { }; /* pipe_version >= 0 if and only if someone has a pipe open. 
*/ +static int pipe_version = -1; +static atomic_t pipe_users = ATOMIC_INIT(0); static DEFINE_SPINLOCK(pipe_version_lock); static struct rpc_wait_queue pipe_version_rpc_waitqueue; static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue); @@ -266,27 +266,24 @@ struct gss_upcall_msg { char databuf[UPCALL_BUF_LEN]; }; -static int get_pipe_version(struct net *net) +static int get_pipe_version(void) { - struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); int ret; spin_lock(&pipe_version_lock); - if (sn->pipe_version >= 0) { - atomic_inc(&sn->pipe_users); - ret = sn->pipe_version; + if (pipe_version >= 0) { + atomic_inc(&pipe_users); + ret = pipe_version; } else ret = -EAGAIN; spin_unlock(&pipe_version_lock); return ret; } -static void put_pipe_version(struct net *net) +static void put_pipe_version(void) { - struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); - - if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) { - sn->pipe_version = -1; + if (atomic_dec_and_lock(&pipe_users, &pipe_version_lock)) { + pipe_version = -1; spin_unlock(&pipe_version_lock); } } @@ -294,10 +291,9 @@ static void put_pipe_version(struct net *net) static void gss_release_msg(struct gss_upcall_msg *gss_msg) { - struct net *net = rpc_net_ns(gss_msg->auth->client); if (!atomic_dec_and_test(&gss_msg->count)) return; - put_pipe_version(net); + put_pipe_version(); BUG_ON(!list_empty(&gss_msg->list)); if (gss_msg->ctx != NULL) gss_put_ctx(gss_msg->ctx); @@ -443,10 +439,7 @@ static void gss_encode_msg(struct gss_upcall_msg *gss_msg, struct rpc_clnt *clnt, const char *service_name) { - struct net *net = rpc_net_ns(clnt); - struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); - - if (sn->pipe_version == 0) + if (pipe_version == 0) gss_encode_v0_msg(gss_msg); else /* pipe_version == 1 */ gss_encode_v1_msg(gss_msg, clnt, service_name); @@ -462,7 +455,7 @@ gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt, gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS); if (gss_msg == NULL) return ERR_PTR(-ENOMEM); - vers = get_pipe_version(rpc_net_ns(clnt)); + vers = get_pipe_version(); if (vers < 0) { kfree(gss_msg); return ERR_PTR(vers); @@ -566,34 +559,24 @@ gss_refresh_upcall(struct rpc_task *task) static inline int gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) { - struct net *net = rpc_net_ns(gss_auth->client); - struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); struct rpc_pipe *pipe; struct rpc_cred *cred = &gss_cred->gc_base; struct gss_upcall_msg *gss_msg; - unsigned long timeout; DEFINE_WAIT(wait); - int err; + int err = 0; dprintk("RPC: %s for uid %u\n", __func__, from_kuid(&init_user_ns, cred->cr_uid)); retry: - err = 0; - /* Default timeout is 15s unless we know that gssd is not running */ - timeout = 15 * HZ; - if (!sn->gssd_running) - timeout = HZ >> 2; gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred); if (PTR_ERR(gss_msg) == -EAGAIN) { err = wait_event_interruptible_timeout(pipe_version_waitqueue, - sn->pipe_version >= 0, timeout); - if (sn->pipe_version < 0) { - if (err == 0) - sn->gssd_running = 0; + pipe_version >= 0, 15*HZ); + if (pipe_version < 0) { warn_gssd(); err = -EACCES; } - if (err < 0) + if (err) goto out; goto retry; } @@ -724,22 +707,20 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) static int gss_pipe_open(struct inode *inode, int new_version) { - struct net *net = inode->i_sb->s_fs_info; - struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); int ret = 0; spin_lock(&pipe_version_lock); - if 
(sn->pipe_version < 0) { + if (pipe_version < 0) { /* First open of any gss pipe determines the version: */ - sn->pipe_version = new_version; + pipe_version = new_version; rpc_wake_up(&pipe_version_rpc_waitqueue); wake_up(&pipe_version_waitqueue); - } else if (sn->pipe_version != new_version) { + } else if (pipe_version != new_version) { /* Trying to open a pipe of a different version */ ret = -EBUSY; goto out; } - atomic_inc(&sn->pipe_users); + atomic_inc(&pipe_users); out: spin_unlock(&pipe_version_lock); return ret; @@ -759,7 +740,6 @@ static int gss_pipe_open_v1(struct inode *inode) static void gss_pipe_release(struct inode *inode) { - struct net *net = inode->i_sb->s_fs_info; struct rpc_pipe *pipe = RPC_I(inode)->pipe; struct gss_upcall_msg *gss_msg; @@ -778,7 +758,7 @@ gss_pipe_release(struct inode *inode) } spin_unlock(&pipe->lock); - put_pipe_version(net); + put_pipe_version(); } static void diff --git a/trunk/net/sunrpc/auth_gss/svcauth_gss.c b/trunk/net/sunrpc/auth_gss/svcauth_gss.c index 29b4ba93ab3c..871c73c92165 100644 --- a/trunk/net/sunrpc/auth_gss/svcauth_gss.c +++ b/trunk/net/sunrpc/auth_gss/svcauth_gss.c @@ -1287,7 +1287,7 @@ static bool use_gss_proxy(struct net *net) #ifdef CONFIG_PROC_FS -static int set_gss_proxy(struct net *net, int type) +static bool set_gss_proxy(struct net *net, int type) { struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); int ret = 0; @@ -1317,12 +1317,10 @@ static inline bool gssp_ready(struct sunrpc_net *sn) return false; } -static int wait_for_gss_proxy(struct net *net, struct file *file) +static int wait_for_gss_proxy(struct net *net) { struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); - if (file->f_flags & O_NONBLOCK && !gssp_ready(sn)) - return -EAGAIN; return wait_event_interruptible(sn->gssp_wq, gssp_ready(sn)); } @@ -1364,7 +1362,7 @@ static ssize_t read_gssp(struct file *file, char __user *buf, size_t len; int ret; - ret = wait_for_gss_proxy(net, file); + ret = wait_for_gss_proxy(net); if (ret) return ret; diff --git a/trunk/net/sunrpc/netns.h b/trunk/net/sunrpc/netns.h index 74d948f5d5a1..7111a4c9113b 100644 --- a/trunk/net/sunrpc/netns.h +++ b/trunk/net/sunrpc/netns.h @@ -28,11 +28,7 @@ struct sunrpc_net { wait_queue_head_t gssp_wq; struct rpc_clnt *gssp_clnt; int use_gss_proxy; - int pipe_version; - atomic_t pipe_users; struct proc_dir_entry *use_gssp_proc; - - unsigned int gssd_running; }; extern int sunrpc_net_id; diff --git a/trunk/net/sunrpc/rpc_pipe.c b/trunk/net/sunrpc/rpc_pipe.c index e7ce4b3eb0bd..a9129f8d7070 100644 --- a/trunk/net/sunrpc/rpc_pipe.c +++ b/trunk/net/sunrpc/rpc_pipe.c @@ -216,14 +216,11 @@ rpc_destroy_inode(struct inode *inode) static int rpc_pipe_open(struct inode *inode, struct file *filp) { - struct net *net = inode->i_sb->s_fs_info; - struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); struct rpc_pipe *pipe; int first_open; int res = -ENXIO; mutex_lock(&inode->i_mutex); - sn->gssd_running = 1; pipe = RPC_I(inode)->pipe; if (pipe == NULL) goto out; @@ -1072,8 +1069,6 @@ void rpc_pipefs_init_net(struct net *net) struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); mutex_init(&sn->pipefs_sb_lock); - sn->gssd_running = 1; - sn->pipe_version = -1; } /* diff --git a/trunk/net/sunrpc/sched.c b/trunk/net/sunrpc/sched.c index 5356b120dbf8..f8529fc8e542 100644 --- a/trunk/net/sunrpc/sched.c +++ b/trunk/net/sunrpc/sched.c @@ -324,17 +324,11 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task); * Note: If the task is ASYNC, and is being made runnable after sitting on an * rpc_wait_queue, this must 
be called with the queue spinlock held to protect * the wait queue operation. - * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(), - * which is needed to ensure that __rpc_execute() doesn't loop (due to the - * lockless RPC_IS_QUEUED() test) before we've had a chance to test - * the RPC_TASK_RUNNING flag. */ static void rpc_make_runnable(struct rpc_task *task) { - bool need_wakeup = !rpc_test_and_set_running(task); - rpc_clear_queued(task); - if (!need_wakeup) + if (rpc_test_and_set_running(task)) return; if (RPC_IS_ASYNC(task)) { INIT_WORK(&task->u.tk_work, rpc_async_schedule); diff --git a/trunk/net/sunrpc/svcauth_unix.c b/trunk/net/sunrpc/svcauth_unix.c index 06bdf5a1082c..c3f9e1ef7f53 100644 --- a/trunk/net/sunrpc/svcauth_unix.c +++ b/trunk/net/sunrpc/svcauth_unix.c @@ -810,15 +810,11 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp) goto badcred; argv->iov_base = (void*)((__be32*)argv->iov_base + slen); /* skip machname */ argv->iov_len -= slen*4; - /* - * Note: we skip uid_valid()/gid_valid() checks here for - * backwards compatibility with clients that use -1 id's. - * Instead, -1 uid or gid is later mapped to the - * (export-specific) anonymous id by nfsd_setuser. - * Supplementary gid's will be left alone. - */ + cred->cr_uid = make_kuid(&init_user_ns, svc_getnl(argv)); /* uid */ cred->cr_gid = make_kgid(&init_user_ns, svc_getnl(argv)); /* gid */ + if (!uid_valid(cred->cr_uid) || !gid_valid(cred->cr_gid)) + goto badcred; slen = svc_getnl(argv); /* gids length */ if (slen > 16 || (len -= (slen + 2)*4) < 0) goto badcred; @@ -827,6 +823,8 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp) return SVC_CLOSE; for (i = 0; i < slen; i++) { kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv)); + if (!gid_valid(kgid)) + goto badcred; GROUP_AT(cred->cr_group_info, i) = kgid; } if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { diff --git a/trunk/net/wireless/core.c b/trunk/net/wireless/core.c index 73405e00c800..84c9ad7e1dca 100644 --- a/trunk/net/wireless/core.c +++ b/trunk/net/wireless/core.c @@ -638,21 +638,17 @@ int wiphy_register(struct wiphy *wiphy) * cfg80211_mutex lock */ res = rfkill_register(rdev->rfkill); - if (res) { - device_del(&rdev->wiphy.dev); - - mutex_lock(&cfg80211_mutex); - debugfs_remove_recursive(rdev->wiphy.debugfsdir); - list_del_rcu(&rdev->list); - wiphy_regulatory_deregister(wiphy); - mutex_unlock(&cfg80211_mutex); - return res; - } + if (res) + goto out_rm_dev; rtnl_lock(); rdev->wiphy.registered = true; rtnl_unlock(); return 0; + +out_rm_dev: + device_del(&rdev->wiphy.dev); + return res; } EXPORT_SYMBOL(wiphy_register); @@ -870,6 +866,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev, #endif __cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, true); + cfg80211_mlme_down(rdev, dev); wdev_unlock(wdev); break; case NL80211_IFTYPE_MESH_POINT: diff --git a/trunk/net/wireless/nl80211.c b/trunk/net/wireless/nl80211.c index b14b7e3cb6e6..afa283841e8c 100644 --- a/trunk/net/wireless/nl80211.c +++ b/trunk/net/wireless/nl80211.c @@ -1564,17 +1564,12 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) struct cfg80211_registered_device *dev; s64 filter_wiphy = -1; bool split = false; - struct nlattr **tb; + struct nlattr **tb = nl80211_fam.attrbuf; int res; - /* will be zeroed in nlmsg_parse() */ - tb = kmalloc(sizeof(*tb) * (NL80211_ATTR_MAX + 1), GFP_KERNEL); - if (!tb) - return -ENOMEM; - mutex_lock(&cfg80211_mutex); res = nlmsg_parse(cb->nlh, 
GENL_HDRLEN + nl80211_fam.hdrsize, - tb, NL80211_ATTR_MAX, nl80211_policy); + tb, nl80211_fam.maxattr, nl80211_policy); if (res == 0) { split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP]; if (tb[NL80211_ATTR_WIPHY]) @@ -1588,7 +1583,6 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) netdev = dev_get_by_index(sock_net(skb->sk), ifidx); if (!netdev) { mutex_unlock(&cfg80211_mutex); - kfree(tb); return -ENODEV; } if (netdev->ieee80211_ptr) { @@ -1599,7 +1593,6 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) dev_put(netdev); } } - kfree(tb); list_for_each_entry(dev, &cfg80211_rdev_list, list) { if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk))) @@ -3418,7 +3411,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq, (u32)sinfo->rx_bytes)) goto nla_put_failure; if ((sinfo->filled & (STATION_INFO_TX_BYTES | - STATION_INFO_TX_BYTES64)) && + NL80211_STA_INFO_TX_BYTES64)) && nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES, (u32)sinfo->tx_bytes)) goto nla_put_failure; @@ -7584,8 +7577,6 @@ static int nl80211_send_wowlan_tcp(struct sk_buff *msg, &tcp->payload_tok)) return -ENOBUFS; - nla_nest_end(msg, nl_tcp); - return 0; } @@ -9979,7 +9970,6 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) || - nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) || nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) || (sig_dbm && nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || @@ -10020,7 +10010,6 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) || - nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) || nla_put(msg, NL80211_ATTR_FRAME, len, buf) || nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) || (ack && nla_put_flag(msg, NL80211_ATTR_ACK))) diff --git a/trunk/net/wireless/sme.c b/trunk/net/wireless/sme.c index 3ed35c345cae..a9dc5c736df0 100644 --- a/trunk/net/wireless/sme.c +++ b/trunk/net/wireless/sme.c @@ -231,9 +231,6 @@ void cfg80211_conn_work(struct work_struct *work) mutex_lock(&rdev->sched_scan_mtx); list_for_each_entry(wdev, &rdev->wdev_list, list) { - if (!wdev->netdev) - continue; - wdev_lock(wdev); if (!netif_running(wdev->netdev)) { wdev_unlock(wdev); @@ -964,7 +961,7 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev, /* was it connected by userspace SME? 
*/ if (!wdev->conn) { cfg80211_mlme_down(rdev, dev); - goto disconnect; + return 0; } if (wdev->sme_state == CFG80211_SME_CONNECTING && @@ -990,7 +987,6 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev, return err; } - disconnect: if (wdev->sme_state == CFG80211_SME_CONNECTED) __cfg80211_disconnected(dev, NULL, 0, 0, false); else if (wdev->sme_state == CFG80211_SME_CONNECTING) diff --git a/trunk/net/wireless/trace.h b/trunk/net/wireless/trace.h index 5755bc14abbd..ecd4fcec3c94 100644 --- a/trunk/net/wireless/trace.h +++ b/trunk/net/wireless/trace.h @@ -2441,7 +2441,6 @@ TRACE_EVENT(cfg80211_report_wowlan_wakeup, TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY - __field(bool, non_wireless) __field(bool, disconnect) __field(bool, magic_pkt) __field(bool, gtk_rekey_failure) @@ -2450,22 +2449,20 @@ TRACE_EVENT(cfg80211_report_wowlan_wakeup, __field(bool, rfkill_release) __field(s32, pattern_idx) __field(u32, packet_len) - __dynamic_array(u8, packet, - wakeup ? wakeup->packet_present_len : 0) + __dynamic_array(u8, packet, wakeup->packet_present_len) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; - __entry->non_wireless = !wakeup; - __entry->disconnect = wakeup ? wakeup->disconnect : false; - __entry->magic_pkt = wakeup ? wakeup->magic_pkt : false; - __entry->gtk_rekey_failure = wakeup ? wakeup->gtk_rekey_failure : false; - __entry->eap_identity_req = wakeup ? wakeup->eap_identity_req : false; - __entry->four_way_handshake = wakeup ? wakeup->four_way_handshake : false; - __entry->rfkill_release = wakeup ? wakeup->rfkill_release : false; - __entry->pattern_idx = wakeup ? wakeup->pattern_idx : false; - __entry->packet_len = wakeup ? wakeup->packet_len : false; - if (wakeup && wakeup->packet && wakeup->packet_present_len) + __entry->disconnect = wakeup->disconnect; + __entry->magic_pkt = wakeup->magic_pkt; + __entry->gtk_rekey_failure = wakeup->gtk_rekey_failure; + __entry->eap_identity_req = wakeup->eap_identity_req; + __entry->four_way_handshake = wakeup->four_way_handshake; + __entry->rfkill_release = wakeup->rfkill_release; + __entry->pattern_idx = wakeup->pattern_idx; + __entry->packet_len = wakeup->packet_len; + if (wakeup->packet && wakeup->packet_present_len) memcpy(__get_dynamic_array(packet), wakeup->packet, wakeup->packet_present_len); ), diff --git a/trunk/net/xfrm/xfrm_output.c b/trunk/net/xfrm/xfrm_output.c index 0cf003dfa8fc..bcfda8921b5b 100644 --- a/trunk/net/xfrm/xfrm_output.c +++ b/trunk/net/xfrm/xfrm_output.c @@ -64,7 +64,6 @@ static int xfrm_output_one(struct sk_buff *skb, int err) if (unlikely(x->km.state != XFRM_STATE_VALID)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID); - err = -EINVAL; goto error; } diff --git a/trunk/net/xfrm/xfrm_policy.c b/trunk/net/xfrm/xfrm_policy.c index ea970b8002a2..23cea0f74336 100644 --- a/trunk/net/xfrm/xfrm_policy.c +++ b/trunk/net/xfrm/xfrm_policy.c @@ -2557,12 +2557,11 @@ static void __xfrm_garbage_collect(struct net *net) } } -void xfrm_garbage_collect(struct net *net) +static void xfrm_garbage_collect(struct net *net) { flow_cache_flush(); __xfrm_garbage_collect(net); } -EXPORT_SYMBOL(xfrm_garbage_collect); static void xfrm_garbage_collect_deferred(struct net *net) { diff --git a/trunk/net/xfrm/xfrm_user.c b/trunk/net/xfrm/xfrm_user.c index 3f565e495ac6..aa778748c565 100644 --- a/trunk/net/xfrm/xfrm_user.c +++ b/trunk/net/xfrm/xfrm_user.c @@ -1681,8 +1681,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, out: xfrm_pol_put(xp); - if (delete && err == 0) - xfrm_garbage_collect(net); return err; } 
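As background for the sendmsg()/recvmsg() syscall plumbing reworked in the net/socket.c hunks earlier in this section, here is a minimal, hypothetical userspace sketch of sendmsg(2) sending one UDP datagram through a struct msghdr / struct iovec pair. It is illustrative only, is not part of the patch above, and the details (the "payload" buffer, port 9999, the loopback address) are invented for the example.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/uio.h>
#include <sys/socket.h>

int main(void)
{
	/* plain UDP socket; sendmsg() works the same way on any socket type */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* destination address: loopback, arbitrary example port */
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port   = htons(9999),
	};
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

	/* the data to send, described by a single iovec element */
	char payload[] = "hello";
	struct iovec iov = {
		.iov_base = payload,
		.iov_len  = sizeof(payload) - 1,
	};

	/* msghdr bundles the destination, the iovec array and optional cmsg data */
	struct msghdr msg = {
		.msg_name    = &dst,
		.msg_namelen = sizeof(dst),
		.msg_iov     = &iov,
		.msg_iovlen  = 1,
	};

	if (sendmsg(fd, &msg, 0) < 0)
		perror("sendmsg");

	close(fd);
	return 0;
}

A sketch like this builds with any C compiler and needs nothing beyond the standard socket headers; the kernel-side counterpart of this call path is what the __sys_sendmsg()/__sys_recvmsg() hunks above are rearranging.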
diff --git a/trunk/scripts/Makefile.lib b/trunk/scripts/Makefile.lib index f97869f1f09b..51bb3de680b6 100644 --- a/trunk/scripts/Makefile.lib +++ b/trunk/scripts/Makefile.lib @@ -149,7 +149,7 @@ cpp_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \ ld_flags = $(LDFLAGS) $(ldflags-y) -dtc_cpp_flags = -Wp,-MD,$(depfile).pre.tmp -nostdinc \ +dtc_cpp_flags = -Wp,-MD,$(depfile).pre -nostdinc \ -I$(srctree)/arch/$(SRCARCH)/boot/dts \ -I$(srctree)/arch/$(SRCARCH)/boot/dts/include \ -undef -D__DTS__ @@ -264,14 +264,14 @@ $(obj)/%.dtb.S: $(obj)/%.dtb quiet_cmd_dtc = DTC $@ cmd_dtc = $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \ $(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 \ - -i $(dir $<) $(DTC_FLAGS) \ - -d $(depfile).dtc.tmp $(dtc-tmp) ; \ - cat $(depfile).pre.tmp $(depfile).dtc.tmp > $(depfile) + -i $(srctree)/arch/$(SRCARCH)/boot/dts $(DTC_FLAGS) \ + -d $(depfile).dtc $(dtc-tmp) ; \ + cat $(depfile).pre $(depfile).dtc > $(depfile) $(obj)/%.dtb: $(src)/%.dts FORCE $(call if_changed_dep,dtc) -dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp) +dtc-tmp = $(subst $(comma),_,$(dot-target).dts) # Bzip2 # --------------------------------------------------------------------------- diff --git a/trunk/scripts/config b/trunk/scripts/config index a65ecbbdd32a..bb4d3deb6d1c 100755 --- a/trunk/scripts/config +++ b/trunk/scripts/config @@ -105,7 +105,7 @@ while [ "$1" != "" ] ; do ;; --refresh) ;; - --*-after|-E|-D|-M) + --*-after) checkarg "$1" A=$ARG checkarg "$2" diff --git a/trunk/scripts/dtc/dtc-lexer.l b/trunk/scripts/dtc/dtc-lexer.l index 3b41bfca636c..254d5af88956 100644 --- a/trunk/scripts/dtc/dtc-lexer.l +++ b/trunk/scripts/dtc/dtc-lexer.l @@ -71,7 +71,7 @@ static int pop_input_file(void); push_input_file(name); } -<*>^"#"(line)?[ \t]+[0-9]+[ \t]+{STRING}([ \t]+[0-9]+)? { +<*>^"#"(line)?{WS}+[0-9]+{WS}+{STRING}({WS}+[0-9]+)? 
{ char *line, *tmp, *fn; /* skip text before line # */ line = yytext; diff --git a/trunk/scripts/dtc/dtc-lexer.lex.c_shipped b/trunk/scripts/dtc/dtc-lexer.lex.c_shipped index 2d30f41778b7..a6c5fcdfc032 100644 --- a/trunk/scripts/dtc/dtc-lexer.lex.c_shipped +++ b/trunk/scripts/dtc/dtc-lexer.lex.c_shipped @@ -405,19 +405,19 @@ static yyconst flex_int16_t yy_accept[161] = static yyconst flex_int32_t yy_ec[256] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, - 4, 4, 4, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 2, 5, 6, 7, 1, 1, 8, 9, 1, - 1, 10, 11, 11, 12, 11, 13, 14, 15, 16, - 16, 16, 16, 16, 16, 16, 16, 17, 1, 18, - 19, 20, 11, 11, 21, 21, 21, 21, 21, 21, - 22, 22, 22, 22, 22, 23, 22, 22, 22, 22, - 22, 22, 22, 22, 24, 22, 22, 25, 22, 22, - 1, 26, 27, 1, 22, 1, 21, 28, 29, 30, - - 31, 21, 22, 22, 32, 22, 22, 33, 34, 35, - 36, 37, 22, 38, 39, 40, 41, 42, 22, 25, - 43, 22, 44, 45, 46, 1, 1, 1, 1, 1, + 1, 2, 4, 5, 6, 1, 1, 7, 8, 1, + 1, 9, 10, 10, 11, 10, 12, 13, 14, 15, + 15, 15, 15, 15, 15, 15, 15, 16, 1, 17, + 18, 19, 10, 10, 20, 20, 20, 20, 20, 20, + 21, 21, 21, 21, 21, 22, 21, 21, 21, 21, + 21, 21, 21, 21, 23, 21, 21, 24, 21, 21, + 1, 25, 26, 1, 21, 1, 20, 27, 28, 29, + + 30, 20, 21, 21, 31, 21, 21, 32, 33, 34, + 35, 36, 21, 37, 38, 39, 40, 41, 21, 24, + 42, 21, 43, 44, 45, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, @@ -434,36 +434,36 @@ static yyconst flex_int32_t yy_ec[256] = 1, 1, 1, 1, 1 } ; -static yyconst flex_int32_t yy_meta[47] = +static yyconst flex_int32_t yy_meta[46] = { 0, - 1, 1, 1, 1, 1, 1, 2, 3, 1, 2, - 2, 2, 4, 5, 5, 5, 6, 1, 1, 1, - 7, 8, 8, 8, 8, 1, 1, 7, 7, 7, - 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 3, 1, 1 + 1, 1, 1, 1, 1, 2, 3, 1, 2, 2, + 2, 4, 5, 5, 5, 6, 1, 1, 1, 7, + 8, 8, 8, 8, 1, 1, 7, 7, 7, 7, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 3, 1, 1 } ; static yyconst flex_int16_t yy_base[175] = { 0, - 0, 385, 378, 40, 41, 383, 72, 382, 34, 44, - 388, 393, 61, 117, 368, 116, 115, 115, 115, 48, - 367, 107, 368, 339, 127, 120, 0, 147, 393, 0, - 127, 0, 133, 156, 168, 153, 393, 125, 393, 380, - 393, 0, 369, 127, 393, 160, 371, 377, 347, 21, - 343, 346, 393, 393, 393, 393, 393, 359, 393, 393, - 183, 343, 339, 393, 356, 0, 183, 340, 187, 348, - 347, 0, 0, 0, 178, 359, 195, 365, 354, 326, - 332, 325, 334, 328, 204, 326, 331, 324, 393, 335, - 150, 311, 343, 342, 315, 322, 340, 179, 313, 207, - - 319, 316, 317, 393, 337, 333, 305, 302, 311, 301, - 310, 190, 338, 337, 393, 307, 322, 301, 305, 277, - 208, 311, 307, 278, 271, 270, 248, 246, 213, 130, - 393, 393, 263, 235, 207, 221, 218, 229, 213, 213, - 206, 234, 218, 210, 208, 193, 219, 393, 223, 204, - 176, 157, 393, 393, 120, 106, 97, 119, 393, 393, - 245, 251, 259, 263, 267, 273, 280, 284, 292, 300, - 304, 310, 318, 326 + 0, 388, 381, 40, 41, 386, 71, 385, 34, 44, + 390, 395, 60, 62, 371, 112, 111, 111, 111, 104, + 370, 106, 371, 342, 124, 119, 0, 144, 395, 0, + 123, 0, 159, 153, 165, 167, 395, 130, 395, 382, + 395, 0, 372, 122, 395, 157, 374, 379, 350, 21, + 346, 349, 395, 395, 395, 395, 395, 362, 395, 395, + 181, 346, 342, 395, 359, 0, 191, 343, 190, 351, + 350, 0, 0, 0, 173, 362, 177, 367, 357, 329, + 335, 328, 337, 331, 206, 329, 334, 327, 395, 338, + 170, 314, 346, 345, 318, 325, 343, 158, 316, 212, + + 322, 319, 320, 395, 340, 336, 308, 305, 314, 304, + 295, 138, 208, 220, 395, 292, 305, 265, 264, 254, + 201, 222, 285, 275, 273, 270, 236, 235, 225, 115, + 395, 395, 252, 216, 216, 217, 214, 230, 209, 220, + 213, 239, 211, 
217, 216, 209, 229, 395, 240, 225, + 206, 169, 395, 395, 116, 106, 99, 54, 395, 395, + 254, 260, 268, 272, 276, 282, 289, 293, 301, 309, + 313, 319, 327, 335 } ; static yyconst flex_int16_t yy_def[175] = @@ -489,108 +489,108 @@ static yyconst flex_int16_t yy_def[175] = 160, 160, 160, 160 } ; -static yyconst flex_int16_t yy_nxt[440] = +static yyconst flex_int16_t yy_nxt[441] = { 0, - 12, 13, 14, 13, 15, 16, 12, 17, 18, 12, - 12, 12, 19, 12, 12, 12, 12, 20, 21, 22, - 23, 23, 23, 23, 23, 12, 12, 23, 23, 23, + 12, 13, 14, 15, 16, 12, 17, 18, 12, 12, + 12, 19, 12, 12, 12, 12, 20, 21, 22, 23, + 23, 23, 23, 23, 12, 12, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, - 23, 23, 23, 12, 24, 12, 25, 34, 35, 35, - 25, 81, 26, 26, 27, 27, 27, 34, 35, 35, - 82, 28, 36, 36, 36, 53, 54, 29, 28, 28, - 28, 28, 12, 13, 14, 13, 15, 16, 30, 17, - 18, 30, 30, 30, 26, 30, 30, 30, 12, 20, - 21, 22, 31, 31, 31, 31, 31, 32, 12, 31, + 23, 23, 12, 24, 12, 25, 34, 35, 35, 25, + 81, 26, 26, 27, 27, 27, 34, 35, 35, 82, + 28, 36, 36, 36, 36, 159, 29, 28, 28, 28, + 28, 12, 13, 14, 15, 16, 30, 17, 18, 30, + 30, 30, 26, 30, 30, 30, 12, 20, 21, 22, + 31, 31, 31, 31, 31, 32, 12, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, - 31, 31, 31, 31, 31, 12, 24, 12, 36, 36, - 36, 39, 41, 45, 47, 56, 57, 48, 61, 47, - 39, 159, 48, 66, 61, 45, 66, 66, 66, 158, - 46, 40, 49, 59, 50, 157, 51, 49, 52, 50, - 40, 63, 46, 52, 36, 36, 36, 156, 43, 62, - 65, 65, 65, 59, 136, 68, 137, 65, 75, 69, - 69, 69, 70, 71, 65, 65, 65, 65, 70, 71, - 72, 69, 69, 69, 61, 46, 45, 155, 154, 66, - 70, 71, 66, 66, 66, 122, 85, 85, 85, 59, - - 69, 69, 69, 46, 77, 100, 109, 93, 100, 70, - 71, 110, 112, 122, 129, 123, 153, 85, 85, 85, - 135, 135, 135, 148, 148, 160, 135, 135, 135, 152, - 142, 142, 142, 123, 143, 142, 142, 142, 151, 143, - 150, 146, 145, 149, 149, 38, 38, 38, 38, 38, - 38, 38, 38, 42, 144, 141, 140, 42, 42, 44, - 44, 44, 44, 44, 44, 44, 44, 58, 58, 58, - 58, 64, 139, 64, 66, 138, 134, 66, 133, 66, - 66, 67, 132, 131, 67, 67, 67, 67, 73, 130, - 73, 73, 76, 76, 76, 76, 76, 76, 76, 76, - - 78, 78, 78, 78, 78, 78, 78, 78, 91, 160, - 91, 92, 129, 92, 92, 128, 92, 92, 121, 121, - 121, 121, 121, 121, 121, 121, 147, 147, 147, 147, - 147, 147, 147, 147, 127, 126, 125, 124, 61, 61, - 120, 119, 118, 117, 116, 115, 47, 114, 110, 113, - 111, 108, 107, 106, 48, 105, 104, 89, 103, 102, - 101, 99, 98, 97, 96, 95, 94, 79, 77, 90, - 89, 88, 59, 87, 86, 59, 84, 83, 80, 79, - 77, 74, 160, 60, 59, 55, 37, 160, 33, 25, - 26, 25, 11, 160, 160, 160, 160, 160, 160, 160, + 31, 31, 31, 12, 24, 12, 39, 41, 45, 47, + 53, 54, 48, 56, 57, 61, 61, 47, 66, 45, + 48, 66, 66, 66, 39, 46, 40, 49, 59, 50, + 158, 51, 122, 52, 157, 49, 46, 50, 136, 63, + 137, 52, 156, 43, 40, 62, 65, 65, 65, 59, + 61, 61, 123, 65, 75, 69, 69, 69, 36, 36, + 65, 65, 65, 65, 70, 71, 72, 69, 69, 69, + 45, 46, 61, 61, 109, 77, 70, 71, 93, 110, + 68, 70, 71, 85, 85, 85, 66, 46, 155, 66, + + 66, 66, 69, 69, 69, 122, 59, 100, 100, 61, + 61, 70, 71, 100, 100, 148, 112, 154, 85, 85, + 85, 61, 61, 129, 129, 123, 129, 129, 135, 135, + 135, 142, 142, 148, 143, 149, 153, 135, 135, 135, + 142, 142, 160, 143, 152, 151, 150, 146, 145, 144, + 141, 140, 139, 149, 38, 38, 38, 38, 38, 38, + 38, 38, 42, 138, 134, 133, 42, 42, 44, 44, + 44, 44, 44, 44, 44, 44, 58, 58, 58, 58, + 64, 132, 64, 66, 131, 130, 66, 160, 66, 66, + 67, 128, 127, 67, 67, 67, 67, 73, 126, 73, + + 73, 76, 76, 76, 76, 76, 76, 76, 76, 78, + 78, 78, 78, 78, 78, 78, 78, 91, 125, 91, + 92, 124, 92, 92, 120, 92, 92, 121, 121, 121, + 121, 121, 
121, 121, 121, 147, 147, 147, 147, 147, + 147, 147, 147, 119, 118, 117, 116, 115, 47, 114, + 110, 113, 111, 108, 107, 106, 48, 105, 104, 89, + 103, 102, 101, 99, 98, 97, 96, 95, 94, 79, + 77, 90, 89, 88, 59, 87, 86, 59, 84, 83, + 80, 79, 77, 74, 160, 60, 59, 55, 37, 160, + 33, 25, 26, 25, 11, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, - 160, 160, 160, 160, 160, 160, 160, 160, 160 + 160, 160, 160, 160, 160, 160, 160, 160, 160, 160 } ; -static yyconst flex_int16_t yy_chk[440] = +static yyconst flex_int16_t yy_chk[441] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 4, 9, 9, 9, - 10, 50, 4, 5, 5, 5, 5, 10, 10, 10, - 50, 5, 13, 13, 13, 20, 20, 5, 5, 5, - 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, + 1, 1, 1, 1, 1, 4, 9, 9, 9, 10, + 50, 4, 5, 5, 5, 5, 10, 10, 10, 50, + 5, 13, 13, 14, 14, 158, 5, 5, 5, 5, + 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 14, 14, - 14, 16, 17, 18, 19, 22, 22, 19, 25, 26, - 38, 158, 26, 31, 33, 44, 31, 31, 31, 157, - 18, 16, 19, 31, 19, 156, 19, 26, 19, 26, - 38, 26, 44, 26, 36, 36, 36, 155, 17, 25, - 28, 28, 28, 28, 130, 33, 130, 28, 46, 34, - 34, 34, 91, 91, 28, 28, 28, 28, 34, 34, - 34, 35, 35, 35, 61, 46, 75, 152, 151, 67, - 35, 35, 67, 67, 67, 112, 61, 61, 61, 67, - - 69, 69, 69, 75, 77, 85, 98, 77, 100, 69, - 69, 98, 100, 121, 129, 112, 150, 85, 85, 85, - 135, 135, 135, 143, 147, 149, 129, 129, 129, 146, - 138, 138, 138, 121, 138, 142, 142, 142, 145, 142, - 144, 141, 140, 143, 147, 161, 161, 161, 161, 161, - 161, 161, 161, 162, 139, 137, 136, 162, 162, 163, - 163, 163, 163, 163, 163, 163, 163, 164, 164, 164, - 164, 165, 134, 165, 166, 133, 128, 166, 127, 166, - 166, 167, 126, 125, 167, 167, 167, 167, 168, 124, - 168, 168, 169, 169, 169, 169, 169, 169, 169, 169, - - 170, 170, 170, 170, 170, 170, 170, 170, 171, 123, - 171, 172, 122, 172, 172, 120, 172, 172, 173, 173, - 173, 173, 173, 173, 173, 173, 174, 174, 174, 174, - 174, 174, 174, 174, 119, 118, 117, 116, 114, 113, - 111, 110, 109, 108, 107, 106, 105, 103, 102, 101, - 99, 97, 96, 95, 94, 93, 92, 90, 88, 87, - 86, 84, 83, 82, 81, 80, 79, 78, 76, 71, - 70, 68, 65, 63, 62, 58, 52, 51, 49, 48, - 47, 43, 40, 24, 23, 21, 15, 11, 8, 6, - 3, 2, 160, 160, 160, 160, 160, 160, 160, 160, + 7, 7, 7, 7, 7, 7, 16, 17, 18, 19, + 20, 20, 19, 22, 22, 25, 25, 26, 31, 44, + 26, 31, 31, 31, 38, 18, 16, 19, 31, 19, + 157, 19, 112, 19, 156, 26, 44, 26, 130, 26, + 130, 26, 155, 17, 38, 25, 28, 28, 28, 28, + 33, 33, 112, 28, 46, 34, 34, 34, 36, 36, + 28, 28, 28, 28, 34, 34, 34, 35, 35, 35, + 75, 46, 61, 61, 98, 77, 35, 35, 77, 98, + 33, 91, 91, 61, 61, 61, 67, 75, 152, 67, + + 67, 67, 69, 69, 69, 121, 67, 85, 85, 113, + 113, 69, 69, 100, 100, 143, 100, 151, 85, 85, + 85, 114, 114, 122, 122, 121, 129, 129, 135, 135, + 135, 138, 138, 147, 138, 143, 150, 129, 129, 129, + 142, 142, 149, 142, 146, 145, 144, 141, 140, 139, + 137, 136, 134, 147, 161, 161, 161, 161, 161, 161, + 161, 161, 162, 133, 128, 127, 162, 162, 163, 163, + 163, 163, 163, 163, 163, 163, 164, 164, 164, 164, + 165, 126, 165, 166, 125, 124, 166, 123, 166, 166, + 167, 120, 119, 167, 167, 167, 167, 168, 118, 168, + + 168, 169, 169, 169, 169, 169, 169, 169, 169, 170, + 170, 170, 170, 170, 170, 170, 170, 171, 117, 171, + 172, 116, 172, 172, 111, 172, 172, 173, 173, 
173, + 173, 173, 173, 173, 173, 174, 174, 174, 174, 174, + 174, 174, 174, 110, 109, 108, 107, 106, 105, 103, + 102, 101, 99, 97, 96, 95, 94, 93, 92, 90, + 88, 87, 86, 84, 83, 82, 81, 80, 79, 78, + 76, 71, 70, 68, 65, 63, 62, 58, 52, 51, + 49, 48, 47, 43, 40, 24, 23, 21, 15, 11, + 8, 6, 3, 2, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, - 160, 160, 160, 160, 160, 160, 160, 160, 160 + 160, 160, 160, 160, 160, 160, 160, 160, 160, 160 } ; static yy_state_type yy_last_accepting_state; diff --git a/trunk/scripts/dtc/dtc-parser.tab.c_shipped b/trunk/scripts/dtc/dtc-parser.tab.c_shipped index ee1d8c3042fb..4af55900a15b 100644 --- a/trunk/scripts/dtc/dtc-parser.tab.c_shipped +++ b/trunk/scripts/dtc/dtc-parser.tab.c_shipped @@ -1,8 +1,10 @@ -/* A Bison parser, made by GNU Bison 2.5. */ -/* Bison implementation for Yacc-like parsers in C +/* A Bison parser, made by GNU Bison 2.4.1. */ + +/* Skeleton implementation for Bison's Yacc-like parsers in C - Copyright (C) 1984, 1989-1990, 2000-2011 Free Software Foundation, Inc. + Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006 + Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -44,7 +46,7 @@ #define YYBISON 1 /* Bison version. */ -#define YYBISON_VERSION "2.5" +#define YYBISON_VERSION "2.4.1" /* Skeleton name. */ #define YYSKELETON_NAME "yacc.c" @@ -65,7 +67,7 @@ /* Copy the first part of user declarations. */ -/* Line 268 of yacc.c */ +/* Line 189 of yacc.c */ #line 21 "dtc-parser.y" #include @@ -86,8 +88,8 @@ static unsigned long long eval_literal(const char *s, int base, int bits); static unsigned char eval_char_literal(const char *s); -/* Line 268 of yacc.c */ -#line 91 "dtc-parser.tab.c" +/* Line 189 of yacc.c */ +#line 93 "dtc-parser.tab.c" /* Enabling traces. */ #ifndef YYDEBUG @@ -145,7 +147,7 @@ static unsigned char eval_char_literal(const char *s); typedef union YYSTYPE { -/* Line 293 of yacc.c */ +/* Line 214 of yacc.c */ #line 40 "dtc-parser.y" char *propnodename; @@ -169,8 +171,8 @@ typedef union YYSTYPE -/* Line 293 of yacc.c */ -#line 174 "dtc-parser.tab.c" +/* Line 214 of yacc.c */ +#line 176 "dtc-parser.tab.c" } YYSTYPE; # define YYSTYPE_IS_TRIVIAL 1 # define yystype YYSTYPE /* obsolescent; will be withdrawn */ @@ -181,8 +183,8 @@ typedef union YYSTYPE /* Copy the second part of user declarations. */ -/* Line 343 of yacc.c */ -#line 186 "dtc-parser.tab.c" +/* Line 264 of yacc.c */ +#line 188 "dtc-parser.tab.c" #ifdef short # undef short @@ -232,7 +234,7 @@ typedef short int yytype_int16; #define YYSIZE_MAXIMUM ((YYSIZE_T) -1) #ifndef YY_ -# if defined YYENABLE_NLS && YYENABLE_NLS +# if YYENABLE_NLS # if ENABLE_NLS # include /* INFRINGES ON USER NAME SPACE */ # define YY_(msgid) dgettext ("bison-runtime", msgid) @@ -285,11 +287,11 @@ YYID (yyi) # define alloca _alloca # else # define YYSTACK_ALLOC alloca -# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \ +# if ! defined _ALLOCA_H && ! 
defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) # include /* INFRINGES ON USER NAME SPACE */ -# ifndef EXIT_SUCCESS -# define EXIT_SUCCESS 0 +# ifndef _STDLIB_H +# define _STDLIB_H 1 # endif # endif # endif @@ -312,24 +314,24 @@ YYID (yyi) # ifndef YYSTACK_ALLOC_MAXIMUM # define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM # endif -# if (defined __cplusplus && ! defined EXIT_SUCCESS \ +# if (defined __cplusplus && ! defined _STDLIB_H \ && ! ((defined YYMALLOC || defined malloc) \ && (defined YYFREE || defined free))) # include /* INFRINGES ON USER NAME SPACE */ -# ifndef EXIT_SUCCESS -# define EXIT_SUCCESS 0 +# ifndef _STDLIB_H +# define _STDLIB_H 1 # endif # endif # ifndef YYMALLOC # define YYMALLOC malloc -# if ! defined malloc && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \ +# if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ # endif # endif # ifndef YYFREE # define YYFREE free -# if ! defined free && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \ +# if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) void free (void *); /* INFRINGES ON USER NAME SPACE */ # endif @@ -358,7 +360,23 @@ union yyalloc ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \ + YYSTACK_GAP_MAXIMUM) -# define YYCOPY_NEEDED 1 +/* Copy COUNT objects from FROM to TO. The source and destination do + not overlap. */ +# ifndef YYCOPY +# if defined __GNUC__ && 1 < __GNUC__ +# define YYCOPY(To, From, Count) \ + __builtin_memcpy (To, From, (Count) * sizeof (*(From))) +# else +# define YYCOPY(To, From, Count) \ + do \ + { \ + YYSIZE_T yyi; \ + for (yyi = 0; yyi < (Count); yyi++) \ + (To)[yyi] = (From)[yyi]; \ + } \ + while (YYID (0)) +# endif +# endif /* Relocate STACK from its old location to the new one. The local variables YYSIZE and YYSTACKSIZE give the old and new number of @@ -378,26 +396,6 @@ union yyalloc #endif -#if defined YYCOPY_NEEDED && YYCOPY_NEEDED -/* Copy COUNT objects from FROM to TO. The source and destination do - not overlap. */ -# ifndef YYCOPY -# if defined __GNUC__ && 1 < __GNUC__ -# define YYCOPY(To, From, Count) \ - __builtin_memcpy (To, From, (Count) * sizeof (*(From))) -# else -# define YYCOPY(To, From, Count) \ - do \ - { \ - YYSIZE_T yyi; \ - for (yyi = 0; yyi < (Count); yyi++) \ - (To)[yyi] = (From)[yyi]; \ - } \ - while (YYID (0)) -# endif -# endif -#endif /* !YYCOPY_NEEDED */ - /* YYFINAL -- State number of the termination state. */ #define YYFINAL 4 /* YYLAST -- Last index in YYTABLE. */ @@ -573,8 +571,8 @@ static const yytype_uint8 yyr2[] = 2, 0, 2, 2, 0, 2, 2, 2, 3, 2 }; -/* YYDEFACT[STATE-NAME] -- Default reduction number in state STATE-NUM. - Performed when YYTABLE doesn't specify something else to do. Zero +/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state + STATE-NUM when YYTABLE doesn't specify something else to do. Zero means the default is an error. */ static const yytype_uint8 yydefact[] = { @@ -635,7 +633,8 @@ static const yytype_int8 yypgoto[] = /* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If positive, shift that token. If negative, reduce the rule which - number is the opposite. If YYTABLE_NINF, syntax error. */ + number is the opposite. If zero, do what YYDEFACT says. + If YYTABLE_NINF, syntax error. 
*/ #define YYTABLE_NINF -1 static const yytype_uint8 yytable[] = { @@ -655,12 +654,6 @@ static const yytype_uint8 yytable[] = 137, 0, 73, 139 }; -#define yypact_value_is_default(yystate) \ - ((yystate) == (-78)) - -#define yytable_value_is_error(yytable_value) \ - YYID (0) - static const yytype_int16 yycheck[] = { 5, 38, 39, 17, 18, 19, 12, 12, 17, 18, @@ -712,18 +705,9 @@ static const yytype_uint8 yystos[] = /* Like YYERROR except do call yyerror. This remains here temporarily to ease the transition to the new meaning of YYERROR, for GCC. - Once GCC version 2 has supplanted version 1, this can go. However, - YYFAIL appears to be in use. Nevertheless, it is formally deprecated - in Bison 2.4.2's NEWS entry, where a plan to phase it out is - discussed. */ + Once GCC version 2 has supplanted version 1, this can go. */ #define YYFAIL goto yyerrlab -#if defined YYFAIL - /* This is here to suppress warnings from the GCC cpp's - -Wunused-macros. Normally we don't worry about that warning, but - some users do, and we want to make it easy for users to remove - YYFAIL uses, which will produce warnings from Bison 2.5. */ -#endif #define YYRECOVERING() (!!yyerrstatus) @@ -733,6 +717,7 @@ do \ { \ yychar = (Token); \ yylval = (Value); \ + yytoken = YYTRANSLATE (yychar); \ YYPOPSTACK (1); \ goto yybackup; \ } \ @@ -774,10 +759,19 @@ while (YYID (0)) #endif -/* This macro is provided for backward compatibility. */ +/* YY_LOCATION_PRINT -- Print the location on the stream. + This macro was not mandated originally: define only if we know + we won't break user code: when these are the locations we know. */ #ifndef YY_LOCATION_PRINT -# define YY_LOCATION_PRINT(File, Loc) ((void) 0) +# if YYLTYPE_IS_TRIVIAL +# define YY_LOCATION_PRINT(File, Loc) \ + fprintf (File, "%d.%d-%d.%d", \ + (Loc).first_line, (Loc).first_column, \ + (Loc).last_line, (Loc).last_column) +# else +# define YY_LOCATION_PRINT(File, Loc) ((void) 0) +# endif #endif @@ -969,6 +963,7 @@ int yydebug; # define YYMAXDEPTH 10000 #endif + #if YYERROR_VERBOSE @@ -1071,142 +1066,115 @@ yytnamerr (char *yyres, const char *yystr) } # endif -/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message - about the unexpected token YYTOKEN for the state stack whose top is - YYSSP. - - Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is - not large enough to hold the message. In that case, also set - *YYMSG_ALLOC to the required number of bytes. Return 2 if the - required number of bytes is too large to store. */ -static int -yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, - yytype_int16 *yyssp, int yytoken) +/* Copy into YYRESULT an error message about the unexpected token + YYCHAR while in state YYSTATE. Return the number of bytes copied, + including the terminating null byte. If YYRESULT is null, do not + copy anything; just return the number of bytes that would be + copied. As a special case, return 0 if an ordinary "syntax error" + message will do. Return YYSIZE_MAXIMUM if overflow occurs during + size calculation. */ +static YYSIZE_T +yysyntax_error (char *yyresult, int yystate, int yychar) { - YYSIZE_T yysize0 = yytnamerr (0, yytname[yytoken]); - YYSIZE_T yysize = yysize0; - YYSIZE_T yysize1; - enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; - /* Internationalized format string. */ - const char *yyformat = 0; - /* Arguments of yyformat. */ - char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; - /* Number of reported tokens (one for the "unexpected", one per - "expected"). 
*/ - int yycount = 0; - - /* There are many possibilities here to consider: - - Assume YYFAIL is not used. It's too flawed to consider. See - - for details. YYERROR is fine as it does not invoke this - function. - - If this state is a consistent state with a default action, then - the only way this function was invoked is if the default action - is an error action. In that case, don't check for expected - tokens because there are none. - - The only way there can be no lookahead present (in yychar) is if - this state is a consistent state with a default action. Thus, - detecting the absence of a lookahead is sufficient to determine - that there is no unexpected or expected token to report. In that - case, just report a simple "syntax error". - - Don't assume there isn't a lookahead just because this state is a - consistent state with a default action. There might have been a - previous inconsistent state, consistent state with a non-default - action, or user semantic action that manipulated yychar. - - Of course, the expected token list depends on states to have - correct lookahead information, and it depends on the parser not - to perform extra reductions after fetching a lookahead from the - scanner and before detecting a syntax error. Thus, state merging - (from LALR or IELR) and default reductions corrupt the expected - token list. However, the list is correct for canonical LR with - one exception: it will still contain any token that will not be - accepted due to an error action in a later state. - */ - if (yytoken != YYEMPTY) - { - int yyn = yypact[*yyssp]; - yyarg[yycount++] = yytname[yytoken]; - if (!yypact_value_is_default (yyn)) - { - /* Start YYX at -YYN if negative to avoid negative indexes in - YYCHECK. In other words, skip the first -YYN actions for - this state because they are default actions. */ - int yyxbegin = yyn < 0 ? -yyn : 0; - /* Stay within bounds of both yycheck and yytname. */ - int yychecklim = YYLAST - yyn + 1; - int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; - int yyx; - - for (yyx = yyxbegin; yyx < yyxend; ++yyx) - if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR - && !yytable_value_is_error (yytable[yyx + yyn])) - { - if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) - { - yycount = 1; - yysize = yysize0; - break; - } - yyarg[yycount++] = yytname[yyx]; - yysize1 = yysize + yytnamerr (0, yytname[yyx]); - if (! (yysize <= yysize1 - && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) - return 2; - yysize = yysize1; - } - } - } + int yyn = yypact[yystate]; - switch (yycount) + if (! (YYPACT_NINF < yyn && yyn <= YYLAST)) + return 0; + else { -# define YYCASE_(N, S) \ - case N: \ - yyformat = S; \ - break - YYCASE_(0, YY_("syntax error")); - YYCASE_(1, YY_("syntax error, unexpected %s")); - YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s")); - YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s")); - YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s")); - YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s")); -# undef YYCASE_ - } + int yytype = YYTRANSLATE (yychar); + YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]); + YYSIZE_T yysize = yysize0; + YYSIZE_T yysize1; + int yysize_overflow = 0; + enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; + char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; + int yyx; + +# if 0 + /* This is so xgettext sees the translatable formats that are + constructed on the fly. 
*/ + YY_("syntax error, unexpected %s"); + YY_("syntax error, unexpected %s, expecting %s"); + YY_("syntax error, unexpected %s, expecting %s or %s"); + YY_("syntax error, unexpected %s, expecting %s or %s or %s"); + YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"); +# endif + char *yyfmt; + char const *yyf; + static char const yyunexpected[] = "syntax error, unexpected %s"; + static char const yyexpecting[] = ", expecting %s"; + static char const yyor[] = " or %s"; + char yyformat[sizeof yyunexpected + + sizeof yyexpecting - 1 + + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2) + * (sizeof yyor - 1))]; + char const *yyprefix = yyexpecting; + + /* Start YYX at -YYN if negative to avoid negative indexes in + YYCHECK. */ + int yyxbegin = yyn < 0 ? -yyn : 0; + + /* Stay within bounds of both yycheck and yytname. */ + int yychecklim = YYLAST - yyn + 1; + int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; + int yycount = 1; + + yyarg[0] = yytname[yytype]; + yyfmt = yystpcpy (yyformat, yyunexpected); + + for (yyx = yyxbegin; yyx < yyxend; ++yyx) + if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR) + { + if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) + { + yycount = 1; + yysize = yysize0; + yyformat[sizeof yyunexpected - 1] = '\0'; + break; + } + yyarg[yycount++] = yytname[yyx]; + yysize1 = yysize + yytnamerr (0, yytname[yyx]); + yysize_overflow |= (yysize1 < yysize); + yysize = yysize1; + yyfmt = yystpcpy (yyfmt, yyprefix); + yyprefix = yyor; + } - yysize1 = yysize + yystrlen (yyformat); - if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) - return 2; - yysize = yysize1; + yyf = YY_(yyformat); + yysize1 = yysize + yystrlen (yyf); + yysize_overflow |= (yysize1 < yysize); + yysize = yysize1; - if (*yymsg_alloc < yysize) - { - *yymsg_alloc = 2 * yysize; - if (! (yysize <= *yymsg_alloc - && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM)) - *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM; - return 1; - } + if (yysize_overflow) + return YYSIZE_MAXIMUM; - /* Avoid sprintf, as that infringes on the user's name space. - Don't have undefined behavior even if the translation - produced a string with the wrong number of "%s"s. */ - { - char *yyp = *yymsg; - int yyi = 0; - while ((*yyp = *yyformat) != '\0') - if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount) - { - yyp += yytnamerr (yyp, yyarg[yyi++]); - yyformat += 2; - } - else - { - yyp++; - yyformat++; - } - } - return 0; + if (yyresult) + { + /* Avoid sprintf, as that infringes on the user's name space. + Don't have undefined behavior even if the translation + produced a string with the wrong number of "%s"s. */ + char *yyp = yyresult; + int yyi = 0; + while ((*yyp = *yyf) != '\0') + { + if (*yyp == '%' && yyf[1] == 's' && yyi < yycount) + { + yyp += yytnamerr (yyp, yyarg[yyi++]); + yyf += 2; + } + else + { + yyp++; + yyf++; + } + } + } + return yysize; + } } #endif /* YYERROR_VERBOSE */ + /*-----------------------------------------------. | Release the memory associated to this symbol. | @@ -1239,7 +1207,6 @@ yydestruct (yymsg, yytype, yyvaluep) } } - /* Prevent warnings from -Wmissing-prototypes. */ #ifdef YYPARSE_PARAM #if defined __STDC__ || defined __cplusplus @@ -1266,9 +1233,10 @@ YYSTYPE yylval; int yynerrs; -/*----------. -| yyparse. | -`----------*/ + +/*-------------------------. +| yyparse or yypush_parse. | +`-------------------------*/ #ifdef YYPARSE_PARAM #if (defined __STDC__ || defined __C99__FUNC__ \ @@ -1292,6 +1260,8 @@ yyparse () #endif #endif { + + int yystate; /* Number of tokens to shift before error messages enabled. 
*/ int yyerrstatus; @@ -1446,7 +1416,7 @@ yybackup: /* First try to decide what to do without reference to lookahead token. */ yyn = yypact[yystate]; - if (yypact_value_is_default (yyn)) + if (yyn == YYPACT_NINF) goto yydefault; /* Not known => get a lookahead token if don't already have one. */ @@ -1477,8 +1447,8 @@ yybackup: yyn = yytable[yyn]; if (yyn <= 0) { - if (yytable_value_is_error (yyn)) - goto yyerrlab; + if (yyn == 0 || yyn == YYTABLE_NINF) + goto yyerrlab; yyn = -yyn; goto yyreduce; } @@ -1533,72 +1503,72 @@ yyreduce: { case 2: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 110 "dtc-parser.y" { the_boot_info = build_boot_info((yyvsp[(3) - (4)].re), (yyvsp[(4) - (4)].node), guess_boot_cpuid((yyvsp[(4) - (4)].node))); - } + ;} break; case 3: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 118 "dtc-parser.y" { (yyval.re) = NULL; - } + ;} break; case 4: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 122 "dtc-parser.y" { (yyval.re) = chain_reserve_entry((yyvsp[(1) - (2)].re), (yyvsp[(2) - (2)].re)); - } + ;} break; case 5: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 129 "dtc-parser.y" { (yyval.re) = build_reserve_entry((yyvsp[(2) - (4)].integer), (yyvsp[(3) - (4)].integer)); - } + ;} break; case 6: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 133 "dtc-parser.y" { add_label(&(yyvsp[(2) - (2)].re)->labels, (yyvsp[(1) - (2)].labelref)); (yyval.re) = (yyvsp[(2) - (2)].re); - } + ;} break; case 7: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 141 "dtc-parser.y" { (yyval.node) = name_node((yyvsp[(2) - (2)].node), ""); - } + ;} break; case 8: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 145 "dtc-parser.y" { (yyval.node) = merge_nodes((yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node)); - } + ;} break; case 9: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 149 "dtc-parser.y" { struct node *target = get_node_by_ref((yyvsp[(1) - (3)].node), (yyvsp[(2) - (3)].labelref)); @@ -1608,12 +1578,12 @@ yyreduce: else print_error("label or path, '%s', not found", (yyvsp[(2) - (3)].labelref)); (yyval.node) = (yyvsp[(1) - (3)].node); - } + ;} break; case 10: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 159 "dtc-parser.y" { struct node *target = get_node_by_ref((yyvsp[(1) - (4)].node), (yyvsp[(3) - (4)].labelref)); @@ -1624,112 +1594,112 @@ yyreduce: delete_node(target); (yyval.node) = (yyvsp[(1) - (4)].node); - } + ;} break; case 11: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 173 "dtc-parser.y" { (yyval.node) = build_node((yyvsp[(2) - (5)].proplist), (yyvsp[(3) - (5)].nodelist)); - } + ;} break; case 12: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 180 "dtc-parser.y" { (yyval.proplist) = NULL; - } + ;} break; case 13: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 184 "dtc-parser.y" { (yyval.proplist) = chain_property((yyvsp[(2) - (2)].prop), (yyvsp[(1) - (2)].proplist)); - } + ;} break; case 14: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 191 "dtc-parser.y" { (yyval.prop) = build_property((yyvsp[(1) - (4)].propnodename), (yyvsp[(3) - (4)].data)); - } + ;} break; case 15: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 195 "dtc-parser.y" { (yyval.prop) = build_property((yyvsp[(1) - (2)].propnodename), empty_data); - } + ;} break; case 16: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 199 "dtc-parser.y" { (yyval.prop) = build_property_delete((yyvsp[(2) - (3)].propnodename)); - } + ;} break; case 
17: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 203 "dtc-parser.y" { add_label(&(yyvsp[(2) - (2)].prop)->labels, (yyvsp[(1) - (2)].labelref)); (yyval.prop) = (yyvsp[(2) - (2)].prop); - } + ;} break; case 18: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 211 "dtc-parser.y" { (yyval.data) = data_merge((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].data)); - } + ;} break; case 19: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 215 "dtc-parser.y" { (yyval.data) = data_merge((yyvsp[(1) - (3)].data), (yyvsp[(2) - (3)].array).data); - } + ;} break; case 20: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 219 "dtc-parser.y" { (yyval.data) = data_merge((yyvsp[(1) - (4)].data), (yyvsp[(3) - (4)].data)); - } + ;} break; case 21: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 223 "dtc-parser.y" { (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), REF_PATH, (yyvsp[(2) - (2)].labelref)); - } + ;} break; case 22: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 227 "dtc-parser.y" { FILE *f = srcfile_relative_open((yyvsp[(4) - (9)].data).val, NULL); @@ -1746,12 +1716,12 @@ yyreduce: (yyval.data) = data_merge((yyvsp[(1) - (9)].data), d); fclose(f); - } + ;} break; case 23: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 244 "dtc-parser.y" { FILE *f = srcfile_relative_open((yyvsp[(4) - (5)].data).val, NULL); @@ -1761,48 +1731,48 @@ yyreduce: (yyval.data) = data_merge((yyvsp[(1) - (5)].data), d); fclose(f); - } + ;} break; case 24: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 254 "dtc-parser.y" { (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref)); - } + ;} break; case 25: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 261 "dtc-parser.y" { (yyval.data) = empty_data; - } + ;} break; case 26: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 265 "dtc-parser.y" { (yyval.data) = (yyvsp[(1) - (2)].data); - } + ;} break; case 27: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 269 "dtc-parser.y" { (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref)); - } + ;} break; case 28: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 276 "dtc-parser.y" { (yyval.array).data = empty_data; @@ -1817,22 +1787,22 @@ yyreduce: " are currently supported"); (yyval.array).bits = 32; } - } + ;} break; case 29: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 291 "dtc-parser.y" { (yyval.array).data = empty_data; (yyval.array).bits = 32; - } + ;} break; case 30: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 296 "dtc-parser.y" { if ((yyvsp[(1) - (2)].array).bits < 64) { @@ -1852,12 +1822,12 @@ yyreduce: } (yyval.array).data = data_append_integer((yyvsp[(1) - (2)].array).data, (yyvsp[(2) - (2)].integer), (yyvsp[(1) - (2)].array).bits); - } + ;} break; case 31: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 316 "dtc-parser.y" { uint64_t val = ~0ULL >> (64 - (yyvsp[(1) - (2)].array).bits); @@ -1871,299 +1841,288 @@ yyreduce: "arrays with 32-bit elements."); (yyval.array).data = data_append_integer((yyvsp[(1) - (2)].array).data, val, (yyvsp[(1) - (2)].array).bits); - } + ;} break; case 32: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 330 "dtc-parser.y" { (yyval.array).data = data_add_marker((yyvsp[(1) - (2)].array).data, LABEL, (yyvsp[(2) - (2)].labelref)); - } + ;} break; case 33: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 337 "dtc-parser.y" { 
(yyval.integer) = eval_literal((yyvsp[(1) - (1)].literal), 0, 64); - } + ;} break; case 34: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 341 "dtc-parser.y" { (yyval.integer) = eval_char_literal((yyvsp[(1) - (1)].literal)); - } + ;} break; case 35: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 345 "dtc-parser.y" { (yyval.integer) = (yyvsp[(2) - (3)].integer); - } + ;} break; case 38: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 356 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (5)].integer) ? (yyvsp[(3) - (5)].integer) : (yyvsp[(5) - (5)].integer); } + { (yyval.integer) = (yyvsp[(1) - (5)].integer) ? (yyvsp[(3) - (5)].integer) : (yyvsp[(5) - (5)].integer); ;} break; case 40: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 361 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) || (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) || (yyvsp[(3) - (3)].integer); ;} break; case 42: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 366 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) && (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) && (yyvsp[(3) - (3)].integer); ;} break; case 44: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 371 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) | (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) | (yyvsp[(3) - (3)].integer); ;} break; case 46: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 376 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) ^ (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) ^ (yyvsp[(3) - (3)].integer); ;} break; case 48: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 381 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) & (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) & (yyvsp[(3) - (3)].integer); ;} break; case 50: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 386 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) == (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) == (yyvsp[(3) - (3)].integer); ;} break; case 51: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 387 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) != (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) != (yyvsp[(3) - (3)].integer); ;} break; case 53: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 392 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) < (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) < (yyvsp[(3) - (3)].integer); ;} break; case 54: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 393 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) > (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) > (yyvsp[(3) - (3)].integer); ;} break; case 55: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 394 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) <= (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) <= (yyvsp[(3) - (3)].integer); ;} break; case 56: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 395 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) >= (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) >= (yyvsp[(3) - 
(3)].integer); ;} break; case 57: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 399 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) << (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) << (yyvsp[(3) - (3)].integer); ;} break; case 58: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 400 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) >> (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) >> (yyvsp[(3) - (3)].integer); ;} break; case 60: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 405 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) + (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) + (yyvsp[(3) - (3)].integer); ;} break; case 61: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 406 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) - (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) - (yyvsp[(3) - (3)].integer); ;} break; case 63: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 411 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) * (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) * (yyvsp[(3) - (3)].integer); ;} break; case 64: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 412 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) / (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) / (yyvsp[(3) - (3)].integer); ;} break; case 65: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 413 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) % (yyvsp[(3) - (3)].integer); } + { (yyval.integer) = (yyvsp[(1) - (3)].integer) % (yyvsp[(3) - (3)].integer); ;} break; case 68: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 419 "dtc-parser.y" - { (yyval.integer) = -(yyvsp[(2) - (2)].integer); } + { (yyval.integer) = -(yyvsp[(2) - (2)].integer); ;} break; case 69: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 420 "dtc-parser.y" - { (yyval.integer) = ~(yyvsp[(2) - (2)].integer); } + { (yyval.integer) = ~(yyvsp[(2) - (2)].integer); ;} break; case 70: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 421 "dtc-parser.y" - { (yyval.integer) = !(yyvsp[(2) - (2)].integer); } + { (yyval.integer) = !(yyvsp[(2) - (2)].integer); ;} break; case 71: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 426 "dtc-parser.y" { (yyval.data) = empty_data; - } + ;} break; case 72: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 430 "dtc-parser.y" { (yyval.data) = data_append_byte((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].byte)); - } + ;} break; case 73: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 434 "dtc-parser.y" { (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref)); - } + ;} break; case 74: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 441 "dtc-parser.y" { (yyval.nodelist) = NULL; - } + ;} break; case 75: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 445 "dtc-parser.y" { (yyval.nodelist) = chain_node((yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].nodelist)); - } + ;} break; case 76: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 449 "dtc-parser.y" { print_error("syntax error: properties must precede subnodes"); YYERROR; - } + ;} break; case 77: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 457 "dtc-parser.y" { 
(yyval.node) = name_node((yyvsp[(2) - (2)].node), (yyvsp[(1) - (2)].propnodename)); - } + ;} break; case 78: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 461 "dtc-parser.y" { (yyval.node) = name_node(build_node_delete(), (yyvsp[(2) - (3)].propnodename)); - } + ;} break; case 79: -/* Line 1806 of yacc.c */ +/* Line 1455 of yacc.c */ #line 465 "dtc-parser.y" { add_label(&(yyvsp[(2) - (2)].node)->labels, (yyvsp[(1) - (2)].labelref)); (yyval.node) = (yyvsp[(2) - (2)].node); - } + ;} break; -/* Line 1806 of yacc.c */ -#line 2154 "dtc-parser.tab.c" +/* Line 1455 of yacc.c */ +#line 2124 "dtc-parser.tab.c" default: break; } - /* User semantic actions sometimes alter yychar, and that requires - that yytoken be updated with the new translation. We take the - approach of translating immediately before every use of yytoken. - One alternative is translating here after every semantic action, - but that translation would be missed if the semantic action invokes - YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or - if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an - incorrect destructor might then be invoked immediately. In the - case of YYERROR or YYBACKUP, subsequent parser actions might lead - to an incorrect destructor call or verbose syntax error message - before the lookahead is translated. */ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); YYPOPSTACK (yylen); @@ -2191,10 +2150,6 @@ yyreduce: | yyerrlab -- here on detecting error | `------------------------------------*/ yyerrlab: - /* Make sure we have latest lookahead translation. See comments at - user semantic actions for why this is necessary. */ - yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar); - /* If not already recovering from an error, report this error. */ if (!yyerrstatus) { @@ -2202,36 +2157,37 @@ yyerrlab: #if ! YYERROR_VERBOSE yyerror (YY_("syntax error")); #else -# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \ - yyssp, yytoken) { - char const *yymsgp = YY_("syntax error"); - int yysyntax_error_status; - yysyntax_error_status = YYSYNTAX_ERROR; - if (yysyntax_error_status == 0) - yymsgp = yymsg; - else if (yysyntax_error_status == 1) - { - if (yymsg != yymsgbuf) - YYSTACK_FREE (yymsg); - yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc); - if (!yymsg) - { - yymsg = yymsgbuf; - yymsg_alloc = sizeof yymsgbuf; - yysyntax_error_status = 2; - } - else - { - yysyntax_error_status = YYSYNTAX_ERROR; - yymsgp = yymsg; - } - } - yyerror (yymsgp); - if (yysyntax_error_status == 2) - goto yyexhaustedlab; + YYSIZE_T yysize = yysyntax_error (0, yystate, yychar); + if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM) + { + YYSIZE_T yyalloc = 2 * yysize; + if (! 
(yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM)) + yyalloc = YYSTACK_ALLOC_MAXIMUM; + if (yymsg != yymsgbuf) + YYSTACK_FREE (yymsg); + yymsg = (char *) YYSTACK_ALLOC (yyalloc); + if (yymsg) + yymsg_alloc = yyalloc; + else + { + yymsg = yymsgbuf; + yymsg_alloc = sizeof yymsgbuf; + } + } + + if (0 < yysize && yysize <= yymsg_alloc) + { + (void) yysyntax_error (yymsg, yystate, yychar); + yyerror (yymsg); + } + else + { + yyerror (YY_("syntax error")); + if (yysize != 0) + goto yyexhaustedlab; + } } -# undef YYSYNTAX_ERROR #endif } @@ -2290,7 +2246,7 @@ yyerrlab1: for (;;) { yyn = yypact[yystate]; - if (!yypact_value_is_default (yyn)) + if (yyn != YYPACT_NINF) { yyn += YYTERROR; if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) @@ -2349,13 +2305,8 @@ yyexhaustedlab: yyreturn: if (yychar != YYEMPTY) - { - /* Make sure we have latest lookahead translation. See comments at - user semantic actions for why this is necessary. */ - yytoken = YYTRANSLATE (yychar); - yydestruct ("Cleanup: discarding lookahead", - yytoken, &yylval); - } + yydestruct ("Cleanup: discarding lookahead", + yytoken, &yylval); /* Do not reclaim the symbols of the rule which action triggered this YYABORT or YYACCEPT. */ YYPOPSTACK (yylen); @@ -2380,7 +2331,7 @@ yyreturn: -/* Line 2067 of yacc.c */ +/* Line 1675 of yacc.c */ #line 471 "dtc-parser.y" diff --git a/trunk/scripts/dtc/dtc-parser.tab.h_shipped b/trunk/scripts/dtc/dtc-parser.tab.h_shipped index 25d3b88c6132..9d2dce41211f 100644 --- a/trunk/scripts/dtc/dtc-parser.tab.h_shipped +++ b/trunk/scripts/dtc/dtc-parser.tab.h_shipped @@ -1,8 +1,10 @@ -/* A Bison parser, made by GNU Bison 2.5. */ -/* Bison interface for Yacc-like parsers in C +/* A Bison parser, made by GNU Bison 2.4.1. */ + +/* Skeleton interface for Bison's Yacc-like parsers in C - Copyright (C) 1984, 1989-1990, 2000-2011 Free Software Foundation, Inc. + Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006 + Free Software Foundation, Inc. 
This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -68,7 +70,7 @@ typedef union YYSTYPE { -/* Line 2068 of yacc.c */ +/* Line 1676 of yacc.c */ #line 40 "dtc-parser.y" char *propnodename; @@ -92,8 +94,8 @@ typedef union YYSTYPE -/* Line 2068 of yacc.c */ -#line 97 "dtc-parser.tab.h" +/* Line 1676 of yacc.c */ +#line 99 "dtc-parser.tab.h" } YYSTYPE; # define YYSTYPE_IS_TRIVIAL 1 # define yystype YYSTYPE /* obsolescent; will be withdrawn */ diff --git a/trunk/scripts/kconfig/lxdialog/menubox.c b/trunk/scripts/kconfig/lxdialog/menubox.c index 38cd69c5660e..48d382e7e374 100644 --- a/trunk/scripts/kconfig/lxdialog/menubox.c +++ b/trunk/scripts/kconfig/lxdialog/menubox.c @@ -303,11 +303,10 @@ int dialog_menu(const char *title, const char *prompt, } } - if (item_count() != 0 && - (i < max_choice || - key == KEY_UP || key == KEY_DOWN || - key == '-' || key == '+' || - key == KEY_PPAGE || key == KEY_NPAGE)) { + if (i < max_choice || + key == KEY_UP || key == KEY_DOWN || + key == '-' || key == '+' || + key == KEY_PPAGE || key == KEY_NPAGE) { /* Remove highligt of current item */ print_item(scroll + choice, choice, FALSE); diff --git a/trunk/scripts/kconfig/mconf.c b/trunk/scripts/kconfig/mconf.c index a69cbd78fb38..387dc8daf7b2 100644 --- a/trunk/scripts/kconfig/mconf.c +++ b/trunk/scripts/kconfig/mconf.c @@ -670,12 +670,11 @@ static void conf(struct menu *menu, struct menu *active_menu) active_menu, &s_scroll); if (res == 1 || res == KEY_ESC || res == -ERRDISPLAYTOOSMALL) break; - if (item_count() != 0) { - if (!item_activate_selected()) - continue; - if (!item_tag()) - continue; - } + if (!item_activate_selected()) + continue; + if (!item_tag()) + continue; + submenu = item_data(); active_menu = item_data(); if (submenu) diff --git a/trunk/scripts/kconfig/menu.c b/trunk/scripts/kconfig/menu.c index fd3f0180e08f..b5c7d90df9df 100644 --- a/trunk/scripts/kconfig/menu.c +++ b/trunk/scripts/kconfig/menu.c @@ -146,24 +146,11 @@ struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *e struct menu *menu = current_entry; while ((menu = menu->parent) != NULL) { - struct expr *dup_expr; - if (!menu->visibility) continue; - /* - * Do not add a reference to the - * menu's visibility expression but - * use a copy of it. Otherwise the - * expression reduction functions - * will modify expressions that have - * multiple references which can - * cause unwanted side effects. 
- */ - dup_expr = expr_copy(menu->visibility); - prop->visible.expr = expr_alloc_and(prop->visible.expr, - dup_expr); + menu->visibility); } } diff --git a/trunk/scripts/package/Makefile b/trunk/scripts/package/Makefile index a4f31c900fa6..84a406070f6f 100644 --- a/trunk/scripts/package/Makefile +++ b/trunk/scripts/package/Makefile @@ -63,7 +63,7 @@ binrpm-pkg: FORCE mv -f $(objtree)/.tmp_version $(objtree)/.version $(RPM) $(RPMOPTS) --define "_builddir $(objtree)" --target \ - $(UTS_MACHINE) -bb $(objtree)/binkernel.spec + $(UTS_MACHINE) -bb $< rm binkernel.spec # Deb target diff --git a/trunk/security/selinux/xfrm.c b/trunk/security/selinux/xfrm.c index d03081886214..8ab295154517 100644 --- a/trunk/security/selinux/xfrm.c +++ b/trunk/security/selinux/xfrm.c @@ -316,7 +316,6 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, memcpy(new_ctx, old_ctx, sizeof(*new_ctx)); memcpy(new_ctx->ctx_str, old_ctx->ctx_str, new_ctx->ctx_len); - atomic_inc(&selinux_xfrm_refcount); *new_ctxp = new_ctx; } return 0; @@ -327,7 +326,6 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, */ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx) { - atomic_dec(&selinux_xfrm_refcount); kfree(ctx); } @@ -337,13 +335,17 @@ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx) int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) { const struct task_security_struct *tsec = current_security(); + int rc = 0; - if (!ctx) - return 0; + if (ctx) { + rc = avc_has_perm(tsec->sid, ctx->ctx_sid, + SECCLASS_ASSOCIATION, + ASSOCIATION__SETCONTEXT, NULL); + if (rc == 0) + atomic_dec(&selinux_xfrm_refcount); + } - return avc_has_perm(tsec->sid, ctx->ctx_sid, - SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, - NULL); + return rc; } /* @@ -368,8 +370,8 @@ int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uct */ void selinux_xfrm_state_free(struct xfrm_state *x) { - atomic_dec(&selinux_xfrm_refcount); - kfree(x->security); + struct xfrm_sec_ctx *ctx = x->security; + kfree(ctx); } /* @@ -379,13 +381,17 @@ int selinux_xfrm_state_delete(struct xfrm_state *x) { const struct task_security_struct *tsec = current_security(); struct xfrm_sec_ctx *ctx = x->security; + int rc = 0; - if (!ctx) - return 0; + if (ctx) { + rc = avc_has_perm(tsec->sid, ctx->ctx_sid, + SECCLASS_ASSOCIATION, + ASSOCIATION__SETCONTEXT, NULL); + if (rc == 0) + atomic_dec(&selinux_xfrm_refcount); + } - return avc_has_perm(tsec->sid, ctx->ctx_sid, - SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, - NULL); + return rc; } /* diff --git a/trunk/sound/aoa/fabrics/layout.c b/trunk/sound/aoa/fabrics/layout.c index 61ab640e195f..552b97afbca5 100644 --- a/trunk/sound/aoa/fabrics/layout.c +++ b/trunk/sound/aoa/fabrics/layout.c @@ -113,7 +113,6 @@ MODULE_ALIAS("sound-layout-100"); MODULE_ALIAS("aoa-device-id-14"); MODULE_ALIAS("aoa-device-id-22"); MODULE_ALIAS("aoa-device-id-35"); -MODULE_ALIAS("aoa-device-id-44"); /* onyx with all but microphone connected */ static struct codec_connection onyx_connections_nomic[] = { @@ -362,13 +361,6 @@ static struct layout layouts[] = { .connections = tas_connections_nolineout, }, }, - /* PowerBook6,5 */ - { .device_id = 44, - .codecs[0] = { - .name = "tas", - .connections = tas_connections_all, - }, - }, /* PowerBook6,7 */ { .layout_id = 80, .codecs[0] = { diff --git a/trunk/sound/aoa/soundbus/i2sbus/core.c b/trunk/sound/aoa/soundbus/i2sbus/core.c index 15e76131b501..010658335881 100644 --- a/trunk/sound/aoa/soundbus/i2sbus/core.c +++ b/trunk/sound/aoa/soundbus/i2sbus/core.c @@ -200,8 
+200,7 @@ static int i2sbus_add_dev(struct macio_dev *macio, * We probably cannot handle all device-id machines, * so restrict to those we do handle for now. */ - if (id && (*id == 22 || *id == 14 || *id == 35 || - *id == 44)) { + if (id && (*id == 22 || *id == 14 || *id == 35)) { snprintf(dev->sound.modalias, 32, "aoa-device-id-%d", *id); ok = 1; diff --git a/trunk/sound/core/pcm_native.c b/trunk/sound/core/pcm_native.c index f92818155958..ccfa383f1fda 100644 --- a/trunk/sound/core/pcm_native.c +++ b/trunk/sound/core/pcm_native.c @@ -1649,7 +1649,6 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) } if (!snd_pcm_stream_linked(substream)) { substream->group = group; - group = NULL; spin_lock_init(&substream->group->lock); INIT_LIST_HEAD(&substream->group->substreams); list_add_tail(&substream->link_list, &substream->group->substreams); @@ -1664,7 +1663,8 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) _nolock: snd_card_unref(substream1->pcm->card); fput_light(file, fput_needed); - kfree(group); + if (res < 0) + kfree(group); return res; } diff --git a/trunk/sound/oss/Kconfig b/trunk/sound/oss/Kconfig index 1a9640254433..51c4ba95a32d 100644 --- a/trunk/sound/oss/Kconfig +++ b/trunk/sound/oss/Kconfig @@ -250,7 +250,7 @@ config MSND_FIFOSIZE menuconfig SOUND_OSS tristate "OSS sound modules" depends on ISA_DMA_API && VIRT_TO_BUS - depends on !GENERIC_ISA_DMA_SUPPORT_BROKEN + depends on !ISA_DMA_SUPPORT_BROKEN help OSS is the Open Sound System suite of sound card drivers. They make sound programming easier since they provide a common API. Say Y or diff --git a/trunk/sound/pci/hda/hda_generic.c b/trunk/sound/pci/hda/hda_generic.c index 4b1524a861f3..ac079f93c535 100644 --- a/trunk/sound/pci/hda/hda_generic.c +++ b/trunk/sound/pci/hda/hda_generic.c @@ -606,10 +606,6 @@ static bool is_active_nid(struct hda_codec *codec, hda_nid_t nid, return false; } -/* check whether the NID is referred by any active paths */ -#define is_active_nid_for_any(codec, nid) \ - is_active_nid(codec, nid, HDA_OUTPUT, 0) - /* get the default amp value for the target state */ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid, int dir, unsigned int caps, bool enable) @@ -763,8 +759,7 @@ static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path) for (i = 0; i < path->depth; i++) { hda_nid_t nid = path->path[i]; - if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D3) && - !is_active_nid_for_any(codec, nid)) { + if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D3)) { snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_POWER_STATE, AC_PWRST_D3); @@ -788,8 +783,6 @@ static void set_pin_eapd(struct hda_codec *codec, hda_nid_t pin, bool enable) return; if (codec->inv_eapd) enable = !enable; - if (spec->keep_eapd_on && !enable) - return; snd_hda_codec_update_cache(codec, pin, 0, AC_VERB_SET_EAPD_BTLENABLE, enable ? 
0x02 : 0x00); @@ -1940,7 +1933,17 @@ static int create_speaker_out_ctls(struct hda_codec *codec) * independent HP controls */ -static void call_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack); +/* update HP auto-mute state too */ +static void update_hp_automute_hook(struct hda_codec *codec) +{ + struct hda_gen_spec *spec = codec->spec; + + if (spec->hp_automute_hook) + spec->hp_automute_hook(codec, NULL); + else + snd_hda_gen_hp_automute(codec, NULL); +} + static int indep_hp_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { @@ -2001,7 +2004,7 @@ static int indep_hp_put(struct snd_kcontrol *kcontrol, else *dacp = spec->alt_dac_nid; - call_hp_automute(codec, NULL); + update_hp_automute_hook(codec); ret = 1; } unlock: @@ -2297,7 +2300,7 @@ static void update_hp_mic(struct hda_codec *codec, int adc_mux, bool force) else val = PIN_HP; set_pin_target(codec, pin, val, true); - call_hp_automute(codec, NULL); + update_hp_automute_hook(codec); } } @@ -2706,7 +2709,7 @@ static int hp_mic_jack_mode_put(struct snd_kcontrol *kcontrol, val = snd_hda_get_default_vref(codec, nid); } snd_hda_set_pin_ctl_cache(codec, nid, val); - call_hp_automute(codec, NULL); + update_hp_automute_hook(codec); return 1; } @@ -3851,42 +3854,20 @@ void snd_hda_gen_mic_autoswitch(struct hda_codec *codec, struct hda_jack_tbl *ja } EXPORT_SYMBOL_HDA(snd_hda_gen_mic_autoswitch); -/* call appropriate hooks */ -static void call_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack) +/* update jack retasking */ +static void update_automute_all(struct hda_codec *codec) { struct hda_gen_spec *spec = codec->spec; - if (spec->hp_automute_hook) - spec->hp_automute_hook(codec, jack); - else - snd_hda_gen_hp_automute(codec, jack); -} -static void call_line_automute(struct hda_codec *codec, - struct hda_jack_tbl *jack) -{ - struct hda_gen_spec *spec = codec->spec; + update_hp_automute_hook(codec); if (spec->line_automute_hook) - spec->line_automute_hook(codec, jack); + spec->line_automute_hook(codec, NULL); else - snd_hda_gen_line_automute(codec, jack); -} - -static void call_mic_autoswitch(struct hda_codec *codec, - struct hda_jack_tbl *jack) -{ - struct hda_gen_spec *spec = codec->spec; + snd_hda_gen_line_automute(codec, NULL); if (spec->mic_autoswitch_hook) - spec->mic_autoswitch_hook(codec, jack); + spec->mic_autoswitch_hook(codec, NULL); else - snd_hda_gen_mic_autoswitch(codec, jack); -} - -/* update jack retasking */ -static void update_automute_all(struct hda_codec *codec) -{ - call_hp_automute(codec, NULL); - call_line_automute(codec, NULL); - call_mic_autoswitch(codec, NULL); + snd_hda_gen_mic_autoswitch(codec, NULL); } /* @@ -4023,7 +4004,9 @@ static int check_auto_mute_availability(struct hda_codec *codec) snd_printdd("hda-codec: Enable HP auto-muting on NID 0x%x\n", nid); snd_hda_jack_detect_enable_callback(codec, nid, HDA_GEN_HP_EVENT, - call_hp_automute); + spec->hp_automute_hook ? + spec->hp_automute_hook : + snd_hda_gen_hp_automute); spec->detect_hp = 1; } @@ -4036,7 +4019,9 @@ static int check_auto_mute_availability(struct hda_codec *codec) snd_printdd("hda-codec: Enable Line-Out auto-muting on NID 0x%x\n", nid); snd_hda_jack_detect_enable_callback(codec, nid, HDA_GEN_FRONT_EVENT, - call_line_automute); + spec->line_automute_hook ? 
+ spec->line_automute_hook : + snd_hda_gen_line_automute); spec->detect_lo = 1; } spec->automute_lo_possible = spec->detect_hp; @@ -4078,7 +4063,9 @@ static bool auto_mic_check_imux(struct hda_codec *codec) snd_hda_jack_detect_enable_callback(codec, spec->am_entry[i].pin, HDA_GEN_MIC_EVENT, - call_mic_autoswitch); + spec->mic_autoswitch_hook ? + spec->mic_autoswitch_hook : + snd_hda_gen_mic_autoswitch); return true; } @@ -4170,7 +4157,7 @@ static unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec, return power_state; if (get_wcaps_type(get_wcaps(codec, nid)) >= AC_WID_POWER) return power_state; - if (is_active_nid_for_any(codec, nid)) + if (is_active_nid(codec, nid, HDA_OUTPUT, 0)) return power_state; return AC_PWRST_D3; } diff --git a/trunk/sound/pci/hda/hda_generic.h b/trunk/sound/pci/hda/hda_generic.h index 76200314ee95..54e665160379 100644 --- a/trunk/sound/pci/hda/hda_generic.h +++ b/trunk/sound/pci/hda/hda_generic.h @@ -222,7 +222,6 @@ struct hda_gen_spec { unsigned int multi_cap_vol:1; /* allow multiple capture xxx volumes */ unsigned int inv_dmic_split:1; /* inverted dmic w/a for conexant */ unsigned int own_eapd_ctl:1; /* set EAPD by own function */ - unsigned int keep_eapd_on:1; /* don't turn off EAPD automatically */ unsigned int vmaster_mute_enum:1; /* add vmaster mute mode enum */ unsigned int indep_hp:1; /* independent HP supported */ unsigned int prefer_hp_amp:1; /* enable HP amp for speaker if any */ diff --git a/trunk/sound/pci/hda/patch_cirrus.c b/trunk/sound/pci/hda/patch_cirrus.c index cccaf9c7a7bb..bd8d46cca2b3 100644 --- a/trunk/sound/pci/hda/patch_cirrus.c +++ b/trunk/sound/pci/hda/patch_cirrus.c @@ -58,7 +58,6 @@ enum { CS420X_GPIO_23, CS420X_MBP101, CS420X_MBP81, - CS420X_MBA42, CS420X_AUTO, /* aliases */ CS420X_IMAC27_122 = CS420X_GPIO_23, @@ -347,7 +346,6 @@ static const struct hda_model_fixup cs420x_models[] = { { .id = CS420X_APPLE, .name = "apple" }, { .id = CS420X_MBP101, .name = "mbp101" }, { .id = CS420X_MBP81, .name = "mbp81" }, - { .id = CS420X_MBA42, .name = "mba42" }, {} }; @@ -363,7 +361,6 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = { SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), - SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42), SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE), {} /* terminator */ }; @@ -417,20 +414,6 @@ static const struct hda_pintbl mbp101_pincfgs[] = { {} /* terminator */ }; -static const struct hda_pintbl mba42_pincfgs[] = { - { 0x09, 0x012b4030 }, /* HP */ - { 0x0a, 0x400000f0 }, - { 0x0b, 0x90100120 }, /* speaker */ - { 0x0c, 0x400000f0 }, - { 0x0d, 0x90a00110 }, /* mic */ - { 0x0e, 0x400000f0 }, - { 0x0f, 0x400000f0 }, - { 0x10, 0x400000f0 }, - { 0x12, 0x400000f0 }, - { 0x15, 0x400000f0 }, - {} /* terminator */ -}; - static void cs420x_fixup_gpio_13(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -499,12 +482,6 @@ static const struct hda_fixup cs420x_fixups[] = { .chained = true, .chain_id = CS420X_GPIO_13, }, - [CS420X_MBA42] = { - .type = HDA_FIXUP_PINS, - .v.pins = mba42_pincfgs, - .chained = true, - .chain_id = CS420X_GPIO_13, - }, }; static struct cs_spec *cs_alloc_spec(struct hda_codec *codec, int vendor_nid) diff --git a/trunk/sound/pci/hda/patch_realtek.c b/trunk/sound/pci/hda/patch_realtek.c index 403010c9e82e..6bf47f7326ad 100644 --- a/trunk/sound/pci/hda/patch_realtek.c +++ 
b/trunk/sound/pci/hda/patch_realtek.c @@ -3482,8 +3482,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x05c9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05ca, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05cb, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x05de, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x05e0, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05e9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05ea, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05eb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), @@ -3494,10 +3492,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x05f8, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1), @@ -3535,7 +3529,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK), - SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), @@ -3599,8 +3592,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC269_FIXUP_INV_DMIC, .name = "inv-dmic"}, {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"}, {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, - {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, - {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"}, {} }; @@ -4280,7 +4271,6 @@ static const struct hda_model_fixup alc662_fixup_models[] = { {.id = ALC662_FIXUP_ASUS_MODE7, .name = "asus-mode7"}, {.id = ALC662_FIXUP_ASUS_MODE8, .name = "asus-mode8"}, {.id = ALC662_FIXUP_INV_DMIC, .name = "inv-dmic"}, - {.id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, {} }; diff --git a/trunk/sound/pci/hda/patch_via.c b/trunk/sound/pci/hda/patch_via.c index e5245544eb52..e0dadcf2030d 100644 --- a/trunk/sound/pci/hda/patch_via.c +++ b/trunk/sound/pci/hda/patch_via.c @@ -136,7 +136,6 @@ static struct via_spec *via_new_spec(struct hda_codec *codec) spec->codec_type = VT1708S; spec->no_pin_power_ctl = 1; spec->gen.indep_hp = 1; - spec->gen.keep_eapd_on = 1; spec->gen.pcm_playback_hook = via_playback_pcm_hook; return spec; } @@ -232,14 +231,9 @@ static void vt1708_update_hp_work(struct hda_codec *codec) static void set_widgets_power_state(struct hda_codec *codec) { -#if 0 /* FIXME: the assumed connections don't match always with the - * actual 
routes by the generic parser, so better to disable - * the control for safety. - */ struct via_spec *spec = codec->spec; if (spec->set_widgets_power_state) spec->set_widgets_power_state(codec); -#endif } static void update_power_state(struct hda_codec *codec, hda_nid_t nid, @@ -484,9 +478,7 @@ static int via_suspend(struct hda_codec *codec) /* Fix pop noise on headphones */ int i; for (i = 0; i < spec->gen.autocfg.hp_outs; i++) - snd_hda_codec_write(codec, spec->gen.autocfg.hp_pins[i], - 0, AC_VERB_SET_PIN_WIDGET_CONTROL, - 0x00); + snd_hda_set_pin_ctl(codec, spec->gen.autocfg.hp_pins[i], 0); } return 0; diff --git a/trunk/sound/pci/sis7019.c b/trunk/sound/pci/sis7019.c index 748e82d4d257..d59abe1682c5 100644 --- a/trunk/sound/pci/sis7019.c +++ b/trunk/sound/pci/sis7019.c @@ -1341,8 +1341,7 @@ static int sis_chip_create(struct snd_card *card, if (rc) goto error_out; - rc = pci_set_dma_mask(pci, DMA_BIT_MASK(30)); - if (rc < 0) { + if (pci_set_dma_mask(pci, DMA_BIT_MASK(30)) < 0) { dev_err(&pci->dev, "architecture does not support 30-bit PCI busmaster DMA"); goto error_out_enabled; } diff --git a/trunk/sound/soc/codecs/ab8500-codec.h b/trunk/sound/soc/codecs/ab8500-codec.h index 306d0bc8455f..114f69a0c629 100644 --- a/trunk/sound/soc/codecs/ab8500-codec.h +++ b/trunk/sound/soc/codecs/ab8500-codec.h @@ -348,25 +348,25 @@ /* AB8500_ADSLOTSELX */ #define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_ODD 0x00 -#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD 0x10 -#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD 0x20 -#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_ODD 0x30 -#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_ODD 0x40 -#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_ODD 0x50 -#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_ODD 0x60 -#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_ODD 0x70 -#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_ODD 0x80 -#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_ODD 0xF0 +#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD 0x01 +#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD 0x02 +#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_ODD 0x03 +#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_ODD 0x04 +#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_ODD 0x05 +#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_ODD 0x06 +#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_ODD 0x07 +#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_ODD 0x08 +#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_ODD 0x0F #define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_EVEN 0x00 -#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_EVEN 0x01 -#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN 0x02 -#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_EVEN 0x03 -#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_EVEN 0x04 -#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_EVEN 0x05 -#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_EVEN 0x06 -#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_EVEN 0x07 -#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_EVEN 0x08 -#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_EVEN 0x0F +#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_EVEN 0x10 +#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN 0x20 +#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_EVEN 0x30 +#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_EVEN 0x40 +#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_EVEN 0x50 +#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_EVEN 0x60 +#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_EVEN 0x70 +#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_EVEN 0x80 +#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_EVEN 0xF0 #define AB8500_ADSLOTSELX_EVEN_SHIFT 0 #define AB8500_ADSLOTSELX_ODD_SHIFT 4 diff --git a/trunk/sound/soc/codecs/cs42l52.c b/trunk/sound/soc/codecs/cs42l52.c index 987f728718c5..0f6f481cec09 
100644 --- a/trunk/sound/soc/codecs/cs42l52.c +++ b/trunk/sound/soc/codecs/cs42l52.c @@ -86,7 +86,7 @@ static const struct reg_default cs42l52_reg_defaults[] = { { CS42L52_BEEP_VOL, 0x00 }, /* r1D Beep Volume off Time */ { CS42L52_BEEP_TONE_CTL, 0x00 }, /* r1E Beep Tone Cfg. */ { CS42L52_TONE_CTL, 0x00 }, /* r1F Tone Ctl */ - { CS42L52_MASTERA_VOL, 0x00 }, /* r20 Master A Volume */ + { CS42L52_MASTERA_VOL, 0x88 }, /* r20 Master A Volume */ { CS42L52_MASTERB_VOL, 0x00 }, /* r21 Master B Volume */ { CS42L52_HPA_VOL, 0x00 }, /* r22 Headphone A Volume */ { CS42L52_HPB_VOL, 0x00 }, /* r23 Headphone B Volume */ @@ -193,8 +193,6 @@ static DECLARE_TLV_DB_SCALE(mic_tlv, 1600, 100, 0); static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0); -static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0); - static const unsigned int limiter_tlv[] = { TLV_DB_RANGE_HEAD(2), 0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0), @@ -227,7 +225,7 @@ static const char * const mic_bias_level_text[] = { }; static const struct soc_enum mic_bias_level_enum = - SOC_ENUM_SINGLE(CS42L52_IFACE_CTL2, 0, + SOC_ENUM_SINGLE(CS42L52_IFACE_CTL1, 0, ARRAY_SIZE(mic_bias_level_text), mic_bias_level_text); static const char * const cs42l52_mic_text[] = { "Single", "Differential" }; @@ -262,7 +260,7 @@ static const char * const hp_gain_num_text[] = { }; static const struct soc_enum hp_gain_enum = - SOC_ENUM_SINGLE(CS42L52_PB_CTL1, 5, + SOC_ENUM_SINGLE(CS42L52_PB_CTL1, 4, ARRAY_SIZE(hp_gain_num_text), hp_gain_num_text); static const char * const beep_pitch_text[] = { @@ -415,7 +413,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = { SOC_ENUM("Headphone Analog Gain", hp_gain_enum), SOC_DOUBLE_R_SX_TLV("Speaker Volume", CS42L52_SPKA_VOL, - CS42L52_SPKB_VOL, 0, 0x1, 0xff, hl_tlv), + CS42L52_SPKB_VOL, 7, 0x1, 0xff, hl_tlv), SOC_DOUBLE_R_SX_TLV("Bypass Volume", CS42L52_PASSTHRUA_VOL, CS42L52_PASSTHRUB_VOL, 6, 0x18, 0x90, pga_tlv), @@ -443,7 +441,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = { SOC_DOUBLE_R_SX_TLV("PCM Mixer Volume", CS42L52_PCMA_MIXER_VOL, CS42L52_PCMB_MIXER_VOL, - 0, 0x7f, 0x19, mix_tlv), + 6, 0x7f, 0x19, hl_tlv), SOC_DOUBLE_R("PCM Mixer Switch", CS42L52_PCMA_MIXER_VOL, CS42L52_PCMB_MIXER_VOL, 7, 1, 1), diff --git a/trunk/sound/soc/codecs/cs42l52.h b/trunk/sound/soc/codecs/cs42l52.h index 4277012c4719..60985c059071 100644 --- a/trunk/sound/soc/codecs/cs42l52.h +++ b/trunk/sound/soc/codecs/cs42l52.h @@ -157,7 +157,7 @@ #define CS42L52_PB_CTL1_INV_PCMA (1 << 2) #define CS42L52_PB_CTL1_MSTB_MUTE (1 << 1) #define CS42L52_PB_CTL1_MSTA_MUTE (1 << 0) -#define CS42L52_PB_CTL1_MUTE_MASK 0x03 +#define CS42L52_PB_CTL1_MUTE_MASK 0xFFFD #define CS42L52_PB_CTL1_MUTE 3 #define CS42L52_PB_CTL1_UNMUTE 0 diff --git a/trunk/sound/soc/codecs/da7213.c b/trunk/sound/soc/codecs/da7213.c index 4a6f1daf911f..41230ad1c3e0 100644 --- a/trunk/sound/soc/codecs/da7213.c +++ b/trunk/sound/soc/codecs/da7213.c @@ -1488,17 +1488,17 @@ static int da7213_probe(struct snd_soc_codec *codec) DA7213_DMIC_DATA_SEL_SHIFT); break; } - switch (pdata->dmic_samplephase) { + switch (pdata->dmic_data_sel) { case DA7213_DMIC_SAMPLE_ON_CLKEDGE: case DA7213_DMIC_SAMPLE_BETWEEN_CLKEDGE: - dmic_cfg |= (pdata->dmic_samplephase << + dmic_cfg |= (pdata->dmic_data_sel << DA7213_DMIC_SAMPLEPHASE_SHIFT); break; } - switch (pdata->dmic_clk_rate) { + switch (pdata->dmic_data_sel) { case DA7213_DMIC_CLK_3_0MHZ: case DA7213_DMIC_CLK_1_5MHZ: - dmic_cfg |= (pdata->dmic_clk_rate << + dmic_cfg |= (pdata->dmic_data_sel << DA7213_DMIC_CLK_RATE_SHIFT); break; } diff --git 
a/trunk/sound/soc/codecs/max98090.c b/trunk/sound/soc/codecs/max98090.c index 8d14a76c7249..ce0d36412c97 100644 --- a/trunk/sound/soc/codecs/max98090.c +++ b/trunk/sound/soc/codecs/max98090.c @@ -2233,7 +2233,7 @@ static int max98090_probe(struct snd_soc_codec *codec) dev_dbg(codec->dev, "irq = %d\n", max98090->irq); ret = request_threaded_irq(max98090->irq, NULL, - max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + max98090_interrupt, IRQF_TRIGGER_FALLING, "max98090_interrupt", codec); if (ret < 0) { dev_err(codec->dev, "request_irq failed: %d\n", diff --git a/trunk/sound/soc/codecs/tlv320aic3x.c b/trunk/sound/soc/codecs/tlv320aic3x.c index 1514bf845e4b..65d09d60b7c6 100644 --- a/trunk/sound/soc/codecs/tlv320aic3x.c +++ b/trunk/sound/soc/codecs/tlv320aic3x.c @@ -187,14 +187,14 @@ static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol, break; } - } - mutex_unlock(&widget->codec->mutex); + if (found) + snd_soc_dapm_sync(widget->dapm); + } - if (found) - snd_soc_dapm_sync(widget->dapm); + ret = snd_soc_update_bits(widget->codec, reg, val_mask, val); - ret = snd_soc_update_bits_locked(widget->codec, reg, val_mask, val); + mutex_unlock(&widget->codec->mutex); return ret; } diff --git a/trunk/sound/soc/codecs/wm0010.c b/trunk/sound/soc/codecs/wm0010.c index 370af0cbcc9a..8df2b6e1a1a6 100644 --- a/trunk/sound/soc/codecs/wm0010.c +++ b/trunk/sound/soc/codecs/wm0010.c @@ -667,7 +667,6 @@ static int wm0010_boot(struct snd_soc_codec *codec) /* On wm0010 only the CLKCTRL1 value is used */ pll_rec.clkctrl1 = wm0010->pll_clkctrl1; - ret = -ENOMEM; len = pll_rec.length + 8; out = kzalloc(len, GFP_KERNEL); if (!out) { diff --git a/trunk/sound/soc/codecs/wm5102.c b/trunk/sound/soc/codecs/wm5102.c index 100fdadda56a..e895d3939eef 100644 --- a/trunk/sound/soc/codecs/wm5102.c +++ b/trunk/sound/soc/codecs/wm5102.c @@ -1120,8 +1120,7 @@ SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 0, ARIZONA_DSP_WIDGETS(DSP1, "DSP1"), SND_SOC_DAPM_VALUE_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1, - ARIZONA_AEC_LOOPBACK_ENA_SHIFT, 0, - &wm5102_aec_loopback_mux), + ARIZONA_AEC_LOOPBACK_ENA, 0, &wm5102_aec_loopback_mux), SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM, ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev, diff --git a/trunk/sound/soc/codecs/wm5110.c b/trunk/sound/soc/codecs/wm5110.c index 88ad7db52dde..731884e04776 100644 --- a/trunk/sound/soc/codecs/wm5110.c +++ b/trunk/sound/soc/codecs/wm5110.c @@ -190,7 +190,7 @@ ARIZONA_MIXER_CONTROLS("DSP2R", ARIZONA_DSP2RMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("DSP3L", ARIZONA_DSP3LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("DSP3R", ARIZONA_DSP3RMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("DSP4L", ARIZONA_DSP4LMIX_INPUT_1_SOURCE), -ARIZONA_MIXER_CONTROLS("DSP4R", ARIZONA_DSP4RMIX_INPUT_1_SOURCE), +ARIZONA_MIXER_CONTROLS("DSP5R", ARIZONA_DSP4RMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("Mic", ARIZONA_MICMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("Noise", ARIZONA_NOISEMIX_INPUT_1_SOURCE), @@ -503,8 +503,7 @@ SND_SOC_DAPM_PGA("ASRC2R", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC2R_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_VALUE_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1, - ARIZONA_AEC_LOOPBACK_ENA_SHIFT, 0, - &wm5110_aec_loopback_mux), + ARIZONA_AEC_LOOPBACK_ENA, 0, &wm5110_aec_loopback_mux), SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0), @@ -977,8 +976,6 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec) if (ret != 0) return ret; - arizona_init_spk(codec); - snd_soc_dapm_disable_pin(&codec->dapm, "HAPTICS"); 
priv->core.arizona->dapm = &codec->dapm; diff --git a/trunk/sound/soc/codecs/wm8994.c b/trunk/sound/soc/codecs/wm8994.c index 29e95f93d482..1eb152cb1097 100644 --- a/trunk/sound/soc/codecs/wm8994.c +++ b/trunk/sound/soc/codecs/wm8994.c @@ -383,8 +383,6 @@ static int wm8994_get_drc_enum(struct snd_kcontrol *kcontrol, struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); int drc = wm8994_get_drc(kcontrol->id.name); - if (drc < 0) - return drc; ucontrol->value.enumerated.item[0] = wm8994->drc_cfg[drc]; return 0; @@ -490,9 +488,6 @@ static int wm8994_get_retune_mobile_enum(struct snd_kcontrol *kcontrol, struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); int block = wm8994_get_retune_mobile_block(kcontrol->id.name); - if (block < 0) - return block; - ucontrol->value.enumerated.item[0] = wm8994->retune_mobile_cfg[block]; return 0; @@ -1036,7 +1031,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w, { struct snd_soc_codec *codec = w->codec; struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); - struct wm8994 *control = wm8994->wm8994; + struct wm8994 *control = codec->control_data; int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA; int i; int dac; @@ -3836,14 +3831,8 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data) ret); } else if (!(ret & WM1811_JACKDET_LVL)) { dev_dbg(codec->dev, "Ignoring removed jack\n"); - goto out; + return IRQ_HANDLED; } - } else if (!(reg & WM8958_MICD_STS)) { - snd_soc_jack_report(wm8994->micdet[0].jack, 0, - SND_JACK_MECHANICAL | SND_JACK_HEADSET | - wm8994->btn_mask); - wm8994->mic_detecting = true; - goto out; } if (wm8994->mic_detecting) diff --git a/trunk/sound/soc/davinci/davinci-mcasp.c b/trunk/sound/soc/davinci/davinci-mcasp.c index 81490febac6d..56ecfc72f2e9 100644 --- a/trunk/sound/soc/davinci/davinci-mcasp.c +++ b/trunk/sound/soc/davinci/davinci-mcasp.c @@ -631,8 +631,7 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev, int word_length) { u32 fmt; - u32 tx_rotate = (word_length / 4) & 0x7; - u32 rx_rotate = (32 - word_length) / 4; + u32 rotate = (word_length / 4) & 0x7; u32 mask = (1ULL << word_length) - 1; /* @@ -656,9 +655,9 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev, mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXSSZ(fmt), TXSSZ(0x0F)); mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, - TXROT(tx_rotate), TXROT(7)); + TXROT(rotate), TXROT(7)); mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, - RXROT(rx_rotate), RXROT(7)); + RXROT(rotate), RXROT(7)); mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG, mask); } diff --git a/trunk/sound/soc/fsl/imx-ssi.c b/trunk/sound/soc/fsl/imx-ssi.c index c6fa03e2114a..902fab02b851 100644 --- a/trunk/sound/soc/fsl/imx-ssi.c +++ b/trunk/sound/soc/fsl/imx-ssi.c @@ -540,6 +540,11 @@ static int imx_ssi_probe(struct platform_device *pdev) clk_prepare_enable(ssi->clk); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -ENODEV; + goto failed_get_resource; + } + ssi->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(ssi->base)) { ret = PTR_ERR(ssi->base); @@ -628,6 +633,7 @@ static int imx_ssi_probe(struct platform_device *pdev) snd_soc_unregister_component(&pdev->dev); failed_register: release_mem_region(res->start, resource_size(res)); +failed_get_resource: clk_disable_unprepare(ssi->clk); failed_clk: diff --git a/trunk/sound/soc/kirkwood/kirkwood-i2s.c b/trunk/sound/soc/kirkwood/kirkwood-i2s.c index 4c9dad3263c5..befe68f59285 100644 --- a/trunk/sound/soc/kirkwood/kirkwood-i2s.c +++ 
b/trunk/sound/soc/kirkwood/kirkwood-i2s.c @@ -471,6 +471,11 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, priv); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "platform_get_resource failed\n"); + return -ENXIO; + } + priv->io = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(priv->io)) return PTR_ERR(priv->io); diff --git a/trunk/sound/soc/samsung/idma.c b/trunk/sound/soc/samsung/idma.c index ce1e1e16f250..6e5fed30aa27 100644 --- a/trunk/sound/soc/samsung/idma.c +++ b/trunk/sound/soc/samsung/idma.c @@ -257,6 +257,7 @@ static int idma_mmap(struct snd_pcm_substream *substream, /* From snd_pcm_lib_mmap_iomem */ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_flags |= VM_IO; size = vma->vm_end - vma->vm_start; offset = vma->vm_pgoff << PAGE_SHIFT; ret = io_remap_pfn_range(vma, vma->vm_start, diff --git a/trunk/sound/soc/soc-compress.c b/trunk/sound/soc/soc-compress.c index 06a8000aa07b..3853f7eb3f28 100644 --- a/trunk/sound/soc/soc-compress.c +++ b/trunk/sound/soc/soc-compress.c @@ -220,12 +220,8 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream, goto err; } - if (cstream->direction == SND_COMPRESS_PLAYBACK) - snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK, - SND_SOC_DAPM_STREAM_START); - else - snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_CAPTURE, - SND_SOC_DAPM_STREAM_START); + snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK, + SND_SOC_DAPM_STREAM_START); /* cancel any delayed stream shutdown that is pending */ rtd->pop_wait = 0; diff --git a/trunk/sound/soc/soc-dapm.c b/trunk/sound/soc/soc-dapm.c index c7051c457b75..a80c883bb8be 100644 --- a/trunk/sound/soc/soc-dapm.c +++ b/trunk/sound/soc/soc-dapm.c @@ -55,8 +55,7 @@ static int dapm_up_seq[] = { [snd_soc_dapm_clock_supply] = 1, [snd_soc_dapm_micbias] = 2, [snd_soc_dapm_dai_link] = 2, - [snd_soc_dapm_dai_in] = 3, - [snd_soc_dapm_dai_out] = 3, + [snd_soc_dapm_dai] = 3, [snd_soc_dapm_aif_in] = 3, [snd_soc_dapm_aif_out] = 3, [snd_soc_dapm_mic] = 4, @@ -93,8 +92,7 @@ static int dapm_down_seq[] = { [snd_soc_dapm_value_mux] = 9, [snd_soc_dapm_aif_in] = 10, [snd_soc_dapm_aif_out] = 10, - [snd_soc_dapm_dai_in] = 10, - [snd_soc_dapm_dai_out] = 10, + [snd_soc_dapm_dai] = 10, [snd_soc_dapm_dai_link] = 11, [snd_soc_dapm_clock_supply] = 12, [snd_soc_dapm_regulator_supply] = 12, @@ -421,8 +419,7 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w, case snd_soc_dapm_clock_supply: case snd_soc_dapm_aif_in: case snd_soc_dapm_aif_out: - case snd_soc_dapm_dai_in: - case snd_soc_dapm_dai_out: + case snd_soc_dapm_dai: case snd_soc_dapm_hp: case snd_soc_dapm_mic: case snd_soc_dapm_spk: @@ -823,7 +820,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget, switch (widget->id) { case snd_soc_dapm_adc: case snd_soc_dapm_aif_out: - case snd_soc_dapm_dai_out: + case snd_soc_dapm_dai: if (widget->active) { widget->outputs = snd_soc_dapm_suspend_check(widget); return widget->outputs; @@ -919,7 +916,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, switch (widget->id) { case snd_soc_dapm_dac: case snd_soc_dapm_aif_in: - case snd_soc_dapm_dai_in: + case snd_soc_dapm_dai: if (widget->active) { widget->inputs = snd_soc_dapm_suspend_check(widget); return widget->inputs; @@ -1138,6 +1135,16 @@ static int dapm_generic_check_power(struct snd_soc_dapm_widget *w) return out != 0 && in != 0; } +static int dapm_dai_check_power(struct snd_soc_dapm_widget *w) +{ + 
DAPM_UPDATE_STAT(w, power_checks); + + if (w->active) + return w->active; + + return dapm_generic_check_power(w); +} + /* Check to see if an ADC has power */ static int dapm_adc_check_power(struct snd_soc_dapm_widget *w) { @@ -2311,8 +2318,7 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm, case snd_soc_dapm_clock_supply: case snd_soc_dapm_aif_in: case snd_soc_dapm_aif_out: - case snd_soc_dapm_dai_in: - case snd_soc_dapm_dai_out: + case snd_soc_dapm_dai: case snd_soc_dapm_dai_link: list_add(&path->list, &dapm->card->paths); list_add(&path->list_sink, &wsink->sources); @@ -3123,12 +3129,10 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm, break; case snd_soc_dapm_adc: case snd_soc_dapm_aif_out: - case snd_soc_dapm_dai_out: w->power_check = dapm_adc_check_power; break; case snd_soc_dapm_dac: case snd_soc_dapm_aif_in: - case snd_soc_dapm_dai_in: w->power_check = dapm_dac_check_power; break; case snd_soc_dapm_pga: @@ -3148,6 +3152,9 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm, case snd_soc_dapm_clock_supply: w->power_check = dapm_supply_check_power; break; + case snd_soc_dapm_dai: + w->power_check = dapm_dai_check_power; + break; default: w->power_check = dapm_always_on_check_power; break; @@ -3368,7 +3375,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm, template.reg = SND_SOC_NOPM; if (dai->driver->playback.stream_name) { - template.id = snd_soc_dapm_dai_in; + template.id = snd_soc_dapm_dai; template.name = dai->driver->playback.stream_name; template.sname = dai->driver->playback.stream_name; @@ -3386,7 +3393,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm, } if (dai->driver->capture.stream_name) { - template.id = snd_soc_dapm_dai_out; + template.id = snd_soc_dapm_dai; template.name = dai->driver->capture.stream_name; template.sname = dai->driver->capture.stream_name; @@ -3416,13 +3423,8 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card) /* For each DAI widget... 
*/ list_for_each_entry(dai_w, &card->widgets, list) { - switch (dai_w->id) { - case snd_soc_dapm_dai_in: - case snd_soc_dapm_dai_out: - break; - default: + if (dai_w->id != snd_soc_dapm_dai) continue; - } dai = dai_w->priv; @@ -3431,13 +3433,8 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card) if (w->dapm != dai_w->dapm) continue; - switch (w->id) { - case snd_soc_dapm_dai_in: - case snd_soc_dapm_dai_out: + if (w->id == snd_soc_dapm_dai) continue; - default: - break; - } if (!w->sname) continue; diff --git a/trunk/sound/soc/soc-pcm.c b/trunk/sound/soc/soc-pcm.c index ccb6be4d658d..73bb8eefa491 100644 --- a/trunk/sound/soc/soc-pcm.c +++ b/trunk/sound/soc/soc-pcm.c @@ -928,13 +928,8 @@ static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream, /* Create any new FE <--> BE connections */ for (i = 0; i < list->num_widgets; i++) { - switch (list->widgets[i]->id) { - case snd_soc_dapm_dai_in: - case snd_soc_dapm_dai_out: - break; - default: + if (list->widgets[i]->id != snd_soc_dapm_dai) continue; - } /* is there a valid BE rtd for this widget */ be = dpcm_get_be(card, list->widgets[i], stream); @@ -2016,11 +2011,9 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num) if (cpu_dai->driver->capture.channels_min) capture = 1; } else { - if (codec_dai->driver->playback.channels_min && - cpu_dai->driver->playback.channels_min) + if (codec_dai->driver->playback.channels_min) playback = 1; - if (codec_dai->driver->capture.channels_min && - cpu_dai->driver->capture.channels_min) + if (codec_dai->driver->capture.channels_min) capture = 1; } diff --git a/trunk/sound/usb/6fire/firmware.c b/trunk/sound/usb/6fire/firmware.c index b9defcdeb7ef..a1d9b0792a1e 100644 --- a/trunk/sound/usb/6fire/firmware.c +++ b/trunk/sound/usb/6fire/firmware.c @@ -42,8 +42,8 @@ static const u8 ep_w_max_packet_size[] = { 0x94, 0x01, 0x5c, 0x02 /* alt 3: 404 EP2 and 604 EP6 (25 fpp) */ }; -static const u8 known_fw_versions[][2] = { - { 0x03, 0x01 } +static const u8 known_fw_versions[][4] = { + { 0x03, 0x01, 0x0b, 0x00 } }; struct ihex_record { @@ -343,7 +343,7 @@ static int usb6fire_fw_check(u8 *version) int i; for (i = 0; i < ARRAY_SIZE(known_fw_versions); i++) - if (!memcmp(version, known_fw_versions + i, 2)) + if (!memcmp(version, known_fw_versions + i, 4)) return 0; snd_printk(KERN_ERR PREFIX "invalid fimware version in device: %*ph. " diff --git a/trunk/sound/usb/card.c b/trunk/sound/usb/card.c index 64952e2d3ed1..1a033177b83f 100644 --- a/trunk/sound/usb/card.c +++ b/trunk/sound/usb/card.c @@ -147,32 +147,14 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int return -EINVAL; } - alts = &iface->altsetting[0]; - altsd = get_iface_desc(alts); - - /* - * Android with both accessory and audio interfaces enabled gets the - * interface numbers wrong. 
- */ - if ((chip->usb_id == USB_ID(0x18d1, 0x2d04) || - chip->usb_id == USB_ID(0x18d1, 0x2d05)) && - interface == 0 && - altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC && - altsd->bInterfaceSubClass == USB_SUBCLASS_VENDOR_SPEC) { - interface = 2; - iface = usb_ifnum_to_if(dev, interface); - if (!iface) - return -EINVAL; - alts = &iface->altsetting[0]; - altsd = get_iface_desc(alts); - } - if (usb_interface_claimed(iface)) { snd_printdd(KERN_INFO "%d:%d:%d: skipping, already claimed\n", dev->devnum, ctrlif, interface); return -EINVAL; } + alts = &iface->altsetting[0]; + altsd = get_iface_desc(alts); if ((altsd->bInterfaceClass == USB_CLASS_AUDIO || altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) && altsd->bInterfaceSubClass == USB_SUBCLASS_MIDISTREAMING) { diff --git a/trunk/sound/usb/mixer.c b/trunk/sound/usb/mixer.c index d5438083fd6a..ca4739c3f650 100644 --- a/trunk/sound/usb/mixer.c +++ b/trunk/sound/usb/mixer.c @@ -885,9 +885,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, case USB_ID(0x046d, 0x0808): case USB_ID(0x046d, 0x0809): - case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */ case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ - case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ case USB_ID(0x046d, 0x0991): /* Most audio usb devices lie about volume resolution. * Most Logitech webcams have res = 384. diff --git a/trunk/sound/usb/proc.c b/trunk/sound/usb/proc.c index 5f761ab34c01..135c76871063 100644 --- a/trunk/sound/usb/proc.c +++ b/trunk/sound/usb/proc.c @@ -116,22 +116,21 @@ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct s } static void proc_dump_ep_status(struct snd_usb_substream *subs, - struct snd_usb_endpoint *data_ep, - struct snd_usb_endpoint *sync_ep, + struct snd_usb_endpoint *ep, struct snd_info_buffer *buffer) { - if (!data_ep) + if (!ep) return; - snd_iprintf(buffer, " Packet Size = %d\n", data_ep->curpacksize); + snd_iprintf(buffer, " Packet Size = %d\n", ep->curpacksize); snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n", subs->speed == USB_SPEED_FULL - ? get_full_speed_hz(data_ep->freqm) - : get_high_speed_hz(data_ep->freqm), - data_ep->freqm >> 16, data_ep->freqm & 0xffff); - if (sync_ep && data_ep->freqshift != INT_MIN) { - int res = 16 - data_ep->freqshift; + ? get_full_speed_hz(ep->freqm) + : get_high_speed_hz(ep->freqm), + ep->freqm >> 16, ep->freqm & 0xffff); + if (ep->freqshift != INT_MIN) { + int res = 16 - ep->freqshift; snd_iprintf(buffer, " Feedback Format = %d.%d\n", - (sync_ep->syncmaxsize > 3 ? 32 : 24) - res, res); + (ep->syncmaxsize > 3 ? 
32 : 24) - res, res); } } @@ -141,7 +140,8 @@ static void proc_dump_substream_status(struct snd_usb_substream *subs, struct sn snd_iprintf(buffer, " Status: Running\n"); snd_iprintf(buffer, " Interface = %d\n", subs->interface); snd_iprintf(buffer, " Altset = %d\n", subs->altset_idx); - proc_dump_ep_status(subs, subs->data_endpoint, subs->sync_endpoint, buffer); + proc_dump_ep_status(subs, subs->data_endpoint, buffer); + proc_dump_ep_status(subs, subs->sync_endpoint, buffer); } else { snd_iprintf(buffer, " Status: Stop\n"); } diff --git a/trunk/sound/usb/quirks-table.h b/trunk/sound/usb/quirks-table.h index 8b75bcf136f6..7f1722f82c89 100644 --- a/trunk/sound/usb/quirks-table.h +++ b/trunk/sound/usb/quirks-table.h @@ -215,13 +215,7 @@ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL }, { - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | - USB_DEVICE_ID_MATCH_INT_CLASS | - USB_DEVICE_ID_MATCH_INT_SUBCLASS, - .idVendor = 0x046d, - .idProduct = 0x0990, - .bInterfaceClass = USB_CLASS_AUDIO, - .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, + USB_DEVICE(0x046d, 0x0990), .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { .vendor_name = "Logitech, Inc.", .product_name = "QuickCam Pro 9000", @@ -1798,11 +1792,7 @@ YAMAHA_DEVICE(0x7010, "UB99"), USB_DEVICE_VENDOR_SPEC(0x0582, 0x0108), .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { .ifnum = 0, - .type = QUIRK_MIDI_FIXED_ENDPOINT, - .data = & (const struct snd_usb_midi_endpoint_info) { - .out_cables = 0x0007, - .in_cables = 0x0007 - } + .type = QUIRK_MIDI_STANDARD_INTERFACE } }, { diff --git a/trunk/tools/perf/scripts/python/net_dropmonitor.py b/trunk/tools/perf/scripts/python/net_dropmonitor.py index b5740599aabd..a4ffc9500023 100755 --- a/trunk/tools/perf/scripts/python/net_dropmonitor.py +++ b/trunk/tools/perf/scripts/python/net_dropmonitor.py @@ -15,38 +15,35 @@ def get_kallsyms_table(): global kallsyms - try: f = open("/proc/kallsyms", "r") + linecount = 0 + for line in f: + linecount = linecount+1 + f.seek(0) except: return + + j = 0 for line in f: loc = int(line.split()[0], 16) name = line.split()[2] - kallsyms.append((loc, name)) + j = j +1 + if ((j % 100) == 0): + print "\r" + str(j) + "/" + str(linecount), + kallsyms.append({ 'loc': loc, 'name' : name}) + + print "\r" + str(j) + "/" + str(linecount) kallsyms.sort() + return def get_sym(sloc): loc = int(sloc) - - # Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start - # kallsyms[i][0] > loc for all end <= i < len(kallsyms) - start, end = -1, len(kallsyms) - while end != start + 1: - pivot = (start + end) // 2 - if loc < kallsyms[pivot][0]: - end = pivot - else: - start = pivot - - # Now (start == -1 or kallsyms[start][0] <= loc) - # and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0]) - if start >= 0: - symloc, name = kallsyms[start] - return (name, loc - symloc) - else: - return (None, 0) + for i in kallsyms: + if (i['loc'] >= loc): + return (i['name'], i['loc']-loc) + return (None, 0) def print_drop_table(): print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT") @@ -67,7 +64,7 @@ def trace_end(): # called from perf, when it finds a correspoinding event def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, - skbaddr, location, protocol): + skbaddr, protocol, location): slocation = str(location) try: drop_log[slocation] = drop_log[slocation] + 1 diff --git a/trunk/tools/power/x86/turbostat/turbostat.c b/trunk/tools/power/x86/turbostat/turbostat.c index fe702076ca46..9e9d34871195 100644 --- 
a/trunk/tools/power/x86/turbostat/turbostat.c +++ b/trunk/tools/power/x86/turbostat/turbostat.c @@ -2191,7 +2191,7 @@ int initialize_counters(int cpu_id) void allocate_output_buffer() { - output_buffer = calloc(1, (1 + topo.num_cpus) * 256); + output_buffer = calloc(1, (1 + topo.num_cpus) * 128); outp = output_buffer; if (outp == NULL) { perror("calloc"); diff --git a/trunk/tools/testing/selftests/Makefile b/trunk/tools/testing/selftests/Makefile index 0a63658065f0..d4abc59ce1d9 100644 --- a/trunk/tools/testing/selftests/Makefile +++ b/trunk/tools/testing/selftests/Makefile @@ -6,6 +6,7 @@ TARGETS += memory-hotplug TARGETS += mqueue TARGETS += net TARGETS += ptrace +TARGETS += soft-dirty TARGETS += vm all: diff --git a/trunk/tools/testing/selftests/soft-dirty/Makefile b/trunk/tools/testing/selftests/soft-dirty/Makefile new file mode 100644 index 000000000000..a9cdc823d6e0 --- /dev/null +++ b/trunk/tools/testing/selftests/soft-dirty/Makefile @@ -0,0 +1,10 @@ +CFLAGS += -iquote../../../../include/uapi -Wall +soft-dirty: soft-dirty.c + +all: soft-dirty + +clean: + rm -f soft-dirty + +run_tests: all + @./soft-dirty || echo "soft-dirty selftests: [FAIL]" diff --git a/trunk/tools/testing/selftests/soft-dirty/soft-dirty.c b/trunk/tools/testing/selftests/soft-dirty/soft-dirty.c new file mode 100644 index 000000000000..aba4f87f87f0 --- /dev/null +++ b/trunk/tools/testing/selftests/soft-dirty/soft-dirty.c @@ -0,0 +1,114 @@ +#include <stdio.h> +#include <stdlib.h> +#include <sys/types.h> +#include <sys/mman.h> +#include <fcntl.h> +#include <unistd.h> + +typedef unsigned long long u64; + +#define PME_PRESENT (1ULL << 63) +#define PME_SOFT_DIRTY (1Ull << 55) + +#define PAGES_TO_TEST 3 +#ifndef PAGE_SIZE +#define PAGE_SIZE 4096 +#endif + +static void get_pagemap2(char *mem, u64 *map) +{ + int fd; + + fd = open("/proc/self/pagemap2", O_RDONLY); + if (fd < 0) { + perror("Can't open pagemap2"); + exit(1); + } + + lseek(fd, (unsigned long)mem / PAGE_SIZE * sizeof(u64), SEEK_SET); + read(fd, map, sizeof(u64) * PAGES_TO_TEST); + close(fd); + } + +static inline char map_p(u64 map) +{ + return map & PME_PRESENT ? 'p' : '-'; +} + +static inline char map_sd(u64 map) +{ + return map & PME_SOFT_DIRTY ?
'd' : '-'; +} + +static int check_pte(int step, int page, u64 *map, u64 want) +{ + if ((map[page] & want) != want) { + printf("Step %d Page %d has %c%c, want %c%c\n", + step, page, + map_p(map[page]), map_sd(map[page]), + map_p(want), map_sd(want)); + return 1; + } + + return 0; +} + +static void clear_refs(void) +{ + int fd; + char *v = "4"; + + fd = open("/proc/self/clear_refs", O_WRONLY); + if (write(fd, v, 3) < 3) { + perror("Can't clear soft-dirty bit"); + exit(1); + } + close(fd); +} + +int main(void) +{ + char *mem, x; + u64 map[PAGES_TO_TEST]; + + mem = mmap(NULL, PAGES_TO_TEST * PAGE_SIZE, + PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0); + + x = mem[0]; + mem[2 * PAGE_SIZE] = 'c'; + get_pagemap2(mem, map); + + if (check_pte(1, 0, map, PME_PRESENT)) + return 1; + if (check_pte(1, 1, map, 0)) + return 1; + if (check_pte(1, 2, map, PME_PRESENT | PME_SOFT_DIRTY)) + return 1; + + clear_refs(); + get_pagemap2(mem, map); + + if (check_pte(2, 0, map, PME_PRESENT)) + return 1; + if (check_pte(2, 1, map, 0)) + return 1; + if (check_pte(2, 2, map, PME_PRESENT)) + return 1; + + mem[0] = 'a'; + mem[PAGE_SIZE] = 'b'; + x = mem[2 * PAGE_SIZE]; + get_pagemap2(mem, map); + + if (check_pte(3, 0, map, PME_PRESENT | PME_SOFT_DIRTY)) + return 1; + if (check_pte(3, 1, map, PME_PRESENT | PME_SOFT_DIRTY)) + return 1; + if (check_pte(3, 2, map, PME_PRESENT)) + return 1; + + (void)x; /* gcc warn */ + + printf("PASS\n"); + return 0; +}
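
A short aside on the selftest above: it exercises soft-dirty page tracking by reading per-page flags from a /proc/self/pagemap2 file, clearing the bit by writing "4" to /proc/self/clear_refs, and re-checking the flags after each access pattern. For readers who want to experiment with soft-dirty tracking independently of this patch, a minimal sketch follows. It assumes the conventional /proc/self/pagemap layout (present flag in bit 63, soft-dirty in bit 55 of each 64-bit entry) on a kernel built with CONFIG_MEM_SOFT_DIRTY; the helper name page_soft_dirty() and the single-page scope are illustrative only and not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define PME_PRESENT	(1ULL << 63)
#define PME_SOFT_DIRTY	(1ULL << 55)

/* Return 1 if the page containing 'addr' is present and marked soft-dirty,
 * 0 if not, -1 on error.  Reads one 64-bit entry from /proc/self/pagemap. */
static int page_soft_dirty(const void *addr)
{
	long psize = sysconf(_SC_PAGESIZE);
	unsigned long long entry;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return -1;
	if (pread(fd, &entry, sizeof(entry),
		  (unsigned long)addr / psize * sizeof(entry)) != sizeof(entry)) {
		close(fd);
		return -1;
	}
	close(fd);
	return (entry & (PME_PRESENT | PME_SOFT_DIRTY)) ==
	       (PME_PRESENT | PME_SOFT_DIRTY);
}

int main(void)
{
	char *p = malloc(4096);

	p[0] = 1;	/* touch the page: now present, and soft-dirty until cleared */
	printf("soft-dirty after write: %d\n", page_soft_dirty(p));
	free(p);
	return 0;
}

As in the selftest, the bit is cleared between runs the same way clear_refs() does it above, by writing "4" to /proc/self/clear_refs; after that, only pages written since the clear should report soft-dirty again.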