diff --git a/[refs] b/[refs]
index 4d7076f267c1..a09c9b701ee7 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 3f490f7f99053288bd85563f8d9b5032b810e177
+refs/heads/master: efad7e6b1a28be599836c8f15ec04f99a98fb04c
diff --git a/trunk/Documentation/DocBook/media/v4l/dev-codec.xml b/trunk/Documentation/DocBook/media/v4l/dev-codec.xml
index ff44c16fc080..dca0ecd54dc6 100644
--- a/trunk/Documentation/DocBook/media/v4l/dev-codec.xml
+++ b/trunk/Documentation/DocBook/media/v4l/dev-codec.xml
@@ -1,27 +1,18 @@
Codec Interface
- A V4L2 codec can compress, decompress, transform, or otherwise
-convert video data from one format into another format, in memory. Typically
-such devices are memory-to-memory devices (i.e. devices with the
-V4L2_CAP_VIDEO_M2M or V4L2_CAP_VIDEO_M2M_MPLANE
-capability set).
-
+
+ Suspended
- A memory-to-memory video node acts just like a normal video node, but it
-supports both output (sending frames from memory to the codec hardware) and
-capture (receiving the processed frames from the codec hardware into memory)
-stream I/O. An application will have to setup the stream
-I/O for both sides and finally call &VIDIOC-STREAMON; for both capture and output
-to start the codec.
+ This interface has been suspended from the V4L2 API
+implemented in Linux 2.6 until we have more experience with codec
+device interfaces.
+
- Video compression codecs use the MPEG controls to setup their codec parameters
-(note that the MPEG controls actually support many more codecs than just MPEG).
-See .
+ A V4L2 codec can compress, decompress, transform, or otherwise
+convert video data from one format into another format, in memory.
+Applications send data to be converted to the driver through a
+&func-write; call, and receive the converted data through a
+&func-read; call. For efficiency a driver may also support streaming
+I/O.
- Memory-to-memory devices can often be used as a shared resource: you can
-open the video node multiple times, each application setting up their own codec properties
-that are local to the file handle, and each can use it independently from the others.
-The driver will arbitrate access to the codec and reprogram it whenever another file
-handler gets access. This is different from the usual video node behavior where the video properties
-are global to the device (i.e. changing something through one file handle is visible
-through another file handle).
+ [to do]
diff --git a/trunk/Documentation/DocBook/media/v4l/v4l2.xml b/trunk/Documentation/DocBook/media/v4l/v4l2.xml
index bfe823dd0f31..bfc93cdcf696 100644
--- a/trunk/Documentation/DocBook/media/v4l/v4l2.xml
+++ b/trunk/Documentation/DocBook/media/v4l/v4l2.xml
@@ -493,7 +493,7 @@ and discussions on the V4L mailing list.
Video for Linux Two API Specification
- Revision 3.10
+ Revision 3.9
&sub-common;
diff --git a/trunk/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt b/trunk/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt
index de9f6b78ee51..3f62adfb3e0b 100644
--- a/trunk/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt
+++ b/trunk/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt
@@ -2,7 +2,7 @@ Exynos4x12/Exynos5 SoC series camera host interface (FIMC-LITE)
Required properties:
-- compatible : should be "samsung,exynos4212-fimc-lite" for Exynos4212 and
+- compatible : should be "samsung,exynos4212-fimc" for Exynos4212 and
Exynos4412 SoCs;
- reg : physical base address and size of the device memory mapped
registers;
diff --git a/trunk/Documentation/filesystems/Locking b/trunk/Documentation/filesystems/Locking
index 9858f337529c..0706d32a61e6 100644
--- a/trunk/Documentation/filesystems/Locking
+++ b/trunk/Documentation/filesystems/Locking
@@ -189,7 +189,7 @@ prototypes:
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
sector_t (*bmap)(struct address_space *, sector_t);
- void (*invalidatepage) (struct page *, unsigned int, unsigned int);
+ int (*invalidatepage) (struct page *, unsigned long);
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
@@ -310,8 +310,8 @@ filesystems and by the swapper. The latter will eventually go away. Please,
keep it that way and don't breed new callers.
->invalidatepage() is called when the filesystem must attempt to drop
-some or all of the buffers from the page when it is being truncated. It
-returns zero on success. If ->invalidatepage is zero, the kernel uses
+some or all of the buffers from the page when it is being truncated. It
+returns zero on success. If ->invalidatepage is zero, the kernel uses
block_invalidatepage() instead.
->releasepage() is called when the kernel is about to try to drop the
@@ -414,7 +414,7 @@ prototypes:
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
- int (*iterate) (struct file *, struct dir_context *);
+ int (*readdir) (struct file *, void *, filldir_t);
unsigned int (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
diff --git a/trunk/Documentation/filesystems/f2fs.txt b/trunk/Documentation/filesystems/f2fs.txt
index b91e2f26b672..bd3c56c67380 100644
--- a/trunk/Documentation/filesystems/f2fs.txt
+++ b/trunk/Documentation/filesystems/f2fs.txt
@@ -98,13 +98,8 @@ Cleaning Overhead
MOUNT OPTIONS
================================================================================
-background_gc=%s Turn on/off cleaning operations, namely garbage
- collection, triggered in background when I/O subsystem is
- idle. If background_gc=on, it will turn on the garbage
- collection and if background_gc=off, garbage collection
- will be truned off.
- Default value for this option is on. So garbage
- collection is on by default.
+background_gc_off Turn off cleaning operations, namely garbage collection,
+ triggered in background when I/O subsystem is idle.
disable_roll_forward Disable the roll-forward recovery routine
discard Issue discard/TRIM commands when a segment is cleaned.
no_heap Disable heap-style segment allocation which finds free
diff --git a/trunk/Documentation/filesystems/porting b/trunk/Documentation/filesystems/porting
index 206a1bdc7321..4db22f6491e0 100644
--- a/trunk/Documentation/filesystems/porting
+++ b/trunk/Documentation/filesystems/porting
@@ -445,9 +445,3 @@ object doesn't exist. It's remote/distributed ones that might care...
[mandatory]
FS_REVAL_DOT is gone; if you used to have it, add ->d_weak_revalidate()
in your dentry operations instead.
---
-[mandatory]
- vfs_readdir() is gone; switch to iterate_dir() instead
---
-[mandatory]
- ->readdir() is gone now; switch to ->iterate()
diff --git a/trunk/Documentation/filesystems/vfs.txt b/trunk/Documentation/filesystems/vfs.txt
index e6bd1ffd821e..bc4b06b3160a 100644
--- a/trunk/Documentation/filesystems/vfs.txt
+++ b/trunk/Documentation/filesystems/vfs.txt
@@ -549,7 +549,7 @@ struct address_space_operations
-------------------------------
This describes how the VFS can manipulate mapping of a file to page cache in
-your filesystem. The following members are defined:
+your filesystem. As of kernel 2.6.22, the following members are defined:
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
@@ -566,7 +566,7 @@ struct address_space_operations {
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
sector_t (*bmap)(struct address_space *, sector_t);
- void (*invalidatepage) (struct page *, unsigned int, unsigned int);
+ int (*invalidatepage) (struct page *, unsigned long);
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
@@ -685,14 +685,14 @@ struct address_space_operations {
invalidatepage: If a page has PagePrivate set, then invalidatepage
will be called when part or all of the page is to be removed
from the address space. This generally corresponds to either a
- truncation, punch hole or a complete invalidation of the address
- space (in the latter case 'offset' will always be 0 and 'length'
- will be PAGE_CACHE_SIZE). Any private data associated with the page
- should be updated to reflect this truncation. If offset is 0 and
- length is PAGE_CACHE_SIZE, then the private data should be released,
- because the page must be able to be completely discarded. This may
- be done by calling the ->releasepage function, but in this case the
- release MUST succeed.
+ truncation or a complete invalidation of the address space
+ (in the latter case 'offset' will always be 0).
+ Any private data associated with the page should be updated
+ to reflect this truncation. If offset is 0, then
+ the private data should be released, because the page
+ must be able to be completely discarded. This may be done by
+ calling the ->releasepage function, but in this case the
+ release MUST succeed.
releasepage: releasepage is called on PagePrivate pages to indicate
that the page should be freed if possible. ->releasepage
@@ -777,7 +777,7 @@ struct file_operations {
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
- int (*iterate) (struct file *, struct dir_context *);
+ int (*readdir) (struct file *, void *, filldir_t);
unsigned int (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
@@ -815,7 +815,7 @@ otherwise noted.
aio_write: called by io_submit(2) and other asynchronous I/O operations
- iterate: called when the VFS needs to read the directory contents
+ readdir: called when the VFS needs to read the directory contents
poll: called by the VFS when a process wants to check if there is
activity on this file and (optionally) go to sleep until there
diff --git a/trunk/Documentation/networking/ip-sysctl.txt b/trunk/Documentation/networking/ip-sysctl.txt
index 3458d6343e01..f98ca633b528 100644
--- a/trunk/Documentation/networking/ip-sysctl.txt
+++ b/trunk/Documentation/networking/ip-sysctl.txt
@@ -420,10 +420,10 @@ tcp_synack_retries - INTEGER
for a passive TCP connection will happen after 63seconds.
tcp_syncookies - BOOLEAN
- Only valid when the kernel was compiled with CONFIG_SYN_COOKIES
+ Only valid when the kernel was compiled with CONFIG_SYNCOOKIES
Send out syncookies when the syn backlog queue of a socket
overflows. This is to prevent against the common 'SYN flood attack'
- Default: 1
+ Default: FALSE
Note, that syncookies is fallback facility.
It MUST NOT be used to help highly loaded servers to stand
diff --git a/trunk/Documentation/sound/alsa/HD-Audio-Models.txt b/trunk/Documentation/sound/alsa/HD-Audio-Models.txt
index 77d68e23b247..bb8b0dc532b8 100644
--- a/trunk/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/trunk/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -29,8 +29,6 @@ ALC269/270/275/276/280/282
alc271-dmic Enable ALC271X digital mic workaround
inv-dmic Inverted internal mic workaround
lenovo-dock Enables docking station I/O for some Lenovos
- dell-headset-multi Headset jack, which can also be used as mic-in
- dell-headset-dock Headset jack (without mic-in), and also dock I/O
ALC662/663/272
==============
@@ -44,7 +42,6 @@ ALC662/663/272
asus-mode7 ASUS
asus-mode8 ASUS
inv-dmic Inverted internal mic workaround
- dell-headset-multi Headset jack, which can also be used as mic-in
ALC680
======
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index ad7e322ad17b..5be702cc8449 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -3220,7 +3220,7 @@ F: lib/fault-inject.c
FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
M: Robert Love
-L: fcoe-devel@open-fcoe.org
+L: devel@open-fcoe.org
W: www.Open-FCoE.org
S: Supported
F: drivers/scsi/libfc/
diff --git a/trunk/Makefile b/trunk/Makefile
index e5e3ba085191..90400165125e 100644
--- a/trunk/Makefile
+++ b/trunk/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 10
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc5
NAME = Unicycling Gorilla
# *DOCUMENTATION*
diff --git a/trunk/arch/alpha/include/asm/pgtable.h b/trunk/arch/alpha/include/asm/pgtable.h
index d8f9b7e89234..81a4342d5a3f 100644
--- a/trunk/arch/alpha/include/asm/pgtable.h
+++ b/trunk/arch/alpha/include/asm/pgtable.h
@@ -354,6 +354,9 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define kern_addr_valid(addr) (1)
#endif
+#define io_remap_pfn_range(vma, start, pfn, size, prot) \
+ remap_pfn_range(vma, start, pfn, size, prot)
+
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
diff --git a/trunk/arch/alpha/kernel/osf_sys.c b/trunk/arch/alpha/kernel/osf_sys.c
index 1402fcc11c2c..b9e37ad6fa19 100644
--- a/trunk/arch/alpha/kernel/osf_sys.c
+++ b/trunk/arch/alpha/kernel/osf_sys.c
@@ -96,7 +96,6 @@ struct osf_dirent {
};
struct osf_dirent_callback {
- struct dir_context ctx;
struct osf_dirent __user *dirent;
long __user *basep;
unsigned int count;
@@ -147,17 +146,17 @@ SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd,
{
int error;
struct fd arg = fdget(fd);
- struct osf_dirent_callback buf = {
- .ctx.actor = osf_filldir,
- .dirent = dirent,
- .basep = basep,
- .count = count
- };
+ struct osf_dirent_callback buf;
if (!arg.file)
return -EBADF;
- error = iterate_dir(arg.file, &buf.ctx);
+ buf.dirent = dirent;
+ buf.basep = basep;
+ buf.count = count;
+ buf.error = 0;
+
+ error = vfs_readdir(arg.file, osf_filldir, &buf);
if (error >= 0)
error = buf.error;
if (count != buf.count)
diff --git a/trunk/arch/alpha/kernel/pci-sysfs.c b/trunk/arch/alpha/kernel/pci-sysfs.c
index 2b183b0d3207..b51f7b4818cd 100644
--- a/trunk/arch/alpha/kernel/pci-sysfs.c
+++ b/trunk/arch/alpha/kernel/pci-sysfs.c
@@ -26,6 +26,7 @@ static int hose_mmap_page_range(struct pci_controller *hose,
base = sparse ? hose->sparse_io_base : hose->dense_io_base;
vma->vm_pgoff += base >> PAGE_SHIFT;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
diff --git a/trunk/arch/arc/include/asm/pgtable.h b/trunk/arch/arc/include/asm/pgtable.h
index c110ac87d22b..95b1522212a7 100644
--- a/trunk/arch/arc/include/asm/pgtable.h
+++ b/trunk/arch/arc/include/asm/pgtable.h
@@ -394,6 +394,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
* remap a physical page `pfn' of size `size' with page protection `prot'
* into virtual address `from'
*/
+#define io_remap_pfn_range(vma, from, pfn, size, prot) \
+ remap_pfn_range(vma, from, pfn, size, prot)
+
#include
/* to cope with aliasing VIPT cache */
diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig
index 136f263ed47b..49d993cee512 100644
--- a/trunk/arch/arm/Kconfig
+++ b/trunk/arch/arm/Kconfig
@@ -1087,20 +1087,6 @@ if !MMU
source "arch/arm/Kconfig-nommu"
endif
-config PJ4B_ERRATA_4742
- bool "PJ4B Errata 4742: IDLE Wake Up Commands can Cause the CPU Core to Cease Operation"
- depends on CPU_PJ4B && MACH_ARMADA_370
- default y
- help
- When coming out of either a Wait for Interrupt (WFI) or a Wait for
- Event (WFE) IDLE states, a specific timing sensitivity exists between
- the retiring WFI/WFE instructions and the newly issued subsequent
- instructions. This sensitivity can result in a CPU hang scenario.
- Workaround:
- The software must insert either a Data Synchronization Barrier (DSB)
- or Data Memory Barrier (DMB) command immediately after the WFI/WFE
- instruction
-
config ARM_ERRATA_326103
bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
depends on CPU_V6
@@ -1203,16 +1189,6 @@ config PL310_ERRATA_588369
is not correctly implemented in PL310 as clean lines are not
invalidated as a result of these operations.
-config ARM_ERRATA_643719
- bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
- depends on CPU_V7 && SMP
- help
- This option enables the workaround for the 643719 Cortex-A9 (prior to
- r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR
- register returns zero when it should return one. The workaround
- corrects this value, ensuring cache maintenance operations which use
- it behave as intended and avoiding data corruption.
-
config ARM_ERRATA_720789
bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
depends on CPU_V7
@@ -2030,7 +2006,7 @@ config XIP_PHYS_ADDR
config KEXEC
bool "Kexec system call (EXPERIMENTAL)"
- depends on (!SMP || PM_SLEEP_SMP)
+ depends on (!SMP || HOTPLUG_CPU)
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/trunk/arch/arm/boot/compressed/Makefile b/trunk/arch/arm/boot/compressed/Makefile
index 120b83bfde20..79e9bdbfc491 100644
--- a/trunk/arch/arm/boot/compressed/Makefile
+++ b/trunk/arch/arm/boot/compressed/Makefile
@@ -116,8 +116,7 @@ targets := vmlinux vmlinux.lds \
# Make sure files are removed during clean
extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \
- lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \
- hyp-stub.S
+ lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs)
ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
diff --git a/trunk/arch/arm/boot/dts/am33xx.dtsi b/trunk/arch/arm/boot/dts/am33xx.dtsi
index 8e1248f01fab..1460d9b88adf 100644
--- a/trunk/arch/arm/boot/dts/am33xx.dtsi
+++ b/trunk/arch/arm/boot/dts/am33xx.dtsi
@@ -409,8 +409,8 @@
ti,hwmods = "gpmc";
reg = <0x50000000 0x2000>;
interrupts = <100>;
- gpmc,num-cs = <7>;
- gpmc,num-waitpins = <2>;
+ num-cs = <7>;
+ num-waitpins = <2>;
#address-cells = <2>;
#size-cells = <1>;
status = "disabled";
diff --git a/trunk/arch/arm/boot/dts/armada-xp-gp.dts b/trunk/arch/arm/boot/dts/armada-xp-gp.dts
index 76db557adbe7..3ee63d128e27 100644
--- a/trunk/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/trunk/arch/arm/boot/dts/armada-xp-gp.dts
@@ -39,9 +39,8 @@
};
soc {
- ranges = <0 0 0xd0000000 0x100000 /* Internal registers 1MiB */
- 0xe0000000 0 0xe0000000 0x8100000 /* PCIe */
- 0xf0000000 0 0xf0000000 0x1000000 /* Device Bus, NOR 16MiB */>;
+ ranges = <0 0 0xd0000000 0x100000
+ 0xf0000000 0 0xf0000000 0x1000000>;
internal-regs {
serial@12000 {
diff --git a/trunk/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/trunk/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index fdea75c73411..46b785064dd8 100644
--- a/trunk/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/trunk/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -27,9 +27,8 @@
};
soc {
- ranges = <0 0 0xd0000000 0x100000 /* Internal registers 1MiB */
- 0xe0000000 0 0xe0000000 0x8100000 /* PCIe */
- 0xf0000000 0 0xf0000000 0x8000000 /* Device Bus, NOR 128MiB */>;
+ ranges = <0 0 0xd0000000 0x100000
+ 0xf0000000 0 0xf0000000 0x8000000>;
internal-regs {
serial@12000 {
diff --git a/trunk/arch/arm/boot/dts/exynos5250-pinctrl.dtsi b/trunk/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
index ded558bb0f3b..d1650fb34c0a 100644
--- a/trunk/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
+++ b/trunk/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
@@ -763,7 +763,7 @@
};
};
- pinctrl@03860000 {
+ pinctrl@03680000 {
gpz: gpz {
gpio-controller;
#gpio-cells = <2>;
diff --git a/trunk/arch/arm/boot/dts/exynos5250.dtsi b/trunk/arch/arm/boot/dts/exynos5250.dtsi
index fc9fb3d526e2..0673524238a6 100644
--- a/trunk/arch/arm/boot/dts/exynos5250.dtsi
+++ b/trunk/arch/arm/boot/dts/exynos5250.dtsi
@@ -161,9 +161,9 @@
interrupts = <0 50 0>;
};
- pinctrl_3: pinctrl@03860000 {
+ pinctrl_3: pinctrl@03680000 {
compatible = "samsung,exynos5250-pinctrl";
- reg = <0x03860000 0x1000>;
+ reg = <0x0368000 0x1000>;
interrupts = <0 47 0>;
};
diff --git a/trunk/arch/arm/boot/dts/omap4-panda-common.dtsi b/trunk/arch/arm/boot/dts/omap4-panda-common.dtsi
index eeb734e25709..03bd60deb52b 100644
--- a/trunk/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/trunk/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -56,23 +56,9 @@
};
};
-&omap4_pmx_wkup {
- pinctrl-names = "default";
- pinctrl-0 = <
- &twl6030_wkup_pins
- >;
-
- twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
- pinctrl-single,pins = <
- 0x14 0x2 /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
- >;
- };
-};
-
&omap4_pmx_core {
pinctrl-names = "default";
pinctrl-0 = <
- &twl6030_pins
&twl6040_pins
&mcpdm_pins
&mcbsp1_pins
@@ -80,12 +66,6 @@
&tpd12s015_pins
>;
- twl6030_pins: pinmux_twl6030_pins {
- pinctrl-single,pins = <
- 0x15e 0x4118 /* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
- >;
- };
-
twl6040_pins: pinmux_twl6040_pins {
pinctrl-single,pins = <
0xe0 0x3 /* hdq_sio.gpio_127 OUTPUT | MODE3 */
diff --git a/trunk/arch/arm/boot/dts/omap4-sdp.dts b/trunk/arch/arm/boot/dts/omap4-sdp.dts
index 98505a2ef162..a35d9cd58063 100644
--- a/trunk/arch/arm/boot/dts/omap4-sdp.dts
+++ b/trunk/arch/arm/boot/dts/omap4-sdp.dts
@@ -142,23 +142,9 @@
};
};
-&omap4_pmx_wkup {
- pinctrl-names = "default";
- pinctrl-0 = <
- &twl6030_wkup_pins
- >;
-
- twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
- pinctrl-single,pins = <
- 0x14 0x2 /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
- >;
- };
-};
-
&omap4_pmx_core {
pinctrl-names = "default";
pinctrl-0 = <
- &twl6030_pins
&twl6040_pins
&mcpdm_pins
&dmic_pins
@@ -193,12 +179,6 @@
>;
};
- twl6030_pins: pinmux_twl6030_pins {
- pinctrl-single,pins = <
- 0x15e 0x4118 /* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
- >;
- };
-
twl6040_pins: pinmux_twl6040_pins {
pinctrl-single,pins = <
0xe0 0x3 /* hdq_sio.gpio_127 OUTPUT | MODE3 */
diff --git a/trunk/arch/arm/boot/dts/omap5.dtsi b/trunk/arch/arm/boot/dts/omap5.dtsi
index 635cae283011..3dd7ff825828 100644
--- a/trunk/arch/arm/boot/dts/omap5.dtsi
+++ b/trunk/arch/arm/boot/dts/omap5.dtsi
@@ -538,7 +538,6 @@
interrupts = <0 41 0x4>;
ti,hwmods = "timer5";
ti,timer-dsp;
- ti,timer-pwm;
};
timer6: timer@4013a000 {
@@ -575,7 +574,6 @@
reg = <0x4803e000 0x80>;
interrupts = <0 45 0x4>;
ti,hwmods = "timer9";
- ti,timer-pwm;
};
timer10: timer@48086000 {
@@ -583,7 +581,6 @@
reg = <0x48086000 0x80>;
interrupts = <0 46 0x4>;
ti,hwmods = "timer10";
- ti,timer-pwm;
};
timer11: timer@48088000 {
diff --git a/trunk/arch/arm/include/asm/cacheflush.h b/trunk/arch/arm/include/asm/cacheflush.h
index 17d0ae8672fa..bff71388e72a 100644
--- a/trunk/arch/arm/include/asm/cacheflush.h
+++ b/trunk/arch/arm/include/asm/cacheflush.h
@@ -320,7 +320,9 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-extern void flush_kernel_dcache_page(struct page *);
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+}
#define flush_dcache_mmap_lock(mapping) \
spin_lock_irq(&(mapping)->tree_lock)
diff --git a/trunk/arch/arm/include/asm/cputype.h b/trunk/arch/arm/include/asm/cputype.h
index dba62cb1ad08..7652712d1d14 100644
--- a/trunk/arch/arm/include/asm/cputype.h
+++ b/trunk/arch/arm/include/asm/cputype.h
@@ -32,8 +32,6 @@
#define MPIDR_HWID_BITMASK 0xFFFFFF
-#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)
-
#define MPIDR_LEVEL_BITS 8
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
diff --git a/trunk/arch/arm/include/asm/glue-proc.h b/trunk/arch/arm/include/asm/glue-proc.h
index 8017e94acc5e..ac1dd54724b6 100644
--- a/trunk/arch/arm/include/asm/glue-proc.h
+++ b/trunk/arch/arm/include/asm/glue-proc.h
@@ -230,15 +230,6 @@
# endif
#endif
-#ifdef CONFIG_CPU_PJ4B
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_pj4b
-# endif
-#endif
-
#ifndef MULTI_CPU
#define cpu_proc_init __glue(CPU_NAME,_proc_init)
#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
diff --git a/trunk/arch/arm/include/asm/pgtable-nommu.h b/trunk/arch/arm/include/asm/pgtable-nommu.h
index 0642228ff785..7ec60d6075bf 100644
--- a/trunk/arch/arm/include/asm/pgtable-nommu.h
+++ b/trunk/arch/arm/include/asm/pgtable-nommu.h
@@ -79,6 +79,8 @@ extern unsigned int kobjsize(const void *objp);
* No page table caches to initialise.
*/
#define pgtable_cache_init() do { } while (0)
+#define io_remap_pfn_range remap_pfn_range
+
/*
* All 32bit addresses are effectively valid for vmalloc...
diff --git a/trunk/arch/arm/include/asm/pgtable.h b/trunk/arch/arm/include/asm/pgtable.h
index 229e0dde9c71..9bcd262a9008 100644
--- a/trunk/arch/arm/include/asm/pgtable.h
+++ b/trunk/arch/arm/include/asm/pgtable.h
@@ -318,6 +318,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+/*
+ * remap a physical page `pfn' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+#define io_remap_pfn_range(vma,from,pfn,size,prot) \
+ remap_pfn_range(vma, from, pfn, size, prot)
+
#define pgtable_cache_init() do { } while (0)
#endif /* !__ASSEMBLY__ */
diff --git a/trunk/arch/arm/include/asm/smp_plat.h b/trunk/arch/arm/include/asm/smp_plat.h
index e78983202737..aaa61b6f50ff 100644
--- a/trunk/arch/arm/include/asm/smp_plat.h
+++ b/trunk/arch/arm/include/asm/smp_plat.h
@@ -49,7 +49,7 @@ static inline int cache_ops_need_broadcast(void)
/*
* Logical CPU mapping.
*/
-extern u32 __cpu_logical_map[];
+extern int __cpu_logical_map[];
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
/*
* Retrieve logical cpu index corresponding to a given MPIDR[23:0]
diff --git a/trunk/arch/arm/kernel/devtree.c b/trunk/arch/arm/kernel/devtree.c
index 5859c8bc727c..5af04f6daa33 100644
--- a/trunk/arch/arm/kernel/devtree.c
+++ b/trunk/arch/arm/kernel/devtree.c
@@ -82,7 +82,7 @@ void __init arm_dt_init_cpu_maps(void)
u32 i, j, cpuidx = 1;
u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
- u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
+ u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = UINT_MAX };
bool bootcpu_valid = false;
cpus = of_find_node_by_path("/cpus");
@@ -92,9 +92,6 @@ void __init arm_dt_init_cpu_maps(void)
for_each_child_of_node(cpus, cpu) {
u32 hwid;
- if (of_node_cmp(cpu->type, "cpu"))
- continue;
-
pr_debug(" * %s...\n", cpu->full_name);
/*
* A device tree containing CPU nodes with missing "reg"
@@ -152,10 +149,9 @@ void __init arm_dt_init_cpu_maps(void)
tmp_map[i] = hwid;
}
- if (!bootcpu_valid) {
- pr_warn("DT missing boot CPU MPIDR[23:0], fall back to default cpu_logical_map\n");
+ if (WARN(!bootcpu_valid, "DT missing boot CPU MPIDR[23:0], "
+ "fall back to default cpu_logical_map\n"))
return;
- }
/*
* Since the boot CPU node contains proper data, and all nodes have
diff --git a/trunk/arch/arm/kernel/machine_kexec.c b/trunk/arch/arm/kernel/machine_kexec.c
index 4fb074c446bf..8ef8c9337809 100644
--- a/trunk/arch/arm/kernel/machine_kexec.c
+++ b/trunk/arch/arm/kernel/machine_kexec.c
@@ -134,10 +134,6 @@ void machine_kexec(struct kimage *image)
unsigned long reboot_code_buffer_phys;
void *reboot_code_buffer;
- if (num_online_cpus() > 1) {
- pr_err("kexec: error: multiple CPUs still online\n");
- return;
- }
page_list = image->head & PAGE_MASK;
diff --git a/trunk/arch/arm/kernel/process.c b/trunk/arch/arm/kernel/process.c
index 6e8931ccf13e..282de4826abb 100644
--- a/trunk/arch/arm/kernel/process.c
+++ b/trunk/arch/arm/kernel/process.c
@@ -184,61 +184,30 @@ int __init reboot_setup(char *str)
__setup("reboot=", reboot_setup);
-/*
- * Called by kexec, immediately prior to machine_kexec().
- *
- * This must completely disable all secondary CPUs; simply causing those CPUs
- * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
- * kexec'd kernel to use any and all RAM as it sees fit, without having to
- * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
- * functionality embodied in disable_nonboot_cpus() to achieve this.
- */
void machine_shutdown(void)
{
- disable_nonboot_cpus();
+#ifdef CONFIG_SMP
+ smp_send_stop();
+#endif
}
-/*
- * Halting simply requires that the secondary CPUs stop performing any
- * activity (executing tasks, handling interrupts). smp_send_stop()
- * achieves this.
- */
void machine_halt(void)
{
- smp_send_stop();
-
+ machine_shutdown();
local_irq_disable();
while (1);
}
-/*
- * Power-off simply requires that the secondary CPUs stop performing any
- * activity (executing tasks, handling interrupts). smp_send_stop()
- * achieves this. When the system power is turned off, it will take all CPUs
- * with it.
- */
void machine_power_off(void)
{
- smp_send_stop();
-
+ machine_shutdown();
if (pm_power_off)
pm_power_off();
}
-/*
- * Restart requires that the secondary CPUs stop performing any activity
- * while the primary CPU resets the system. Systems with a single CPU can
- * use soft_restart() as their machine descriptor's .restart hook, since that
- * will cause the only available CPU to reset. Systems with multiple CPUs must
- * provide a HW restart implementation, to ensure that all CPUs reset at once.
- * This is required so that any code running after reset on the primary CPU
- * doesn't have to co-ordinate with other CPUs to ensure they aren't still
- * executing pre-reset code, and using RAM that the primary CPU's code wishes
- * to use. Implementing such co-ordination would be essentially impossible.
- */
void machine_restart(char *cmd)
{
- smp_send_stop();
+ machine_shutdown();
arm_pm_restart(reboot_mode, cmd);
diff --git a/trunk/arch/arm/kernel/setup.c b/trunk/arch/arm/kernel/setup.c
index b4b1d397592b..1522c7ae31b0 100644
--- a/trunk/arch/arm/kernel/setup.c
+++ b/trunk/arch/arm/kernel/setup.c
@@ -444,7 +444,7 @@ void notrace cpu_init(void)
: "r14");
}
-u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
+int __cpu_logical_map[NR_CPUS];
void __init smp_setup_processor_id(void)
{
diff --git a/trunk/arch/arm/kernel/smp.c b/trunk/arch/arm/kernel/smp.c
index 5919eb451bb9..550d63cef68e 100644
--- a/trunk/arch/arm/kernel/smp.c
+++ b/trunk/arch/arm/kernel/smp.c
@@ -651,6 +651,17 @@ void smp_send_reschedule(int cpu)
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
+#ifdef CONFIG_HOTPLUG_CPU
+static void smp_kill_cpus(cpumask_t *mask)
+{
+ unsigned int cpu;
+ for_each_cpu(cpu, mask)
+ platform_cpu_kill(cpu);
+}
+#else
+static void smp_kill_cpus(cpumask_t *mask) { }
+#endif
+
void smp_send_stop(void)
{
unsigned long timeout;
@@ -668,6 +679,8 @@ void smp_send_stop(void)
if (num_online_cpus() > 1)
pr_warning("SMP: failed to stop secondary CPUs\n");
+
+ smp_kill_cpus(&mask);
}
/*
diff --git a/trunk/arch/arm/mach-kirkwood/mpp.c b/trunk/arch/arm/mach-kirkwood/mpp.c
index e96fd71abd76..827cde42414f 100644
--- a/trunk/arch/arm/mach-kirkwood/mpp.c
+++ b/trunk/arch/arm/mach-kirkwood/mpp.c
@@ -22,10 +22,9 @@ static unsigned int __init kirkwood_variant(void)
kirkwood_pcie_id(&dev, &rev);
- if (dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0)
+ if ((dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0) ||
+ (dev == MV88F6282_DEV_ID))
return MPP_F6281_MASK;
- if (dev == MV88F6282_DEV_ID)
- return MPP_F6282_MASK;
if (dev == MV88F6192_DEV_ID && rev >= MV88F6192_REV_A0)
return MPP_F6192_MASK;
if (dev == MV88F6180_DEV_ID)
diff --git a/trunk/arch/arm/mach-omap2/clock36xx.c b/trunk/arch/arm/mach-omap2/clock36xx.c
index bbd6a3f717e6..8f3bf4e50908 100644
--- a/trunk/arch/arm/mach-omap2/clock36xx.c
+++ b/trunk/arch/arm/mach-omap2/clock36xx.c
@@ -20,12 +20,11 @@
#include
#include
-#include
#include
#include "clock.h"
#include "clock36xx.h"
-#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
+
/**
* omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering
@@ -40,28 +39,29 @@
*/
int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
{
- struct clk_divider *parent;
+ struct clk_hw_omap *parent;
struct clk_hw *parent_hw;
- u32 dummy_v, orig_v;
+ u32 dummy_v, orig_v, clksel_shift;
int ret;
/* Clear PWRDN bit of HSDIVIDER */
ret = omap2_dflt_clk_enable(clk);
parent_hw = __clk_get_hw(__clk_get_parent(clk->clk));
- parent = to_clk_divider(parent_hw);
+ parent = to_clk_hw_omap(parent_hw);
/* Restore the dividers */
if (!ret) {
- orig_v = __raw_readl(parent->reg);
+ clksel_shift = __ffs(parent->clksel_mask);
+ orig_v = __raw_readl(parent->clksel_reg);
dummy_v = orig_v;
/* Write any other value different from the Read value */
- dummy_v ^= (1 << parent->shift);
- __raw_writel(dummy_v, parent->reg);
+ dummy_v ^= (1 << clksel_shift);
+ __raw_writel(dummy_v, parent->clksel_reg);
/* Write the original divider */
- __raw_writel(orig_v, parent->reg);
+ __raw_writel(orig_v, parent->clksel_reg);
}
return ret;
diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 69337af748cc..075f7cc51026 100644
--- a/trunk/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/trunk/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -2007,13 +2007,6 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
},
};
-/* uart2 */
-static struct omap_hwmod_dma_info uart2_edma_reqs[] = {
- { .name = "tx", .dma_req = 28, },
- { .name = "rx", .dma_req = 29, },
- { .dma_req = -1 }
-};
-
static struct omap_hwmod_irq_info am33xx_uart2_irqs[] = {
{ .irq = 73 + OMAP_INTC_START, },
{ .irq = -1 },
@@ -2025,7 +2018,7 @@ static struct omap_hwmod am33xx_uart2_hwmod = {
.clkdm_name = "l4ls_clkdm",
.flags = HWMOD_SWSUP_SIDLE_ACT,
.mpu_irqs = am33xx_uart2_irqs,
- .sdma_reqs = uart2_edma_reqs,
+ .sdma_reqs = uart1_edma_reqs,
.main_clk = "dpll_per_m2_div4_ck",
.prcm = {
.omap4 = {
diff --git a/trunk/arch/arm/mach-omap2/pm34xx.c b/trunk/arch/arm/mach-omap2/pm34xx.c
index 5a2d8034c8de..c01859398b54 100644
--- a/trunk/arch/arm/mach-omap2/pm34xx.c
+++ b/trunk/arch/arm/mach-omap2/pm34xx.c
@@ -546,10 +546,8 @@ static void __init prcm_setup_regs(void)
/* Clear any pending PRCM interrupts */
omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
- /*
- * We need to idle iva2_pwrdm even on am3703 with no iva2.
- */
- omap3_iva_idle();
+ if (omap3_has_iva())
+ omap3_iva_idle();
omap3_d2d_idle();
}
diff --git a/trunk/arch/arm/mach-prima2/pm.c b/trunk/arch/arm/mach-prima2/pm.c
index 8f595c0cc8d9..9936c180bf01 100644
--- a/trunk/arch/arm/mach-prima2/pm.c
+++ b/trunk/arch/arm/mach-prima2/pm.c
@@ -101,10 +101,8 @@ static int __init sirfsoc_of_pwrc_init(void)
struct device_node *np;
np = of_find_matching_node(NULL, pwrc_ids);
- if (!np) {
- pr_err("unable to find compatible sirf pwrc node in dtb\n");
- return -ENOENT;
- }
+ if (!np)
+ panic("unable to find compatible pwrc node in dtb\n");
/*
* pwrc behind rtciobrg is not located in memory space
diff --git a/trunk/arch/arm/mach-prima2/rstc.c b/trunk/arch/arm/mach-prima2/rstc.c
index d5e0cbc934c0..435019ca0a48 100644
--- a/trunk/arch/arm/mach-prima2/rstc.c
+++ b/trunk/arch/arm/mach-prima2/rstc.c
@@ -28,10 +28,8 @@ static int __init sirfsoc_of_rstc_init(void)
struct device_node *np;
np = of_find_matching_node(NULL, rstc_ids);
- if (!np) {
- pr_err("unable to find compatible sirf rstc node in dtb\n");
- return -ENOENT;
- }
+ if (!np)
+ panic("unable to find compatible rstc node in dtb\n");
sirfsoc_rstc_base = of_iomap(np, 0);
if (!sirfsoc_rstc_base)
diff --git a/trunk/arch/arm/mm/cache-v7.S b/trunk/arch/arm/mm/cache-v7.S
index 515b00064da8..15451ee4acc8 100644
--- a/trunk/arch/arm/mm/cache-v7.S
+++ b/trunk/arch/arm/mm/cache-v7.S
@@ -92,14 +92,6 @@ ENTRY(v7_flush_dcache_louis)
mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr
ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr
ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr
-#ifdef CONFIG_ARM_ERRATA_643719
- ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register
- ALT_UP(moveq pc, lr) @ LoUU is zero, so nothing to do
- ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p?
- biceq r2, r2, #0x0000000f @ clear minor revision number
- teqeq r2, r1 @ test for errata affected core and if so...
- orreqs r3, #(1 << 21) @ fix LoUIS value (and set flags state to 'ne')
-#endif
ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2
ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2
moveq pc, lr @ return if level == 0
diff --git a/trunk/arch/arm/mm/flush.c b/trunk/arch/arm/mm/flush.c
index 32aa5861119f..0d473cce501c 100644
--- a/trunk/arch/arm/mm/flush.c
+++ b/trunk/arch/arm/mm/flush.c
@@ -300,39 +300,6 @@ void flush_dcache_page(struct page *page)
}
EXPORT_SYMBOL(flush_dcache_page);
-/*
- * Ensure cache coherency for the kernel mapping of this page. We can
- * assume that the page is pinned via kmap.
- *
- * If the page only exists in the page cache and there are no user
- * space mappings, this is a no-op since the page was already marked
- * dirty at creation. Otherwise, we need to flush the dirty kernel
- * cache lines directly.
- */
-void flush_kernel_dcache_page(struct page *page)
-{
- if (cache_is_vivt() || cache_is_vipt_aliasing()) {
- struct address_space *mapping;
-
- mapping = page_mapping(page);
-
- if (!mapping || mapping_mapped(mapping)) {
- void *addr;
-
- addr = page_address(page);
- /*
- * kmap_atomic() doesn't set the page virtual
- * address for highmem pages, and
- * kunmap_atomic() takes care of cache
- * flushing already.
- */
- if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
- __cpuc_flush_dcache_area(addr, PAGE_SIZE);
- }
- }
-}
-EXPORT_SYMBOL(flush_kernel_dcache_page);
-
/*
* Flush an anonymous page so that users of get_user_pages()
* can safely access the data. The expected sequence is:
diff --git a/trunk/arch/arm/mm/mmu.c b/trunk/arch/arm/mm/mmu.c
index 4d409e6a552d..e0d8565671a6 100644
--- a/trunk/arch/arm/mm/mmu.c
+++ b/trunk/arch/arm/mm/mmu.c
@@ -616,12 +616,10 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
} while (pte++, addr += PAGE_SIZE, addr != end);
}
-static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
unsigned long end, phys_addr_t phys,
const struct mem_type *type)
{
- pmd_t *p = pmd;
-
#ifndef CONFIG_ARM_LPAE
/*
* In classic MMU format, puds and pmds are folded in to
@@ -640,7 +638,7 @@ static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
phys += SECTION_SIZE;
} while (pmd++, addr += SECTION_SIZE, addr != end);
- flush_pmd_entry(p);
+ flush_pmd_entry(pmd);
}
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
@@ -663,7 +661,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
*/
if (type->prot_sect &&
((addr | next | phys) & ~SECTION_MASK) == 0) {
- __map_init_section(pmd, addr, next, phys, type);
+ map_init_section(pmd, addr, next, phys, type);
} else {
alloc_init_pte(pmd, addr, next,
__phys_to_pfn(phys), type);
diff --git a/trunk/arch/arm/mm/nommu.c b/trunk/arch/arm/mm/nommu.c
index eb5293a69a84..d51225f90ae2 100644
--- a/trunk/arch/arm/mm/nommu.c
+++ b/trunk/arch/arm/mm/nommu.c
@@ -57,12 +57,6 @@ void flush_dcache_page(struct page *page)
}
EXPORT_SYMBOL(flush_dcache_page);
-void flush_kernel_dcache_page(struct page *page)
-{
- __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
-}
-EXPORT_SYMBOL(flush_kernel_dcache_page);
-
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *dst, const void *src,
unsigned long len)
diff --git a/trunk/arch/arm/mm/proc-fa526.S b/trunk/arch/arm/mm/proc-fa526.S
index aaeb6c127c7a..d217e9795d74 100644
--- a/trunk/arch/arm/mm/proc-fa526.S
+++ b/trunk/arch/arm/mm/proc-fa526.S
@@ -81,6 +81,7 @@ ENDPROC(cpu_fa526_reset)
*/
.align 4
ENTRY(cpu_fa526_do_idle)
+ mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mov pc, lr
diff --git a/trunk/arch/arm/mm/proc-macros.S b/trunk/arch/arm/mm/proc-macros.S
index e3c48a3fe063..f9a0aa725ea9 100644
--- a/trunk/arch/arm/mm/proc-macros.S
+++ b/trunk/arch/arm/mm/proc-macros.S
@@ -333,8 +333,3 @@ ENTRY(\name\()_tlb_fns)
.endif
.size \name\()_tlb_fns, . - \name\()_tlb_fns
.endm
-
-.macro globl_equ x, y
- .globl \x
- .equ \x, \y
-.endm
diff --git a/trunk/arch/arm/mm/proc-v7.S b/trunk/arch/arm/mm/proc-v7.S
index e35fec34453e..2c73a7301ff7 100644
--- a/trunk/arch/arm/mm/proc-v7.S
+++ b/trunk/arch/arm/mm/proc-v7.S
@@ -138,29 +138,6 @@ ENTRY(cpu_v7_do_resume)
mov r0, r8 @ control register
b cpu_resume_mmu
ENDPROC(cpu_v7_do_resume)
-#endif
-
-#ifdef CONFIG_CPU_PJ4B
- globl_equ cpu_pj4b_switch_mm, cpu_v7_switch_mm
- globl_equ cpu_pj4b_set_pte_ext, cpu_v7_set_pte_ext
- globl_equ cpu_pj4b_proc_init, cpu_v7_proc_init
- globl_equ cpu_pj4b_proc_fin, cpu_v7_proc_fin
- globl_equ cpu_pj4b_reset, cpu_v7_reset
-#ifdef CONFIG_PJ4B_ERRATA_4742
-ENTRY(cpu_pj4b_do_idle)
- dsb @ WFI may enter a low-power mode
- wfi
- dsb @barrier
- mov pc, lr
-ENDPROC(cpu_pj4b_do_idle)
-#else
- globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle
-#endif
- globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area
- globl_equ cpu_pj4b_do_suspend, cpu_v7_do_suspend
- globl_equ cpu_pj4b_do_resume, cpu_v7_do_resume
- globl_equ cpu_pj4b_suspend_size, cpu_v7_suspend_size
-
#endif
__CPUINIT
@@ -373,9 +350,6 @@ __v7_setup_stack:
@ define struct processor (see and proc-macros.S)
define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
-#ifdef CONFIG_CPU_PJ4B
- define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
-#endif
.section ".rodata"
@@ -388,7 +362,7 @@ __v7_setup_stack:
/*
* Standard v7 proc info content
*/
-.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions
+.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0
ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
@@ -401,7 +375,7 @@ __v7_setup_stack:
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \
HWCAP_EDSP | HWCAP_TLS | \hwcaps
.long cpu_v7_name
- .long \proc_fns
+ .long v7_processor_functions
.long v7wbi_tlb_fns
.long v6_user_fns
.long v7_cache_fns
@@ -433,14 +407,12 @@ __v7_ca9mp_proc_info:
/*
* Marvell PJ4B processor.
*/
-#ifdef CONFIG_CPU_PJ4B
.type __v7_pj4b_proc_info, #object
__v7_pj4b_proc_info:
- .long 0x560f5800
- .long 0xff0fff00
- __v7_proc __v7_pj4b_setup, proc_fns = pj4b_processor_functions
+ .long 0x562f5840
+ .long 0xfffffff0
+ __v7_proc __v7_pj4b_setup
.size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info
-#endif
/*
* ARM Ltd. Cortex A7 processor.
diff --git a/trunk/arch/arm/plat-samsung/pm.c b/trunk/arch/arm/plat-samsung/pm.c
index bd7124c87fea..53210ec4e8ec 100644
--- a/trunk/arch/arm/plat-samsung/pm.c
+++ b/trunk/arch/arm/plat-samsung/pm.c
@@ -16,7 +16,6 @@
#include
#include
#include
-#include
#include
#include
@@ -262,8 +261,7 @@ static int s3c_pm_enter(suspend_state_t state)
* require a full power-cycle)
*/
- if (!of_have_populated_dt() &&
- !any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
+ if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
!any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) {
printk(KERN_ERR "%s: No wake-up sources!\n", __func__);
printk(KERN_ERR "%s: Aborting sleep\n", __func__);
@@ -272,11 +270,8 @@ static int s3c_pm_enter(suspend_state_t state)
/* save all necessary core registers not covered by the drivers */
- if (!of_have_populated_dt()) {
- samsung_pm_save_gpios();
- samsung_pm_saved_gpios();
- }
-
+ samsung_pm_save_gpios();
+ samsung_pm_saved_gpios();
s3c_pm_save_uarts();
s3c_pm_save_core();
@@ -315,11 +310,8 @@ static int s3c_pm_enter(suspend_state_t state)
s3c_pm_restore_core();
s3c_pm_restore_uarts();
-
- if (!of_have_populated_dt()) {
- samsung_pm_restore_gpios();
- s3c_pm_restored_gpios();
- }
+ samsung_pm_restore_gpios();
+ s3c_pm_restored_gpios();
s3c_pm_debug_init();
diff --git a/trunk/arch/arm64/include/asm/pgtable.h b/trunk/arch/arm64/include/asm/pgtable.h
index 3a768e96cf0e..e333a243bfcc 100644
--- a/trunk/arch/arm64/include/asm/pgtable.h
+++ b/trunk/arch/arm64/include/asm/pgtable.h
@@ -320,6 +320,13 @@ extern int kern_addr_valid(unsigned long addr);
#include
+/*
+ * remap a physical page `pfn' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+#define io_remap_pfn_range(vma,from,pfn,size,prot) \
+ remap_pfn_range(vma, from, pfn, size, prot)
+
#define pgtable_cache_init() do { } while (0)
#endif /* !__ASSEMBLY__ */
diff --git a/trunk/arch/arm64/kernel/perf_event.c b/trunk/arch/arm64/kernel/perf_event.c
index 9ba33c40cdf8..1e49e5eb81e9 100644
--- a/trunk/arch/arm64/kernel/perf_event.c
+++ b/trunk/arch/arm64/kernel/perf_event.c
@@ -1336,7 +1336,6 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
return;
}
- perf_callchain_store(entry, regs->pc);
tail = (struct frame_tail __user *)regs->regs[29];
while (entry->nr < PERF_MAX_STACK_DEPTH &&
diff --git a/trunk/arch/avr32/include/asm/pgtable.h b/trunk/arch/avr32/include/asm/pgtable.h
index 4beff97e2033..6fbfea61f7bb 100644
--- a/trunk/arch/avr32/include/asm/pgtable.h
+++ b/trunk/arch/avr32/include/asm/pgtable.h
@@ -362,6 +362,9 @@ typedef pte_t *pte_addr_t;
#define kern_addr_valid(addr) (1)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
/* No page table caches to initialize (?) */
#define pgtable_cache_init() do { } while(0)
diff --git a/trunk/arch/blackfin/include/asm/pgtable.h b/trunk/arch/blackfin/include/asm/pgtable.h
index 0b049019eba7..b8663921d3c1 100644
--- a/trunk/arch/blackfin/include/asm/pgtable.h
+++ b/trunk/arch/blackfin/include/asm/pgtable.h
@@ -88,6 +88,7 @@ extern char empty_zero_page[];
* No page table caches to initialise.
*/
#define pgtable_cache_init() do { } while (0)
+#define io_remap_pfn_range remap_pfn_range
/*
* All 32bit addresses are effectively valid for vmalloc...
diff --git a/trunk/arch/c6x/include/asm/pgtable.h b/trunk/arch/c6x/include/asm/pgtable.h
index c0eed5b18860..38a4312eb2cb 100644
--- a/trunk/arch/c6x/include/asm/pgtable.h
+++ b/trunk/arch/c6x/include/asm/pgtable.h
@@ -71,6 +71,7 @@ extern unsigned long empty_zero_page;
* No page table caches to initialise
*/
#define pgtable_cache_init() do { } while (0)
+#define io_remap_pfn_range remap_pfn_range
#include
diff --git a/trunk/arch/cris/include/asm/pgtable.h b/trunk/arch/cris/include/asm/pgtable.h
index 8b8c86793225..7df430138355 100644
--- a/trunk/arch/cris/include/asm/pgtable.h
+++ b/trunk/arch/cris/include/asm/pgtable.h
@@ -258,6 +258,9 @@ static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long addre
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */
diff --git a/trunk/arch/frv/include/asm/pgtable.h b/trunk/arch/frv/include/asm/pgtable.h
index eb0110acd19b..6bc241e4b4f8 100644
--- a/trunk/arch/frv/include/asm/pgtable.h
+++ b/trunk/arch/frv/include/asm/pgtable.h
@@ -488,6 +488,9 @@ static inline int pte_file(pte_t pte)
#define PageSkip(page) (0)
#define kern_addr_valid(addr) (1)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
diff --git a/trunk/arch/h8300/include/asm/pgtable.h b/trunk/arch/h8300/include/asm/pgtable.h
index 7ca20f894dd7..62ef17676b40 100644
--- a/trunk/arch/h8300/include/asm/pgtable.h
+++ b/trunk/arch/h8300/include/asm/pgtable.h
@@ -52,6 +52,9 @@ extern int is_in_rom(unsigned long);
*/
#define pgtable_cache_init() do { } while (0)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
/*
* All 32bit addresses are effectively valid for vmalloc...
* Sort of meaningless for non-VM targets.
diff --git a/trunk/arch/hexagon/include/asm/pgtable.h b/trunk/arch/hexagon/include/asm/pgtable.h
index d8bd54fa431e..20d55f69fe55 100644
--- a/trunk/arch/hexagon/include/asm/pgtable.h
+++ b/trunk/arch/hexagon/include/asm/pgtable.h
@@ -452,6 +452,10 @@ static inline int pte_exec(pte_t pte)
#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+/* Nothing special about IO remapping at this point */
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
/* I think this is in case we have page table caches; needed by init/main.c */
#define pgtable_cache_init() do { } while (0)
diff --git a/trunk/arch/ia64/include/asm/irqflags.h b/trunk/arch/ia64/include/asm/irqflags.h
index cec6c06b52c0..1bf2cf2f4ab4 100644
--- a/trunk/arch/ia64/include/asm/irqflags.h
+++ b/trunk/arch/ia64/include/asm/irqflags.h
@@ -11,7 +11,6 @@
#define _ASM_IA64_IRQFLAGS_H
#include
-#include
#ifdef CONFIG_IA64_DEBUG_IRQ
extern unsigned long last_cli_ip;
diff --git a/trunk/arch/ia64/include/asm/pgtable.h b/trunk/arch/ia64/include/asm/pgtable.h
index 7935115398a6..815810cbbedc 100644
--- a/trunk/arch/ia64/include/asm/pgtable.h
+++ b/trunk/arch/ia64/include/asm/pgtable.h
@@ -493,6 +493,9 @@ extern void paging_init (void);
#define pte_to_pgoff(pte) ((pte_val(pte) << 1) >> 3)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 2) | _PAGE_FILE })
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
diff --git a/trunk/arch/m32r/include/asm/pgtable.h b/trunk/arch/m32r/include/asm/pgtable.h
index 103ce6710f07..8a28cfea2729 100644
--- a/trunk/arch/m32r/include/asm/pgtable.h
+++ b/trunk/arch/m32r/include/asm/pgtable.h
@@ -347,6 +347,9 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) (1)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
diff --git a/trunk/arch/m68k/include/asm/pgtable_mm.h b/trunk/arch/m68k/include/asm/pgtable_mm.h
index 9f5abbda1ea7..dc35e0e106e4 100644
--- a/trunk/arch/m68k/include/asm/pgtable_mm.h
+++ b/trunk/arch/m68k/include/asm/pgtable_mm.h
@@ -135,6 +135,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define kern_addr_valid(addr) (1)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
/* MMU-specific headers */
#ifdef CONFIG_SUN3
diff --git a/trunk/arch/m68k/include/asm/pgtable_no.h b/trunk/arch/m68k/include/asm/pgtable_no.h
index c527fc2ecf82..037028f4ab70 100644
--- a/trunk/arch/m68k/include/asm/pgtable_no.h
+++ b/trunk/arch/m68k/include/asm/pgtable_no.h
@@ -55,6 +55,9 @@ extern unsigned int kobjsize(const void *objp);
*/
#define pgtable_cache_init() do { } while (0)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
/*
* All 32bit addresses are effectively valid for vmalloc...
* Sort of meaningless for non-VM targets.
diff --git a/trunk/arch/metag/include/asm/hugetlb.h b/trunk/arch/metag/include/asm/hugetlb.h
index 471f481e67f3..f545477e61f3 100644
--- a/trunk/arch/metag/include/asm/hugetlb.h
+++ b/trunk/arch/metag/include/asm/hugetlb.h
@@ -2,7 +2,6 @@
#define _ASM_METAG_HUGETLB_H
#include
-#include
static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/trunk/arch/metag/include/asm/pgtable.h b/trunk/arch/metag/include/asm/pgtable.h
index 0d9dc5487296..1cd13d595198 100644
--- a/trunk/arch/metag/include/asm/pgtable.h
+++ b/trunk/arch/metag/include/asm/pgtable.h
@@ -333,6 +333,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define kern_addr_valid(addr) (1)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
/*
* No page table caches to initialise
*/
diff --git a/trunk/arch/microblaze/include/asm/pgtable.h b/trunk/arch/microblaze/include/asm/pgtable.h
index 95cef0b5f836..a7311cd9dee0 100644
--- a/trunk/arch/microblaze/include/asm/pgtable.h
+++ b/trunk/arch/microblaze/include/asm/pgtable.h
@@ -13,6 +13,9 @@
#include
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
#ifndef __ASSEMBLY__
extern int mem_init_done;
#endif
diff --git a/trunk/arch/mips/include/asm/pgtable.h b/trunk/arch/mips/include/asm/pgtable.h
index 008324d1c261..8b8f6b393363 100644
--- a/trunk/arch/mips/include/asm/pgtable.h
+++ b/trunk/arch/mips/include/asm/pgtable.h
@@ -394,7 +394,9 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
-#define io_remap_pfn_range io_remap_pfn_range
+#else
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/trunk/arch/mn10300/include/asm/irqflags.h b/trunk/arch/mn10300/include/asm/irqflags.h
index 8730c0a3c37d..678f68d5f37b 100644
--- a/trunk/arch/mn10300/include/asm/irqflags.h
+++ b/trunk/arch/mn10300/include/asm/irqflags.h
@@ -13,8 +13,9 @@
#define _ASM_IRQFLAGS_H
#include
-/* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */
-#include
+#ifndef __ASSEMBLY__
+#include
+#endif
/*
* interrupt control
diff --git a/trunk/arch/mn10300/include/asm/pgtable.h b/trunk/arch/mn10300/include/asm/pgtable.h
index 2ddaa67e7983..a1e894b5f65b 100644
--- a/trunk/arch/mn10300/include/asm/pgtable.h
+++ b/trunk/arch/mn10300/include/asm/pgtable.h
@@ -486,6 +486,9 @@ extern void update_mmu_cache(struct vm_area_struct *vma,
#define kern_addr_valid(addr) (1)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range((vma), (vaddr), (pfn), (size), (prot))
+
#define MK_IOSPACE_PFN(space, pfn) (pfn)
#define GET_IOSPACE(pfn) 0
#define GET_PFN(pfn) (pfn)
diff --git a/trunk/arch/mn10300/include/asm/smp.h b/trunk/arch/mn10300/include/asm/smp.h
index 56c42417d428..6745dbe64944 100644
--- a/trunk/arch/mn10300/include/asm/smp.h
+++ b/trunk/arch/mn10300/include/asm/smp.h
@@ -24,7 +24,6 @@
#ifndef __ASSEMBLY__
#include
#include
-#include
#endif
#ifdef CONFIG_SMP
@@ -86,7 +85,7 @@ extern cpumask_t cpu_boot_map;
extern void smp_init_cpus(void);
extern void smp_cache_interrupt(void);
extern void send_IPI_allbutself(int irq);
-extern int smp_nmi_call_function(void (*func)(void *), void *info, int wait);
+extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -101,7 +100,6 @@ extern void __cpu_die(unsigned int cpu);
#ifndef __ASSEMBLY__
static inline void smp_init_cpus(void) {}
-#define raw_smp_processor_id() 0
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_SMP */
diff --git a/trunk/arch/mn10300/include/asm/uaccess.h b/trunk/arch/mn10300/include/asm/uaccess.h
index d7966e0f7698..780560b330d9 100644
--- a/trunk/arch/mn10300/include/asm/uaccess.h
+++ b/trunk/arch/mn10300/include/asm/uaccess.h
@@ -161,7 +161,7 @@ struct __large_struct { unsigned long buf[100]; };
#define __get_user_check(x, ptr, size) \
({ \
- const __typeof__(*(ptr))* __guc_ptr = (ptr); \
+ const __typeof__(ptr) __guc_ptr = (ptr); \
int _e; \
if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \
_e = __get_user_nocheck((x), __guc_ptr, (size)); \
diff --git a/trunk/arch/mn10300/kernel/setup.c b/trunk/arch/mn10300/kernel/setup.c
index ebac9c11f796..33c3bd1e5c6d 100644
--- a/trunk/arch/mn10300/kernel/setup.c
+++ b/trunk/arch/mn10300/kernel/setup.c
@@ -38,7 +38,6 @@ struct mn10300_cpuinfo boot_cpu_data;
/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0x18000000;
-static char __initdata cmd_line[COMMAND_LINE_SIZE];
char redboot_command_line[COMMAND_LINE_SIZE] =
"console=ttyS0,115200 root=/dev/mtdblock3 rw";
@@ -75,19 +74,45 @@ static const char *const mn10300_cputypes[] = {
};
/*
- * Pick out the memory size. We look for mem=size,
- * where size is "size[KkMm]"
+ *
*/
-static int __init early_mem(char *p)
+static void __init parse_mem_cmdline(char **cmdline_p)
{
- memory_size = memparse(p, &p);
+ char *from, *to, c;
+
+ /* save unparsed command line copy for /proc/cmdline */
+ strcpy(boot_command_line, redboot_command_line);
+
+ /* see if there's an explicit memory size option */
+ from = redboot_command_line;
+ to = redboot_command_line;
+ c = ' ';
+
+ for (;;) {
+ if (c == ' ' && !memcmp(from, "mem=", 4)) {
+ if (to != redboot_command_line)
+ to--;
+ memory_size = memparse(from + 4, &from);
+ }
+
+ c = *(from++);
+ if (!c)
+ break;
+
+ *(to++) = c;
+ }
+
+ *to = '\0';
+ *cmdline_p = redboot_command_line;
if (memory_size == 0)
panic("Memory size not known\n");
- return 0;
+ memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS +
+ memory_size;
+ if (memory_end > phys_memory_end)
+ memory_end = phys_memory_end;
}
-early_param("mem", early_mem);
/*
* architecture specific setup
@@ -100,20 +125,7 @@ void __init setup_arch(char **cmdline_p)
cpu_init();
unit_setup();
smp_init_cpus();
-
- /* save unparsed command line copy for /proc/cmdline */
- strlcpy(boot_command_line, redboot_command_line, COMMAND_LINE_SIZE);
-
- /* populate cmd_line too for later use, preserving boot_command_line */
- strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
- *cmdline_p = cmd_line;
-
- parse_early_param();
-
- memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS +
- memory_size;
- if (memory_end > phys_memory_end)
- memory_end = phys_memory_end;
+ parse_mem_cmdline(cmdline_p);
init_mm.start_code = (unsigned long)&_text;
init_mm.end_code = (unsigned long) &_etext;
diff --git a/trunk/arch/mn10300/unit-asb2305/pci-asb2305.c b/trunk/arch/mn10300/unit-asb2305/pci-asb2305.c
index febb9cd83177..c4e2e79281e8 100644
--- a/trunk/arch/mn10300/unit-asb2305/pci-asb2305.c
+++ b/trunk/arch/mn10300/unit-asb2305/pci-asb2305.c
@@ -221,7 +221,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
/* Leave vm_pgoff as-is, the PCI space address is the physical
* address on this platform.
*/
- vma->vm_flags |= VM_LOCKED;
+ vma->vm_flags |= VM_LOCKED | VM_IO;
prot = pgprot_val(vma->vm_page_prot);
prot &= ~_PAGE_CACHE;
diff --git a/trunk/arch/openrisc/include/asm/pgtable.h b/trunk/arch/openrisc/include/asm/pgtable.h
index 37bf6a3ef8f4..14c900cfd30a 100644
--- a/trunk/arch/openrisc/include/asm/pgtable.h
+++ b/trunk/arch/openrisc/include/asm/pgtable.h
@@ -446,6 +446,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define kern_addr_valid(addr) (1)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
#include
/*
diff --git a/trunk/arch/parisc/hpux/fs.c b/trunk/arch/parisc/hpux/fs.c
index 88d0962de65a..838b479a42c4 100644
--- a/trunk/arch/parisc/hpux/fs.c
+++ b/trunk/arch/parisc/hpux/fs.c
@@ -60,7 +60,6 @@ struct hpux_dirent {
};
struct getdents_callback {
- struct dir_context ctx;
struct hpux_dirent __user *current_dir;
struct hpux_dirent __user *previous;
int count;
@@ -111,23 +110,24 @@ int hpux_getdents(unsigned int fd, struct hpux_dirent __user *dirent, unsigned i
{
struct fd arg;
struct hpux_dirent __user * lastdirent;
- struct getdents_callback buf = {
- .ctx.actor = filldir,
- .current_dir = dirent,
- .count = count
- };
+ struct getdents_callback buf;
int error;
arg = fdget(fd);
if (!arg.file)
return -EBADF;
- error = iterate_dir(arg.file, &buf.ctx);
+ buf.current_dir = dirent;
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
+
+ error = vfs_readdir(arg.file, filldir, &buf);
if (error >= 0)
error = buf.error;
lastdirent = buf.previous;
if (lastdirent) {
- if (put_user(buf.ctx.pos, &lastdirent->d_off))
+ if (put_user(arg.file->f_pos, &lastdirent->d_off))
error = -EFAULT;
else
error = count - buf.count;
diff --git a/trunk/arch/parisc/include/asm/mmzone.h b/trunk/arch/parisc/include/asm/mmzone.h
index b6b34a0987e7..cc50d33b7b88 100644
--- a/trunk/arch/parisc/include/asm/mmzone.h
+++ b/trunk/arch/parisc/include/asm/mmzone.h
@@ -27,7 +27,7 @@ extern struct node_map_data node_data[];
#define PFNNID_SHIFT (30 - PAGE_SHIFT)
#define PFNNID_MAP_MAX 512 /* support 512GB */
-extern signed char pfnnid_map[PFNNID_MAP_MAX];
+extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
#ifndef CONFIG_64BIT
#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
@@ -46,7 +46,7 @@ static inline int pfn_to_nid(unsigned long pfn)
i = pfn >> PFNNID_SHIFT;
BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
- return pfnnid_map[i];
+ return (int)pfnnid_map[i];
}
static inline int pfn_valid(int pfn)
diff --git a/trunk/arch/parisc/include/asm/pci.h b/trunk/arch/parisc/include/asm/pci.h
index 465154076d23..3234f492d575 100644
--- a/trunk/arch/parisc/include/asm/pci.h
+++ b/trunk/arch/parisc/include/asm/pci.h
@@ -225,9 +225,4 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
return channel ? 15 : 14;
}
-#define HAVE_PCI_MMAP
-
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-
#endif /* __ASM_PARISC_PCI_H */
diff --git a/trunk/arch/parisc/include/asm/pgtable.h b/trunk/arch/parisc/include/asm/pgtable.h
index 34899b5d959a..1e40d7f86be3 100644
--- a/trunk/arch/parisc/include/asm/pgtable.h
+++ b/trunk/arch/parisc/include/asm/pgtable.h
@@ -506,6 +506,9 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
#endif
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)
/* We provide our own get_unmapped_area to provide cache coherency */
diff --git a/trunk/arch/parisc/kernel/hardware.c b/trunk/arch/parisc/kernel/hardware.c
index 872275659d98..9e2d2e408529 100644
--- a/trunk/arch/parisc/kernel/hardware.c
+++ b/trunk/arch/parisc/kernel/hardware.c
@@ -1205,7 +1205,6 @@ static struct hp_hardware hp_hardware_list[] = {
{HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"},
{HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"},
{HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"},
- {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"},
{HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"},
{HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"},
{HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"},
diff --git a/trunk/arch/parisc/kernel/pacache.S b/trunk/arch/parisc/kernel/pacache.S
index b743a80eaba0..36d7f402e48e 100644
--- a/trunk/arch/parisc/kernel/pacache.S
+++ b/trunk/arch/parisc/kernel/pacache.S
@@ -860,7 +860,7 @@ ENTRY(flush_dcache_page_asm)
#endif
ldil L%dcache_stride, %r1
- ldw R%dcache_stride(%r1), r31
+ ldw R%dcache_stride(%r1), %r1
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
@@ -868,26 +868,26 @@ ENTRY(flush_dcache_page_asm)
depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
add %r28, %r25, %r25
- sub %r25, r31, %r25
-
-
-1: fdc,m r31(%r28)
- fdc,m r31(%r28)
- fdc,m r31(%r28)
- fdc,m r31(%r28)
- fdc,m r31(%r28)
- fdc,m r31(%r28)
- fdc,m r31(%r28)
- fdc,m r31(%r28)
- fdc,m r31(%r28)
- fdc,m r31(%r28)
- fdc,m r31(%r28)
- fdc,m r31(%r28)
- fdc,m r31(%r28)
- fdc,m r31(%r28)
- fdc,m r31(%r28)
+ sub %r25, %r1, %r25
+
+
+1: fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
cmpb,COND(<<) %r28, %r25,1b
- fdc,m r31(%r28)
+ fdc,m %r1(%r28)
sync
@@ -936,7 +936,7 @@ ENTRY(flush_icache_page_asm)
#endif
ldil L%icache_stride, %r1
- ldw R%icache_stride(%r1), %r31
+ ldw R%icache_stride(%r1), %r1
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
@@ -944,28 +944,28 @@ ENTRY(flush_icache_page_asm)
depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
add %r28, %r25, %r25
- sub %r25, %r31, %r25
+ sub %r25, %r1, %r25
/* fic only has the type 26 form on PA1.1, requiring an
* explicit space specification, so use %sr4 */
-1: fic,m %r31(%sr4,%r28)
- fic,m %r31(%sr4,%r28)
- fic,m %r31(%sr4,%r28)
- fic,m %r31(%sr4,%r28)
- fic,m %r31(%sr4,%r28)
- fic,m %r31(%sr4,%r28)
- fic,m %r31(%sr4,%r28)
- fic,m %r31(%sr4,%r28)
- fic,m %r31(%sr4,%r28)
- fic,m %r31(%sr4,%r28)
- fic,m %r31(%sr4,%r28)
- fic,m %r31(%sr4,%r28)
- fic,m %r31(%sr4,%r28)
- fic,m %r31(%sr4,%r28)
- fic,m %r31(%sr4,%r28)
+1: fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
cmpb,COND(<<) %r28, %r25,1b
- fic,m %r31(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
sync
diff --git a/trunk/arch/parisc/kernel/pci.c b/trunk/arch/parisc/kernel/pci.c
index 64f2764a8cef..60309051875e 100644
--- a/trunk/arch/parisc/kernel/pci.c
+++ b/trunk/arch/parisc/kernel/pci.c
@@ -220,33 +220,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
}
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- unsigned long prot;
-
- /*
- * I/O space can be accessed via normal processor loads and stores on
- * this platform but for now we elect not to do this and portable
- * drivers should not do this anyway.
- */
- if (mmap_state == pci_mmap_io)
- return -EINVAL;
-
- if (write_combine)
- return -EINVAL;
-
- /*
- * Ignore write-combine; for now only return uncached mappings.
- */
- prot = pgprot_val(vma->vm_page_prot);
- prot |= _PAGE_NO_CACHE;
- vma->vm_page_prot = __pgprot(prot);
-
- return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot);
-}
-
/*
* A driver is enabling the device. We make sure that all the appropriate
* bits are set to allow the device to operate as the driver is expecting.
diff --git a/trunk/arch/parisc/mm/init.c b/trunk/arch/parisc/mm/init.c
index 505b56c6b9b9..1c965642068b 100644
--- a/trunk/arch/parisc/mm/init.c
+++ b/trunk/arch/parisc/mm/init.c
@@ -47,7 +47,7 @@ pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pt
#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
-signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
+unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif
static struct resource data_resource = {
diff --git a/trunk/arch/powerpc/include/asm/exception-64s.h b/trunk/arch/powerpc/include/asm/exception-64s.h
index 46793b58a761..8e5fae8beaf6 100644
--- a/trunk/arch/powerpc/include/asm/exception-64s.h
+++ b/trunk/arch/powerpc/include/asm/exception-64s.h
@@ -513,7 +513,7 @@ label##_common: \
*/
#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
- FINISH_NAP;DISABLE_INTS;RUNLATCH_ON)
+ FINISH_NAP;RUNLATCH_ON;DISABLE_INTS)
/*
* When the idle code in power4_idle puts the CPU into NAP mode,
diff --git a/trunk/arch/powerpc/include/asm/pgtable.h b/trunk/arch/powerpc/include/asm/pgtable.h
index b6293d26bd39..7aeb9555f6ea 100644
--- a/trunk/arch/powerpc/include/asm/pgtable.h
+++ b/trunk/arch/powerpc/include/asm/pgtable.h
@@ -198,6 +198,9 @@ extern void paging_init(void);
*/
#define kern_addr_valid(addr) (1)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
#include
diff --git a/trunk/arch/powerpc/kernel/exceptions-64s.S b/trunk/arch/powerpc/kernel/exceptions-64s.S
index 40e4a17c8ba0..e783453f910d 100644
--- a/trunk/arch/powerpc/kernel/exceptions-64s.S
+++ b/trunk/arch/powerpc/kernel/exceptions-64s.S
@@ -683,7 +683,7 @@ machine_check_common:
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
- STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
+ STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
#ifdef CONFIG_PPC_DOORBELL
STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
diff --git a/trunk/arch/powerpc/kernel/irq.c b/trunk/arch/powerpc/kernel/irq.c
index ea185e0b3cae..5cbcf4d5a808 100644
--- a/trunk/arch/powerpc/kernel/irq.c
+++ b/trunk/arch/powerpc/kernel/irq.c
@@ -162,7 +162,7 @@ notrace unsigned int __check_irq_replay(void)
* in case we also had a rollover while hard disabled
*/
local_paca->irq_happened &= ~PACA_IRQ_DEC;
- if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
+ if (decrementer_check_overflow())
return 0x900;
/* Finally check if an external interrupt happened */
diff --git a/trunk/arch/powerpc/kernel/pci-common.c b/trunk/arch/powerpc/kernel/pci-common.c
index f46914a0f33e..eabeec991016 100644
--- a/trunk/arch/powerpc/kernel/pci-common.c
+++ b/trunk/arch/powerpc/kernel/pci-common.c
@@ -994,7 +994,7 @@ void pcibios_setup_bus_self(struct pci_bus *bus)
ppc_md.pci_dma_bus_setup(bus);
}
-static void pcibios_setup_device(struct pci_dev *dev)
+void pcibios_setup_device(struct pci_dev *dev)
{
/* Fixup NUMA node as it may not be setup yet by the generic
* code and is needed by the DMA init
@@ -1015,17 +1015,6 @@ static void pcibios_setup_device(struct pci_dev *dev)
ppc_md.pci_irq_fixup(dev);
}
-int pcibios_add_device(struct pci_dev *dev)
-{
- /*
- * We can only call pcibios_setup_device() after bus setup is complete,
- * since some of the platform specific DMA setup code depends on it.
- */
- if (dev->bus->is_added)
- pcibios_setup_device(dev);
- return 0;
-}
-
void pcibios_setup_bus_devices(struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -1480,6 +1469,10 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
if (ppc_md.pcibios_enable_device_hook(dev))
return -EINVAL;
+ /* avoid pcie irq fix up impact on cardbus */
+ if (dev->hdr_type != PCI_HEADER_TYPE_CARDBUS)
+ pcibios_setup_device(dev);
+
return pci_enable_resources(dev, mask);
}
diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c
index 076d1242507a..b0f3e3f77e72 100644
--- a/trunk/arch/powerpc/kernel/process.c
+++ b/trunk/arch/powerpc/kernel/process.c
@@ -1369,7 +1369,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
-void notrace __ppc64_runlatch_on(void)
+void __ppc64_runlatch_on(void)
{
struct thread_info *ti = current_thread_info();
unsigned long ctrl;
@@ -1382,7 +1382,7 @@ void notrace __ppc64_runlatch_on(void)
}
/* Called with hard IRQs off */
-void notrace __ppc64_runlatch_off(void)
+void __ppc64_runlatch_off(void)
{
struct thread_info *ti = current_thread_info();
unsigned long ctrl;
diff --git a/trunk/arch/powerpc/kernel/traps.c b/trunk/arch/powerpc/kernel/traps.c
index c0e5caf8ccc7..f18c79c324ef 100644
--- a/trunk/arch/powerpc/kernel/traps.c
+++ b/trunk/arch/powerpc/kernel/traps.c
@@ -1165,16 +1165,6 @@ void __kprobes program_check_exception(struct pt_regs *regs)
exception_exit(prev_state);
}
-/*
- * This occurs when running in hypervisor mode on POWER6 or later
- * and an illegal instruction is encountered.
- */
-void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
-{
- regs->msr |= REASON_ILLEGAL;
- program_check_exception(regs);
-}
-
void alignment_exception(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
diff --git a/trunk/arch/powerpc/kvm/booke.c b/trunk/arch/powerpc/kvm/booke.c
index 1a1b51189773..5cd7ad0c1176 100644
--- a/trunk/arch/powerpc/kvm/booke.c
+++ b/trunk/arch/powerpc/kvm/booke.c
@@ -673,6 +673,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
ret = s;
goto out;
}
+ kvmppc_lazy_ee_enable();
kvm_guest_enter();
@@ -698,8 +699,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_load_guest_fp(vcpu);
#endif
- kvmppc_lazy_ee_enable();
-
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
/* No need for kvm_guest_exit. It's done in handle_exit.
diff --git a/trunk/arch/powerpc/mm/hugetlbpage.c b/trunk/arch/powerpc/mm/hugetlbpage.c
index 77fdd2cef33b..237c8e5f2640 100644
--- a/trunk/arch/powerpc/mm/hugetlbpage.c
+++ b/trunk/arch/powerpc/mm/hugetlbpage.c
@@ -592,14 +592,8 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
do {
pmd = pmd_offset(pud, addr);
next = pmd_addr_end(addr, end);
- if (!is_hugepd(pmd)) {
- /*
- * if it is not hugepd pointer, we should already find
- * it cleared.
- */
- WARN_ON(!pmd_none_or_clear_bad(pmd));
+ if (pmd_none_or_clear_bad(pmd))
continue;
- }
#ifdef CONFIG_PPC_FSL_BOOK3E
/*
* Increment next by the size of the huge mapping since
diff --git a/trunk/arch/powerpc/platforms/cell/spufs/inode.c b/trunk/arch/powerpc/platforms/cell/spufs/inode.c
index f3900427ffab..35f77a42bedf 100644
--- a/trunk/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/trunk/arch/powerpc/platforms/cell/spufs/inode.c
@@ -238,7 +238,7 @@ const struct file_operations spufs_context_fops = {
.release = spufs_dir_close,
.llseek = dcache_dir_lseek,
.read = generic_read_dir,
- .iterate = dcache_readdir,
+ .readdir = dcache_readdir,
.fsync = noop_fsync,
};
EXPORT_SYMBOL_GPL(spufs_context_fops);
diff --git a/trunk/arch/powerpc/platforms/pseries/eeh_cache.c b/trunk/arch/powerpc/platforms/pseries/eeh_cache.c
index 5ce3ba7ad137..5a4c87903057 100644
--- a/trunk/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/trunk/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -294,6 +294,8 @@ void __init eeh_addr_cache_build(void)
spin_lock_init(&pci_io_addr_cache_root.piar_lock);
for_each_pci_dev(dev) {
+ eeh_addr_cache_insert_dev(dev);
+
dn = pci_device_to_OF_node(dev);
if (!dn)
continue;
@@ -306,8 +308,6 @@ void __init eeh_addr_cache_build(void)
dev->dev.archdata.edev = edev;
edev->pdev = dev;
- eeh_addr_cache_insert_dev(dev);
-
eeh_sysfs_add_device(dev);
}
diff --git a/trunk/arch/powerpc/platforms/pseries/eeh_pe.c b/trunk/arch/powerpc/platforms/pseries/eeh_pe.c
index 9d4a9e8562b2..fe43d1aa2cf1 100644
--- a/trunk/arch/powerpc/platforms/pseries/eeh_pe.c
+++ b/trunk/arch/powerpc/platforms/pseries/eeh_pe.c
@@ -639,8 +639,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
if (pe->type & EEH_PE_PHB) {
bus = pe->phb->bus;
- } else if (pe->type & EEH_PE_BUS ||
- pe->type & EEH_PE_DEVICE) {
+ } else if (pe->type & EEH_PE_BUS) {
edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
pdev = eeh_dev_to_pci_dev(edev);
if (pdev)
diff --git a/trunk/arch/powerpc/sysdev/fsl_pci.c b/trunk/arch/powerpc/sysdev/fsl_pci.c
index 46ac1ddea683..028ac1f71b51 100644
--- a/trunk/arch/powerpc/sysdev/fsl_pci.c
+++ b/trunk/arch/powerpc/sysdev/fsl_pci.c
@@ -97,14 +97,22 @@ static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
return indirect_read_config(bus, devfn, offset, len, val);
}
-#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
-
-static struct pci_ops fsl_indirect_pcie_ops =
+static struct pci_ops fsl_indirect_pci_ops =
{
.read = fsl_indirect_read_config,
.write = indirect_write_config,
};
+static void __init fsl_setup_indirect_pci(struct pci_controller* hose,
+ resource_size_t cfg_addr,
+ resource_size_t cfg_data, u32 flags)
+{
+ setup_indirect_pci(hose, cfg_addr, cfg_data, flags);
+ hose->ops = &fsl_indirect_pci_ops;
+}
+
+#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
+
#define MAX_PHYS_ADDR_BITS 40
static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
@@ -496,15 +504,13 @@ int __init fsl_add_bridge(struct platform_device *pdev, int is_primary)
if (!hose->private_data)
goto no_bridge;
- setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
- PPC_INDIRECT_TYPE_BIG_ENDIAN);
+ fsl_setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
+ PPC_INDIRECT_TYPE_BIG_ENDIAN);
if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
- /* use fsl_indirect_read_config for PCIe */
- hose->ops = &fsl_indirect_pcie_ops;
/* For PCIE read HEADER_TYPE to identify controler mode */
early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
@@ -808,8 +814,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
if (ret)
goto err0;
} else {
- setup_indirect_pci(hose, rsrc_cfg.start,
- rsrc_cfg.start + 4, 0);
+ fsl_setup_indirect_pci(hose, rsrc_cfg.start,
+ rsrc_cfg.start + 4, 0);
}
printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
diff --git a/trunk/arch/s390/include/asm/dma-mapping.h b/trunk/arch/s390/include/asm/dma-mapping.h
index 2f8c1abeb086..886ac7d4937a 100644
--- a/trunk/arch/s390/include/asm/dma-mapping.h
+++ b/trunk/arch/s390/include/asm/dma-mapping.h
@@ -50,10 +50,9 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
- debug_dma_mapping_error(dev, dma_addr);
if (dma_ops->mapping_error)
return dma_ops->mapping_error(dev, dma_addr);
- return (dma_addr == DMA_ERROR_CODE);
+ return (dma_addr == 0UL);
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
diff --git a/trunk/arch/s390/include/asm/pgtable.h b/trunk/arch/s390/include/asm/pgtable.h
index 9aefa3c64eb2..e8b6e5b8932c 100644
--- a/trunk/arch/s390/include/asm/pgtable.h
+++ b/trunk/arch/s390/include/asm/pgtable.h
@@ -58,6 +58,9 @@ extern unsigned long zero_page_mask;
#define __HAVE_COLOR_ZERO_PAGE
/* TODO: s390 cannot support io_remap_pfn_range... */
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
#endif /* !__ASSEMBLY__ */
/*
diff --git a/trunk/arch/s390/kernel/ipl.c b/trunk/arch/s390/kernel/ipl.c
index feb719d3c851..d8a6a385d048 100644
--- a/trunk/arch/s390/kernel/ipl.c
+++ b/trunk/arch/s390/kernel/ipl.c
@@ -754,9 +754,9 @@ static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
.write = reipl_fcp_scpdata_write,
};
-DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
+DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
reipl_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
+DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n",
reipl_block_fcp->ipl_info.fcp.lun);
DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
reipl_block_fcp->ipl_info.fcp.bootprog);
@@ -1323,9 +1323,9 @@ static struct shutdown_action __refdata reipl_action = {
/* FCP dump device attributes */
-DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
+DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
dump_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
+DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
dump_block_fcp->ipl_info.fcp.lun);
DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
dump_block_fcp->ipl_info.fcp.bootprog);
diff --git a/trunk/arch/s390/kernel/irq.c b/trunk/arch/s390/kernel/irq.c
index dd3c1994b8bd..408e866ae548 100644
--- a/trunk/arch/s390/kernel/irq.c
+++ b/trunk/arch/s390/kernel/irq.c
@@ -312,7 +312,6 @@ void measurement_alert_subclass_unregister(void)
}
EXPORT_SYMBOL(measurement_alert_subclass_unregister);
-#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
/*
@@ -321,7 +320,6 @@ void synchronize_irq(unsigned int irq)
*/
}
EXPORT_SYMBOL_GPL(synchronize_irq);
-#endif
#ifndef CONFIG_PCI
diff --git a/trunk/arch/s390/mm/mem_detect.c b/trunk/arch/s390/mm/mem_detect.c
index cca388253a39..3cbd3b8bf311 100644
--- a/trunk/arch/s390/mm/mem_detect.c
+++ b/trunk/arch/s390/mm/mem_detect.c
@@ -123,8 +123,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
continue;
} else if ((addr <= chunk->addr) &&
(addr + size >= chunk->addr + chunk->size)) {
- memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk));
- memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk));
+ memset(chunk, 0 , sizeof(*chunk));
} else if (addr + size < chunk->addr + chunk->size) {
chunk->size = chunk->addr + chunk->size - addr - size;
chunk->addr = addr + size;
diff --git a/trunk/arch/score/include/asm/pgtable.h b/trunk/arch/score/include/asm/pgtable.h
index db96ad9afc03..2fd469807683 100644
--- a/trunk/arch/score/include/asm/pgtable.h
+++ b/trunk/arch/score/include/asm/pgtable.h
@@ -113,6 +113,9 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#define pte_clear(mm, addr, xp) \
do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
/*
* The "pgd_xxx()" functions here are trivial for a folded two-level
* setup: the pgd is never bad, and a pmd always exists (as it's folded
diff --git a/trunk/arch/sh/include/asm/pgtable.h b/trunk/arch/sh/include/asm/pgtable.h
index cf434c64408d..9210e93a92c3 100644
--- a/trunk/arch/sh/include/asm/pgtable.h
+++ b/trunk/arch/sh/include/asm/pgtable.h
@@ -124,6 +124,9 @@ typedef pte_t *pte_addr_t;
#define kern_addr_valid(addr) (1)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
/*
diff --git a/trunk/arch/sparc/include/asm/Kbuild b/trunk/arch/sparc/include/asm/Kbuild
index 7e4a97fbded4..ff18e3cfb6b1 100644
--- a/trunk/arch/sparc/include/asm/Kbuild
+++ b/trunk/arch/sparc/include/asm/Kbuild
@@ -6,7 +6,6 @@ generic-y += cputime.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += exec.h
-generic-y += linkage.h
generic-y += local64.h
generic-y += mutex.h
generic-y += irq_regs.h
diff --git a/trunk/arch/sparc/include/asm/leon.h b/trunk/arch/sparc/include/asm/leon.h
index b836e9297f2a..15a716934e4d 100644
--- a/trunk/arch/sparc/include/asm/leon.h
+++ b/trunk/arch/sparc/include/asm/leon.h
@@ -135,7 +135,7 @@ static inline int sparc_leon3_cpuid(void)
#ifdef CONFIG_SMP
# define LEON3_IRQ_IPI_DEFAULT 13
-# define LEON3_IRQ_TICKER (leon3_gptimer_irq)
+# define LEON3_IRQ_TICKER (leon3_ticker_irq)
# define LEON3_IRQ_CROSS_CALL 15
#endif
diff --git a/trunk/arch/sparc/include/asm/leon_amba.h b/trunk/arch/sparc/include/asm/leon_amba.h
index 24ec48c3ff90..f3034eddf468 100644
--- a/trunk/arch/sparc/include/asm/leon_amba.h
+++ b/trunk/arch/sparc/include/asm/leon_amba.h
@@ -47,7 +47,6 @@ struct amba_prom_registers {
#define LEON3_GPTIMER_LD 4
#define LEON3_GPTIMER_IRQEN 8
#define LEON3_GPTIMER_SEPIRQ 8
-#define LEON3_GPTIMER_TIMERS 0x7
#define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */
/* 0 = hold scalar and counter */
diff --git a/trunk/arch/sparc/include/asm/linkage.h b/trunk/arch/sparc/include/asm/linkage.h
new file mode 100644
index 000000000000..291c2d01c44f
--- /dev/null
+++ b/trunk/arch/sparc/include/asm/linkage.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+/* Nothing to see here... */
+
+#endif
diff --git a/trunk/arch/sparc/include/asm/pgtable_32.h b/trunk/arch/sparc/include/asm/pgtable_32.h
index 502f632f6cc7..6fc13483f702 100644
--- a/trunk/arch/sparc/include/asm/pgtable_32.h
+++ b/trunk/arch/sparc/include/asm/pgtable_32.h
@@ -443,7 +443,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
-#define io_remap_pfn_range io_remap_pfn_range
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
diff --git a/trunk/arch/sparc/include/asm/pgtable_64.h b/trunk/arch/sparc/include/asm/pgtable_64.h
index 79c214efa3fe..7619f2f792af 100644
--- a/trunk/arch/sparc/include/asm/pgtable_64.h
+++ b/trunk/arch/sparc/include/asm/pgtable_64.h
@@ -914,7 +914,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
-#define io_remap_pfn_range io_remap_pfn_range
#include
#include
diff --git a/trunk/arch/sparc/kernel/ds.c b/trunk/arch/sparc/kernel/ds.c
index 5ef48dab5636..75bb608c423e 100644
--- a/trunk/arch/sparc/kernel/ds.c
+++ b/trunk/arch/sparc/kernel/ds.c
@@ -843,8 +843,7 @@ void ldom_reboot(const char *boot_command)
unsigned long len;
strcpy(full_boot_str, "boot ");
- strlcpy(full_boot_str + strlen("boot "), boot_command,
- sizeof(full_boot_str + strlen("boot ")));
+ strcpy(full_boot_str + strlen("boot "), boot_command);
len = strlen(full_boot_str);
if (reboot_data_supported) {
diff --git a/trunk/arch/sparc/kernel/leon_kernel.c b/trunk/arch/sparc/kernel/leon_kernel.c
index b7c68976cbc7..7c0231dabe44 100644
--- a/trunk/arch/sparc/kernel/leon_kernel.c
+++ b/trunk/arch/sparc/kernel/leon_kernel.c
@@ -38,6 +38,7 @@ static DEFINE_SPINLOCK(leon_irq_lock);
unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
+int leon3_ticker_irq; /* Timer ticker IRQ */
unsigned int sparc_leon_eirq;
#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
#define LEON_IACK (&leon3_irqctrl_regs->iclear)
@@ -277,9 +278,6 @@ irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
leon_clear_profile_irq(cpu);
- if (cpu == boot_cpu_id)
- timer_interrupt(irq, NULL);
-
ce = &per_cpu(sparc32_clockevent, cpu);
irq_enter();
@@ -301,7 +299,6 @@ void __init leon_init_timers(void)
int icsel;
int ampopts;
int err;
- u32 config;
sparc_config.get_cycles_offset = leon_cycles_offset;
sparc_config.cs_period = 1000000 / HZ;
@@ -380,6 +377,23 @@ void __init leon_init_timers(void)
LEON3_BYPASS_STORE_PA(
&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);
+#ifdef CONFIG_SMP
+ leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx;
+
+ if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) &
+ (1<e[leon3_gptimer_idx+1].val,
+ 0);
+ LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld,
+ (((1000000/HZ) - 1)));
+ LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
+ 0);
+#endif
+
/*
* The IRQ controller may (if implemented) consist of multiple
* IRQ controllers, each mapped on a 4Kb boundary.
@@ -402,6 +416,13 @@ void __init leon_init_timers(void)
if (eirq != 0)
leon_eirq_setup(eirq);
+ irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx);
+ err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
+ if (err) {
+ printk(KERN_ERR "unable to attach timer IRQ%d\n", irq);
+ prom_halt();
+ }
+
#ifdef CONFIG_SMP
{
unsigned long flags;
@@ -418,31 +439,30 @@ void __init leon_init_timers(void)
}
#endif
- config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config);
- if (config & (1 << LEON3_GPTIMER_SEPIRQ))
- leon3_gptimer_irq += leon3_gptimer_idx;
- else if ((config & LEON3_GPTIMER_TIMERS) > 1)
- pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n");
+ LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
+ LEON3_GPTIMER_EN |
+ LEON3_GPTIMER_RL |
+ LEON3_GPTIMER_LD |
+ LEON3_GPTIMER_IRQEN);
#ifdef CONFIG_SMP
/* Install per-cpu IRQ handler for broadcasted ticker */
- irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq,
+ irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq,
"per-cpu", 0);
err = request_irq(irq, leon_percpu_timer_ce_interrupt,
- IRQF_PERCPU | IRQF_TIMER, "timer", NULL);
-#else
- irq = _leon_build_device_irq(NULL, leon3_gptimer_irq);
- err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
-#endif
+ IRQF_PERCPU | IRQF_TIMER, "ticker",
+ NULL);
if (err) {
- pr_err("Unable to attach timer IRQ%d\n", irq);
+ printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq);
prom_halt();
}
- LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
+
+ LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
LEON3_GPTIMER_EN |
LEON3_GPTIMER_RL |
LEON3_GPTIMER_LD |
LEON3_GPTIMER_IRQEN);
+#endif
return;
bad:
printk(KERN_ERR "No Timer/irqctrl found\n");
diff --git a/trunk/arch/sparc/kernel/leon_pci_grpci1.c b/trunk/arch/sparc/kernel/leon_pci_grpci1.c
index 6df26e37f879..7739a54315e2 100644
--- a/trunk/arch/sparc/kernel/leon_pci_grpci1.c
+++ b/trunk/arch/sparc/kernel/leon_pci_grpci1.c
@@ -536,9 +536,11 @@ static int grpci1_of_probe(struct platform_device *ofdev)
/* find device register base address */
res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&ofdev->dev, res);
- if (IS_ERR(regs))
- return PTR_ERR(regs);
+ regs = devm_request_and_ioremap(&ofdev->dev, res);
+ if (!regs) {
+ dev_err(&ofdev->dev, "io-regs mapping failed\n");
+ return -EADDRNOTAVAIL;
+ }
/*
* check that we're in Host Slot and that we can act as a Host Bridge
diff --git a/trunk/arch/sparc/kernel/leon_pmc.c b/trunk/arch/sparc/kernel/leon_pmc.c
index b0b3967a2dd2..bdf53d9a8d46 100644
--- a/trunk/arch/sparc/kernel/leon_pmc.c
+++ b/trunk/arch/sparc/kernel/leon_pmc.c
@@ -47,10 +47,6 @@ void pmc_leon_idle_fixup(void)
* MMU does not get a TLB miss here by using the MMU BYPASS ASI.
*/
register unsigned int address = (unsigned int)leon3_irqctrl_regs;
-
- /* Interrupts need to be enabled to not hang the CPU */
- local_irq_enable();
-
__asm__ __volatile__ (
"wr %%g0, %%asr19\n"
"lda [%0] %1, %%g0\n"
@@ -64,9 +60,6 @@ void pmc_leon_idle_fixup(void)
*/
void pmc_leon_idle(void)
{
- /* Interrupts need to be enabled to not hang the CPU */
- local_irq_enable();
-
/* For systems without power-down, this will be no-op */
__asm__ __volatile__ ("wr %g0, %asr19\n\t");
}
diff --git a/trunk/arch/sparc/kernel/pci.c b/trunk/arch/sparc/kernel/pci.c
index 2031c65fd4ea..baf4366e2d6a 100644
--- a/trunk/arch/sparc/kernel/pci.c
+++ b/trunk/arch/sparc/kernel/pci.c
@@ -773,6 +773,15 @@ static int __pci_mmap_make_offset(struct pci_dev *pdev,
return 0;
}
+/* Set vm_flags of VMA, as appropriate for this architecture, for a pci device
+ * mapping.
+ */
+static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state)
+{
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+}
+
/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
* device mapping.
*/
@@ -800,6 +809,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
if (ret < 0)
return ret;
+ __pci_mmap_set_flags(dev, vma, mmap_state);
__pci_mmap_set_pgprot(dev, vma, mmap_state);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
diff --git a/trunk/arch/sparc/kernel/setup_32.c b/trunk/arch/sparc/kernel/setup_32.c
index 1434526970a6..38bf80a22f02 100644
--- a/trunk/arch/sparc/kernel/setup_32.c
+++ b/trunk/arch/sparc/kernel/setup_32.c
@@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p)
/* Initialize PROM console and command line. */
*cmdline_p = prom_getbootargs();
- strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+ strcpy(boot_command_line, *cmdline_p);
parse_early_param();
boot_flags_init(*cmdline_p);
diff --git a/trunk/arch/sparc/kernel/setup_64.c b/trunk/arch/sparc/kernel/setup_64.c
index 13785547e435..88a127b9c69e 100644
--- a/trunk/arch/sparc/kernel/setup_64.c
+++ b/trunk/arch/sparc/kernel/setup_64.c
@@ -555,7 +555,7 @@ void __init setup_arch(char **cmdline_p)
{
/* Initialize PROM console and command line. */
*cmdline_p = prom_getbootargs();
- strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+ strcpy(boot_command_line, *cmdline_p);
parse_early_param();
boot_flags_init(*cmdline_p);
diff --git a/trunk/arch/sparc/mm/init_64.c b/trunk/arch/sparc/mm/init_64.c
index 04fd55a6e461..a7171997adfd 100644
--- a/trunk/arch/sparc/mm/init_64.c
+++ b/trunk/arch/sparc/mm/init_64.c
@@ -1098,14 +1098,7 @@ static int __init grab_mblocks(struct mdesc_handle *md)
m->size = *val;
val = mdesc_get_property(md, node,
"address-congruence-offset", NULL);
-
- /* The address-congruence-offset property is optional.
- * Explicity zero it be identifty this.
- */
- if (val)
- m->offset = *val;
- else
- m->offset = 0UL;
+ m->offset = *val;
numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
count - 1, m->base, m->size, m->offset);
diff --git a/trunk/arch/sparc/mm/tlb.c b/trunk/arch/sparc/mm/tlb.c
index 37e7bc4c95b3..83d89bcb44af 100644
--- a/trunk/arch/sparc/mm/tlb.c
+++ b/trunk/arch/sparc/mm/tlb.c
@@ -85,8 +85,8 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
}
if (!tb->active) {
- flush_tsb_user_page(mm, vaddr);
global_flush_tlb_page(mm, vaddr);
+ flush_tsb_user_page(mm, vaddr);
goto out;
}
diff --git a/trunk/arch/sparc/prom/bootstr_32.c b/trunk/arch/sparc/prom/bootstr_32.c
index d2b49d2365e7..f5ec32e0d419 100644
--- a/trunk/arch/sparc/prom/bootstr_32.c
+++ b/trunk/arch/sparc/prom/bootstr_32.c
@@ -23,25 +23,23 @@ prom_getbootargs(void)
return barg_buf;
}
- switch (prom_vers) {
+ switch(prom_vers) {
case PROM_V0:
cp = barg_buf;
/* Start from 1 and go over fd(0,0,0)kernel */
- for (iter = 1; iter < 8; iter++) {
+ for(iter = 1; iter < 8; iter++) {
arg = (*(romvec->pv_v0bootargs))->argv[iter];
if (arg == NULL)
break;
- while (*arg != 0) {
+ while(*arg != 0) {
/* Leave place for space and null. */
- if (cp >= barg_buf + BARG_LEN - 2)
+ if(cp >= barg_buf + BARG_LEN-2){
/* We might issue a warning here. */
break;
+ }
*cp++ = *arg++;
}
*cp++ = ' ';
- if (cp >= barg_buf + BARG_LEN - 1)
- /* We might issue a warning here. */
- break;
}
*cp = 0;
break;
diff --git a/trunk/arch/sparc/prom/tree_64.c b/trunk/arch/sparc/prom/tree_64.c
index bd1b2a3ac34e..92204c3800b5 100644
--- a/trunk/arch/sparc/prom/tree_64.c
+++ b/trunk/arch/sparc/prom/tree_64.c
@@ -39,7 +39,7 @@ inline phandle __prom_getchild(phandle node)
return prom_node_to_node("child", node);
}
-phandle prom_getchild(phandle node)
+inline phandle prom_getchild(phandle node)
{
phandle cnode;
@@ -72,7 +72,7 @@ inline phandle __prom_getsibling(phandle node)
return prom_node_to_node(prom_peer_name, node);
}
-phandle prom_getsibling(phandle node)
+inline phandle prom_getsibling(phandle node)
{
phandle sibnode;
@@ -89,7 +89,7 @@ EXPORT_SYMBOL(prom_getsibling);
/* Return the length in bytes of property 'prop' at node 'node'.
* Return -1 on error.
*/
-int prom_getproplen(phandle node, const char *prop)
+inline int prom_getproplen(phandle node, const char *prop)
{
unsigned long args[6];
@@ -113,8 +113,8 @@ EXPORT_SYMBOL(prom_getproplen);
* 'buffer' which has a size of 'bufsize'. If the acquisition
* was successful the length will be returned, else -1 is returned.
*/
-int prom_getproperty(phandle node, const char *prop,
- char *buffer, int bufsize)
+inline int prom_getproperty(phandle node, const char *prop,
+ char *buffer, int bufsize)
{
unsigned long args[8];
int plen;
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(prom_getproperty);
/* Acquire an integer property and return its value. Returns -1
* on failure.
*/
-int prom_getint(phandle node, const char *prop)
+inline int prom_getint(phandle node, const char *prop)
{
int intprop;
@@ -235,7 +235,7 @@ static const char *prom_nextprop_name = "nextprop";
/* Return the first property type for node 'node'.
* buffer should be at least 32B in length
*/
-char *prom_firstprop(phandle node, char *buffer)
+inline char *prom_firstprop(phandle node, char *buffer)
{
unsigned long args[7];
@@ -261,7 +261,7 @@ EXPORT_SYMBOL(prom_firstprop);
* at node 'node' . Returns NULL string if no more
* property types for this node.
*/
-char *prom_nextprop(phandle node, const char *oprop, char *buffer)
+inline char *prom_nextprop(phandle node, const char *oprop, char *buffer)
{
unsigned long args[7];
char buf[32];
diff --git a/trunk/arch/tile/include/asm/pgtable.h b/trunk/arch/tile/include/asm/pgtable.h
index 33587f16c152..73b1a4c9ad03 100644
--- a/trunk/arch/tile/include/asm/pgtable.h
+++ b/trunk/arch/tile/include/asm/pgtable.h
@@ -362,6 +362,9 @@ do { \
#define kern_addr_valid(addr) (1)
#endif /* CONFIG_FLATMEM */
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
extern void vmalloc_sync_all(void);
#endif /* !__ASSEMBLY__ */
diff --git a/trunk/arch/tile/lib/exports.c b/trunk/arch/tile/lib/exports.c
index a93b02a25222..4385cb6fa00a 100644
--- a/trunk/arch/tile/lib/exports.c
+++ b/trunk/arch/tile/lib/exports.c
@@ -84,6 +84,4 @@ uint64_t __ashrdi3(uint64_t, unsigned int);
EXPORT_SYMBOL(__ashrdi3);
uint64_t __ashldi3(uint64_t, unsigned int);
EXPORT_SYMBOL(__ashldi3);
-int __ffsdi2(uint64_t);
-EXPORT_SYMBOL(__ffsdi2);
#endif
diff --git a/trunk/arch/um/drivers/mconsole_kern.c b/trunk/arch/um/drivers/mconsole_kern.c
index 3df3bd544492..d7d21851e60c 100644
--- a/trunk/arch/um/drivers/mconsole_kern.c
+++ b/trunk/arch/um/drivers/mconsole_kern.c
@@ -147,7 +147,7 @@ void mconsole_proc(struct mc_request *req)
}
do {
- loff_t pos = file->f_pos;
+ loff_t pos;
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
len = vfs_read(file, buf, PAGE_SIZE - 1, &pos);
diff --git a/trunk/arch/um/include/asm/pgtable.h b/trunk/arch/um/include/asm/pgtable.h
index bf974f712af7..ae02909a1875 100644
--- a/trunk/arch/um/include/asm/pgtable.h
+++ b/trunk/arch/um/include/asm/pgtable.h
@@ -69,6 +69,8 @@ extern unsigned long end_iomem;
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
+#define io_remap_pfn_range remap_pfn_range
+
/*
* The i386 can't do page protection for execute, and considers that the same
* are read.
diff --git a/trunk/arch/unicore32/include/asm/pgtable.h b/trunk/arch/unicore32/include/asm/pgtable.h
index 233c25880df4..68b2f297ac97 100644
--- a/trunk/arch/unicore32/include/asm/pgtable.h
+++ b/trunk/arch/unicore32/include/asm/pgtable.h
@@ -303,6 +303,13 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#include
+/*
+ * remap a physical page `pfn' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+#define io_remap_pfn_range(vma, from, pfn, size, prot) \
+ remap_pfn_range(vma, from, pfn, size, prot)
+
#define pgtable_cache_init() do { } while (0)
#endif /* !__ASSEMBLY__ */
diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig
index fe120da25625..685692c94f05 100644
--- a/trunk/arch/x86/Kconfig
+++ b/trunk/arch/x86/Kconfig
@@ -2265,7 +2265,6 @@ source "fs/Kconfig.binfmt"
config IA32_EMULATION
bool "IA32 Emulation"
depends on X86_64
- select BINFMT_ELF
select COMPAT_BINFMT_ELF
select HAVE_UID16
---help---
diff --git a/trunk/arch/x86/crypto/aesni-intel_asm.S b/trunk/arch/x86/crypto/aesni-intel_asm.S
index 477e9d75149b..62fe22cd4cba 100644
--- a/trunk/arch/x86/crypto/aesni-intel_asm.S
+++ b/trunk/arch/x86/crypto/aesni-intel_asm.S
@@ -2681,68 +2681,56 @@ ENTRY(aesni_xts_crypt8)
addq %rcx, KEYP
movdqa IV, STATE1
- movdqu 0x00(INP), INC
- pxor INC, STATE1
+ pxor 0x00(INP), STATE1
movdqu IV, 0x00(OUTP)
_aesni_gf128mul_x_ble()
movdqa IV, STATE2
- movdqu 0x10(INP), INC
- pxor INC, STATE2
+ pxor 0x10(INP), STATE2
movdqu IV, 0x10(OUTP)
_aesni_gf128mul_x_ble()
movdqa IV, STATE3
- movdqu 0x20(INP), INC
- pxor INC, STATE3
+ pxor 0x20(INP), STATE3
movdqu IV, 0x20(OUTP)
_aesni_gf128mul_x_ble()
movdqa IV, STATE4
- movdqu 0x30(INP), INC
- pxor INC, STATE4
+ pxor 0x30(INP), STATE4
movdqu IV, 0x30(OUTP)
call *%r11
- movdqu 0x00(OUTP), INC
- pxor INC, STATE1
+ pxor 0x00(OUTP), STATE1
movdqu STATE1, 0x00(OUTP)
_aesni_gf128mul_x_ble()
movdqa IV, STATE1
- movdqu 0x40(INP), INC
- pxor INC, STATE1
+ pxor 0x40(INP), STATE1
movdqu IV, 0x40(OUTP)
- movdqu 0x10(OUTP), INC
- pxor INC, STATE2
+ pxor 0x10(OUTP), STATE2
movdqu STATE2, 0x10(OUTP)
_aesni_gf128mul_x_ble()
movdqa IV, STATE2
- movdqu 0x50(INP), INC
- pxor INC, STATE2
+ pxor 0x50(INP), STATE2
movdqu IV, 0x50(OUTP)
- movdqu 0x20(OUTP), INC
- pxor INC, STATE3
+ pxor 0x20(OUTP), STATE3
movdqu STATE3, 0x20(OUTP)
_aesni_gf128mul_x_ble()
movdqa IV, STATE3
- movdqu 0x60(INP), INC
- pxor INC, STATE3
+ pxor 0x60(INP), STATE3
movdqu IV, 0x60(OUTP)
- movdqu 0x30(OUTP), INC
- pxor INC, STATE4
+ pxor 0x30(OUTP), STATE4
movdqu STATE4, 0x30(OUTP)
_aesni_gf128mul_x_ble()
movdqa IV, STATE4
- movdqu 0x70(INP), INC
- pxor INC, STATE4
+ pxor 0x70(INP), STATE4
movdqu IV, 0x70(OUTP)
_aesni_gf128mul_x_ble()
@@ -2750,20 +2738,16 @@ ENTRY(aesni_xts_crypt8)
call *%r11
- movdqu 0x40(OUTP), INC
- pxor INC, STATE1
+ pxor 0x40(OUTP), STATE1
movdqu STATE1, 0x40(OUTP)
- movdqu 0x50(OUTP), INC
- pxor INC, STATE2
+ pxor 0x50(OUTP), STATE2
movdqu STATE2, 0x50(OUTP)
- movdqu 0x60(OUTP), INC
- pxor INC, STATE3
+ pxor 0x60(OUTP), STATE3
movdqu STATE3, 0x60(OUTP)
- movdqu 0x70(OUTP), INC
- pxor INC, STATE4
+ pxor 0x70(OUTP), STATE4
movdqu STATE4, 0x70(OUTP)
ret
diff --git a/trunk/arch/x86/ia32/ia32_aout.c b/trunk/arch/x86/ia32/ia32_aout.c
index 52ff81cce008..805078e08013 100644
--- a/trunk/arch/x86/ia32/ia32_aout.c
+++ b/trunk/arch/x86/ia32/ia32_aout.c
@@ -192,7 +192,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
/* struct user */
DUMP_WRITE(&dump, sizeof(dump));
/* Now dump all of the user data. Include malloced stuff as well */
- DUMP_SEEK(PAGE_SIZE - sizeof(dump));
+ DUMP_SEEK(PAGE_SIZE);
/* now we start writing out the user space info */
set_fs(USER_DS);
/* Dump the data area */
diff --git a/trunk/arch/x86/include/asm/irq.h b/trunk/arch/x86/include/asm/irq.h
index 57873beb3292..ba870bb6dd8e 100644
--- a/trunk/arch/x86/include/asm/irq.h
+++ b/trunk/arch/x86/include/asm/irq.h
@@ -41,9 +41,4 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
extern void init_ISA_irqs(void);
-#ifdef CONFIG_X86_LOCAL_APIC
-void arch_trigger_all_cpu_backtrace(void);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
-#endif
-
#endif /* _ASM_X86_IRQ_H */
diff --git a/trunk/arch/x86/include/asm/microcode.h b/trunk/arch/x86/include/asm/microcode.h
index 6bc3985ee473..6825e2efd1b4 100644
--- a/trunk/arch/x86/include/asm/microcode.h
+++ b/trunk/arch/x86/include/asm/microcode.h
@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {}
#ifdef CONFIG_MICROCODE_EARLY
#define MAX_UCODE_COUNT 128
extern void __init load_ucode_bsp(void);
-extern void __cpuinit load_ucode_ap(void);
+extern __init void load_ucode_ap(void);
extern int __init save_microcode_in_initrd(void);
#else
static inline void __init load_ucode_bsp(void) {}
-static inline void __cpuinit load_ucode_ap(void) {}
+static inline __init void load_ucode_ap(void) {}
static inline int __init save_microcode_in_initrd(void)
{
return 0;
diff --git a/trunk/arch/x86/include/asm/nmi.h b/trunk/arch/x86/include/asm/nmi.h
index 86f9301903c8..c0fa356e90de 100644
--- a/trunk/arch/x86/include/asm/nmi.h
+++ b/trunk/arch/x86/include/asm/nmi.h
@@ -18,7 +18,9 @@ extern int proc_nmi_enabled(struct ctl_table *, int ,
void __user *, size_t *, loff_t *);
extern int unknown_nmi_panic;
-#endif /* CONFIG_X86_LOCAL_APIC */
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+#endif
#define NMI_FLAG_FIRST 1
diff --git a/trunk/arch/x86/include/asm/pgtable.h b/trunk/arch/x86/include/asm/pgtable.h
index 5b0818bc8963..1e672234c4ff 100644
--- a/trunk/arch/x86/include/asm/pgtable.h
+++ b/trunk/arch/x86/include/asm/pgtable.h
@@ -506,6 +506,9 @@ static inline unsigned long pages_to_mb(unsigned long npg)
return npg >> (20 - PAGE_SHIFT);
}
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
diff --git a/trunk/arch/x86/kernel/apic/hw_nmi.c b/trunk/arch/x86/kernel/apic/hw_nmi.c
index a698d7165c96..31cb9ae992b7 100644
--- a/trunk/arch/x86/kernel/apic/hw_nmi.c
+++ b/trunk/arch/x86/kernel/apic/hw_nmi.c
@@ -9,7 +9,6 @@
*
*/
#include
-#include
#include
#include
diff --git a/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c b/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c
index 5f90b85ff22e..35ffda5d0727 100644
--- a/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -714,15 +714,15 @@ int __init mtrr_cleanup(unsigned address_bits)
if (mtrr_tom2)
x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;
+ nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size);
/*
* [0, 1M) should always be covered by var mtrr with WB
* and fixed mtrrs should take effect before var mtrr for it:
*/
- nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0,
+ nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0,
1ULL<<(20 - PAGE_SHIFT));
- /* add from var mtrr at last */
- nr_range = x86_get_mtrr_mem_range(range, nr_range,
- x_remove_base, x_remove_size);
+ /* Sort the ranges: */
+ sort_range(range, nr_range);
range_sums = sum_ranges(range, nr_range);
printk(KERN_INFO "total RAM covered: %ldM\n",
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c
index a9e22073bd56..f60d41ff9a97 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c
@@ -165,13 +165,13 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
EVENT_EXTRA_END
};
static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
- INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
EVENT_EXTRA_END
};
diff --git a/trunk/arch/x86/kernel/kprobes/core.c b/trunk/arch/x86/kernel/kprobes/core.c
index 211bce445522..9895a9a41380 100644
--- a/trunk/arch/x86/kernel/kprobes/core.c
+++ b/trunk/arch/x86/kernel/kprobes/core.c
@@ -365,14 +365,10 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
return insn.length;
}
-static int __kprobes arch_copy_kprobe(struct kprobe *p)
+static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
- int ret;
-
/* Copy an instruction with recovering if other optprobe modifies it.*/
- ret = __copy_instruction(p->ainsn.insn, p->addr);
- if (!ret)
- return -EINVAL;
+ __copy_instruction(p->ainsn.insn, p->addr);
/*
* __copy_instruction can modify the displacement of the instruction,
@@ -388,8 +384,6 @@ static int __kprobes arch_copy_kprobe(struct kprobe *p)
/* Also, displacement change doesn't affect the first byte */
p->opcode = p->ainsn.insn[0];
-
- return 0;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -403,8 +397,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
p->ainsn.insn = get_insn_slot();
if (!p->ainsn.insn)
return -ENOMEM;
-
- return arch_copy_kprobe(p);
+ arch_copy_kprobe(p);
+ return 0;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
diff --git a/trunk/arch/x86/kernel/kvmclock.c b/trunk/arch/x86/kernel/kvmclock.c
index 3dd37ebd591b..d2c381280e3c 100644
--- a/trunk/arch/x86/kernel/kvmclock.c
+++ b/trunk/arch/x86/kernel/kvmclock.c
@@ -242,7 +242,6 @@ void __init kvmclock_init(void)
if (!mem)
return;
hv_clock = __va(mem);
- memset(hv_clock, 0, size);
if (kvm_register_clock("boot clock")) {
hv_clock = NULL;
diff --git a/trunk/arch/x86/kernel/process.c b/trunk/arch/x86/kernel/process.c
index 81a5f5e8f142..4e7a37ff03ab 100644
--- a/trunk/arch/x86/kernel/process.c
+++ b/trunk/arch/x86/kernel/process.c
@@ -277,6 +277,18 @@ void exit_idle(void)
}
#endif
+void arch_cpu_idle_prepare(void)
+{
+ /*
+ * If we're the non-boot CPU, nothing set the stack canary up
+ * for us. CPU0 already has it initialized but no harm in
+ * doing it again. This is a good place for updating it, as
+	 * we won't ever return from this function (so the invalid
+	 * canaries already on the stack won't ever trigger).
+ */
+ boot_init_stack_canary();
+}
+
void arch_cpu_idle_enter(void)
{
local_touch_nmi();
diff --git a/trunk/arch/x86/kernel/smpboot.c b/trunk/arch/x86/kernel/smpboot.c
index bfd348e99369..9c73b51817e4 100644
--- a/trunk/arch/x86/kernel/smpboot.c
+++ b/trunk/arch/x86/kernel/smpboot.c
@@ -372,15 +372,15 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
void __cpuinit set_cpu_sibling_map(int cpu)
{
+ bool has_mc = boot_cpu_data.x86_max_cores > 1;
bool has_smt = smp_num_siblings > 1;
- bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
struct cpuinfo_x86 *c = &cpu_data(cpu);
struct cpuinfo_x86 *o;
int i;
cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
- if (!has_mp) {
+ if (!has_smt && !has_mc) {
cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
cpumask_set_cpu(cpu, cpu_core_mask(cpu));
@@ -394,7 +394,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
if ((i == cpu) || (has_smt && match_smt(c, o)))
link_mask(sibling, cpu, i);
- if ((i == cpu) || (has_mp && match_llc(c, o)))
+ if ((i == cpu) || (has_mc && match_llc(c, o)))
link_mask(llc_shared, cpu, i);
}
@@ -406,7 +406,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
for_each_cpu(i, cpu_sibling_setup_mask) {
o = &cpu_data(i);
- if ((i == cpu) || (has_mp && match_mc(c, o))) {
+ if ((i == cpu) || (has_mc && match_mc(c, o))) {
link_mask(core, cpu, i);
/*
diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c
index e8ba99c34180..094b5d96ab14 100644
--- a/trunk/arch/x86/kvm/x86.c
+++ b/trunk/arch/x86/kvm/x86.c
@@ -582,6 +582,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
if (index != XCR_XFEATURE_ENABLED_MASK)
return 1;
xcr0 = xcr;
+ if (kvm_x86_ops->get_cpl(vcpu) != 0)
+ return 1;
if (!(xcr0 & XSTATE_FP))
return 1;
if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
@@ -595,8 +597,7 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
- if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
- __kvm_set_xcr(vcpu, index, xcr)) {
+ if (__kvm_set_xcr(vcpu, index, xcr)) {
kvm_inject_gp(vcpu, 0);
return 1;
}
diff --git a/trunk/arch/x86/platform/efi/efi.c b/trunk/arch/x86/platform/efi/efi.c
index d2fbcedcf6ea..5ae2eb09419e 100644
--- a/trunk/arch/x86/platform/efi/efi.c
+++ b/trunk/arch/x86/platform/efi/efi.c
@@ -1069,10 +1069,7 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
* that by attempting to use more space than is available.
*/
unsigned long dummy_size = remaining_size + 1024;
- void *dummy = kzalloc(dummy_size, GFP_ATOMIC);
-
- if (!dummy)
- return EFI_OUT_OF_RESOURCES;
+ void *dummy = kmalloc(dummy_size, GFP_ATOMIC);
status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
EFI_VARIABLE_NON_VOLATILE |
@@ -1092,8 +1089,6 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
0, dummy);
}
- kfree(dummy);
-
/*
* The runtime code may now have triggered a garbage collection
* run, so check the variable info again
diff --git a/trunk/arch/xtensa/include/asm/pgtable.h b/trunk/arch/xtensa/include/asm/pgtable.h
index 8f017eb309bd..d7546c94da52 100644
--- a/trunk/arch/xtensa/include/asm/pgtable.h
+++ b/trunk/arch/xtensa/include/asm/pgtable.h
@@ -393,6 +393,14 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
extern void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t *ptep);
+/*
+ * remap a physical page `pfn' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+
+#define io_remap_pfn_range(vma,from,pfn,size,prot) \
+ remap_pfn_range(vma, from, pfn, size, prot)
+
typedef pte_t *pte_addr_t;
#endif /* !defined (__ASSEMBLY__) */
diff --git a/trunk/crypto/algboss.c b/trunk/crypto/algboss.c
index 76fc0b23fc6c..769219b29309 100644
--- a/trunk/crypto/algboss.c
+++ b/trunk/crypto/algboss.c
@@ -45,9 +45,10 @@ struct cryptomgr_param {
} nu32;
} attrs[CRYPTO_MAX_ATTRS];
+ char larval[CRYPTO_MAX_ALG_NAME];
char template[CRYPTO_MAX_ALG_NAME];
- struct crypto_larval *larval;
+ struct completion *completion;
u32 otype;
u32 omask;
@@ -86,8 +87,7 @@ static int cryptomgr_probe(void *data)
crypto_tmpl_put(tmpl);
out:
- complete_all(¶m->larval->completion);
- crypto_alg_put(¶m->larval->alg);
+ complete_all(param->completion);
kfree(param);
module_put_and_exit(0);
}
@@ -187,19 +187,18 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
param->otype = larval->alg.cra_flags;
param->omask = larval->mask;
- crypto_alg_get(&larval->alg);
- param->larval = larval;
+ memcpy(param->larval, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME);
+
+ param->completion = &larval->completion;
thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe");
if (IS_ERR(thread))
- goto err_put_larval;
+ goto err_free_param;
wait_for_completion_interruptible(&larval->completion);
return NOTIFY_STOP;
-err_put_larval:
- crypto_alg_put(&larval->alg);
err_free_param:
kfree(param);
err_put_module:
diff --git a/trunk/crypto/api.c b/trunk/crypto/api.c
index 3b6180336d3d..033a7147e5eb 100644
--- a/trunk/crypto/api.c
+++ b/trunk/crypto/api.c
@@ -34,6 +34,12 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);
+static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
+{
+ atomic_inc(&alg->cra_refcnt);
+ return alg;
+}
+
struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
{
return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
diff --git a/trunk/crypto/internal.h b/trunk/crypto/internal.h
index bd39bfc92eab..9ebedae3fb54 100644
--- a/trunk/crypto/internal.h
+++ b/trunk/crypto/internal.h
@@ -103,12 +103,6 @@ int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);
int crypto_probing_notify(unsigned long val, void *v);
-static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
-{
- atomic_inc(&alg->cra_refcnt);
- return alg;
-}
-
static inline void crypto_alg_put(struct crypto_alg *alg)
{
if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
diff --git a/trunk/drivers/acpi/acpi_lpss.c b/trunk/drivers/acpi/acpi_lpss.c
index cab13f2fc28e..652fd5ce303c 100644
--- a/trunk/drivers/acpi/acpi_lpss.c
+++ b/trunk/drivers/acpi/acpi_lpss.c
@@ -164,24 +164,15 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
if (dev_desc->clk_required) {
ret = register_device_clock(adev, pdata);
if (ret) {
- /* Skip the device, but continue the namespace scan. */
- ret = 0;
- goto err_out;
+ /*
+ * Skip the device, but don't terminate the namespace
+ * scan.
+ */
+ kfree(pdata);
+ return 0;
}
}
- /*
- * This works around a known issue in ACPI tables where LPSS devices
- * have _PS0 and _PS3 without _PSC (and no power resources), so
- * acpi_bus_init_power() will assume that the BIOS has put them into D0.
- */
- ret = acpi_device_fix_up_power(adev);
- if (ret) {
- /* Skip the device, but continue the namespace scan. */
- ret = 0;
- goto err_out;
- }
-
adev->driver_data = pdata;
ret = acpi_create_platform_device(adev, id);
if (ret > 0)
diff --git a/trunk/drivers/acpi/device_pm.c b/trunk/drivers/acpi/device_pm.c
index 31c217a42839..318fa32a141e 100644
--- a/trunk/drivers/acpi/device_pm.c
+++ b/trunk/drivers/acpi/device_pm.c
@@ -290,26 +290,6 @@ int acpi_bus_init_power(struct acpi_device *device)
return 0;
}
-/**
- * acpi_device_fix_up_power - Force device with missing _PSC into D0.
- * @device: Device object whose power state is to be fixed up.
- *
- * Devices without power resources and _PSC, but having _PS0 and _PS3 defined,
- * are assumed to be put into D0 by the BIOS. However, in some cases that may
- * not be the case and this function should be used then.
- */
-int acpi_device_fix_up_power(struct acpi_device *device)
-{
- int ret = 0;
-
- if (!device->power.flags.power_resources
- && !device->power.flags.explicit_get
- && device->power.state == ACPI_STATE_D0)
- ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0);
-
- return ret;
-}
-
int acpi_bus_update_power(acpi_handle handle, int *state_p)
{
struct acpi_device *device;
diff --git a/trunk/drivers/acpi/dock.c b/trunk/drivers/acpi/dock.c
index 14de9f46972e..4fdea381ef21 100644
--- a/trunk/drivers/acpi/dock.c
+++ b/trunk/drivers/acpi/dock.c
@@ -66,21 +66,20 @@ struct dock_station {
spinlock_t dd_lock;
struct mutex hp_lock;
struct list_head dependent_devices;
+ struct list_head hotplug_devices;
struct list_head sibling;
struct platform_device *dock_device;
};
static LIST_HEAD(dock_stations);
static int dock_station_count;
-static DEFINE_MUTEX(hotplug_lock);
struct dock_dependent_device {
struct list_head list;
+ struct list_head hotplug_list;
acpi_handle handle;
- const struct acpi_dock_ops *hp_ops;
- void *hp_context;
- unsigned int hp_refcount;
- void (*hp_release)(void *);
+ const struct acpi_dock_ops *ops;
+ void *context;
};
#define DOCK_DOCKING 0x00000001
@@ -112,6 +111,7 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
dd->handle = handle;
INIT_LIST_HEAD(&dd->list);
+ INIT_LIST_HEAD(&dd->hotplug_list);
spin_lock(&ds->dd_lock);
list_add_tail(&dd->list, &ds->dependent_devices);
@@ -121,90 +121,35 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
}
/**
- * dock_init_hotplug - Initialize a hotplug device on a docking station.
- * @dd: Dock-dependent device.
- * @ops: Dock operations to attach to the dependent device.
- * @context: Data to pass to the @ops callbacks and @release.
- * @init: Optional initialization routine to run after setting up context.
- * @release: Optional release routine to run on removal.
+ * dock_add_hotplug_device - associate a hotplug handler with the dock station
+ * @ds: The dock station
+ * @dd: The dependent device struct
+ *
+ * Add the dependent device to the dock's hotplug device list
*/
-static int dock_init_hotplug(struct dock_dependent_device *dd,
- const struct acpi_dock_ops *ops, void *context,
- void (*init)(void *), void (*release)(void *))
+static void
+dock_add_hotplug_device(struct dock_station *ds,
+ struct dock_dependent_device *dd)
{
- int ret = 0;
-
- mutex_lock(&hotplug_lock);
-
- if (dd->hp_context) {
- ret = -EEXIST;
- } else {
- dd->hp_refcount = 1;
- dd->hp_ops = ops;
- dd->hp_context = context;
- dd->hp_release = release;
- }
-
- if (!WARN_ON(ret) && init)
- init(context);
-
- mutex_unlock(&hotplug_lock);
- return ret;
+ mutex_lock(&ds->hp_lock);
+ list_add_tail(&dd->hotplug_list, &ds->hotplug_devices);
+ mutex_unlock(&ds->hp_lock);
}
/**
- * dock_release_hotplug - Decrement hotplug reference counter of dock device.
- * @dd: Dock-dependent device.
+ * dock_del_hotplug_device - remove a hotplug handler from the dock station
+ * @ds: The dock station
+ * @dd: the dependent device struct
*
- * Decrement the reference counter of @dd and if 0, detach its hotplug
- * operations from it, reset its context pointer and run the optional release
- * routine if present.
+ * Delete the dependent device from the dock's hotplug device list
*/
-static void dock_release_hotplug(struct dock_dependent_device *dd)
+static void
+dock_del_hotplug_device(struct dock_station *ds,
+ struct dock_dependent_device *dd)
{
- void (*release)(void *) = NULL;
- void *context = NULL;
-
- mutex_lock(&hotplug_lock);
-
- if (dd->hp_context && !--dd->hp_refcount) {
- dd->hp_ops = NULL;
- context = dd->hp_context;
- dd->hp_context = NULL;
- release = dd->hp_release;
- dd->hp_release = NULL;
- }
-
- if (release && context)
- release(context);
-
- mutex_unlock(&hotplug_lock);
-}
-
-static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
- bool uevent)
-{
- acpi_notify_handler cb = NULL;
- bool run = false;
-
- mutex_lock(&hotplug_lock);
-
- if (dd->hp_context) {
- run = true;
- dd->hp_refcount++;
- if (dd->hp_ops)
- cb = uevent ? dd->hp_ops->uevent : dd->hp_ops->handler;
- }
-
- mutex_unlock(&hotplug_lock);
-
- if (!run)
- return;
-
- if (cb)
- cb(dd->handle, event, dd->hp_context);
-
- dock_release_hotplug(dd);
+ mutex_lock(&ds->hp_lock);
+ list_del(&dd->hotplug_list);
+ mutex_unlock(&ds->hp_lock);
}
/**
@@ -415,8 +360,9 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
/*
* First call driver specific hotplug functions
*/
- list_for_each_entry(dd, &ds->dependent_devices, list)
- dock_hotplug_event(dd, event, false);
+ list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
+ if (dd->ops && dd->ops->handler)
+ dd->ops->handler(dd->handle, event, dd->context);
/*
* Now make sure that an acpi_device is created for each
@@ -452,8 +398,9 @@ static void dock_event(struct dock_station *ds, u32 event, int num)
if (num == DOCK_EVENT)
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
- list_for_each_entry(dd, &ds->dependent_devices, list)
- dock_hotplug_event(dd, event, true);
+ list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
+ if (dd->ops && dd->ops->uevent)
+ dd->ops->uevent(dd->handle, event, dd->context);
if (num != DOCK_EVENT)
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
@@ -623,24 +570,19 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
* @handle: the handle of the device
* @ops: handlers to call after docking
* @context: device specific data
- * @init: Optional initialization routine to run after registration
- * @release: Optional release routine to run on unregistration
*
* If a driver would like to perform a hotplug operation after a dock
* event, they can register an acpi_notifiy_handler to be called by
* the dock driver after _DCK is executed.
*/
-int register_hotplug_dock_device(acpi_handle handle,
- const struct acpi_dock_ops *ops, void *context,
- void (*init)(void *), void (*release)(void *))
+int
+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
+ void *context)
{
struct dock_dependent_device *dd;
struct dock_station *dock_station;
int ret = -EINVAL;
- if (WARN_ON(!context))
- return -EINVAL;
-
if (!dock_station_count)
return -ENODEV;
@@ -655,8 +597,12 @@ int register_hotplug_dock_device(acpi_handle handle,
* ops
*/
dd = find_dock_dependent_device(dock_station, handle);
- if (dd && !dock_init_hotplug(dd, ops, context, init, release))
+ if (dd) {
+ dd->ops = ops;
+ dd->context = context;
+ dock_add_hotplug_device(dock_station, dd);
ret = 0;
+ }
}
return ret;
@@ -678,7 +624,7 @@ void unregister_hotplug_dock_device(acpi_handle handle)
list_for_each_entry(dock_station, &dock_stations, sibling) {
dd = find_dock_dependent_device(dock_station, handle);
if (dd)
- dock_release_hotplug(dd);
+ dock_del_hotplug_device(dock_station, dd);
}
}
EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
@@ -922,10 +868,8 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
if (!count)
return -EINVAL;
- acpi_scan_lock_acquire();
begin_undock(dock_station);
ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST);
- acpi_scan_lock_release();
return ret ? ret: count;
}
static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
@@ -1007,6 +951,7 @@ static int __init dock_add(acpi_handle handle)
mutex_init(&dock_station->hp_lock);
spin_lock_init(&dock_station->dd_lock);
INIT_LIST_HEAD(&dock_station->sibling);
+ INIT_LIST_HEAD(&dock_station->hotplug_devices);
ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
INIT_LIST_HEAD(&dock_station->dependent_devices);
@@ -1046,6 +991,30 @@ static int __init dock_add(acpi_handle handle)
return ret;
}
+/**
+ * dock_remove - free up resources related to the dock station
+ */
+static int dock_remove(struct dock_station *ds)
+{
+ struct dock_dependent_device *dd, *tmp;
+ struct platform_device *dock_device = ds->dock_device;
+
+ if (!dock_station_count)
+ return 0;
+
+ /* remove dependent devices */
+ list_for_each_entry_safe(dd, tmp, &ds->dependent_devices, list)
+ kfree(dd);
+
+ list_del(&ds->sibling);
+
+ /* cleanup sysfs */
+ sysfs_remove_group(&dock_device->dev.kobj, &dock_attribute_group);
+ platform_device_unregister(dock_device);
+
+ return 0;
+}
+
/**
* find_dock_and_bay - look for dock stations and bays
* @handle: acpi handle of a device
@@ -1064,7 +1033,7 @@ find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
-int __init acpi_dock_init(void)
+static int __init dock_init(void)
{
if (acpi_disabled)
return 0;
@@ -1083,3 +1052,19 @@ int __init acpi_dock_init(void)
ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
return 0;
}
+
+static void __exit dock_exit(void)
+{
+ struct dock_station *tmp, *dock_station;
+
+ unregister_acpi_bus_notifier(&dock_acpi_notifier);
+ list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling)
+ dock_remove(dock_station);
+}
+
+/*
+ * Must be called before drivers of devices in dock, otherwise we can't know
+ * which devices are in a dock
+ */
+subsys_initcall(dock_init);
+module_exit(dock_exit);
diff --git a/trunk/drivers/acpi/internal.h b/trunk/drivers/acpi/internal.h
index c610a76d92c4..297cbf456f86 100644
--- a/trunk/drivers/acpi/internal.h
+++ b/trunk/drivers/acpi/internal.h
@@ -40,11 +40,6 @@ void acpi_container_init(void);
#else
static inline void acpi_container_init(void) {}
#endif
-#ifdef CONFIG_ACPI_DOCK
-void acpi_dock_init(void);
-#else
-static inline void acpi_dock_init(void) {}
-#endif
#ifdef CONFIG_ACPI_HOTPLUG_MEMORY
void acpi_memory_hotplug_init(void);
#else
diff --git a/trunk/drivers/acpi/power.c b/trunk/drivers/acpi/power.c
index 288bb270f8ed..f962047c6c85 100644
--- a/trunk/drivers/acpi/power.c
+++ b/trunk/drivers/acpi/power.c
@@ -885,7 +885,6 @@ int acpi_add_power_resource(acpi_handle handle)
ACPI_STA_DEFAULT);
mutex_init(&resource->resource_lock);
INIT_LIST_HEAD(&resource->dependent);
- INIT_LIST_HEAD(&resource->list_node);
resource->name = device->pnp.bus_id;
strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
diff --git a/trunk/drivers/acpi/resource.c b/trunk/drivers/acpi/resource.c
index 3322b47ab7ca..a3868f6c222a 100644
--- a/trunk/drivers/acpi/resource.c
+++ b/trunk/drivers/acpi/resource.c
@@ -304,8 +304,7 @@ static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi)
}
static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
- u8 triggering, u8 polarity, u8 shareable,
- bool legacy)
+ u8 triggering, u8 polarity, u8 shareable)
{
int irq, p, t;
@@ -318,19 +317,14 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
* In IO-APIC mode, use overrided attribute. Two reasons:
* 1. BIOS bug in DSDT
* 2. BIOS uses IO-APIC mode Interrupt Source Override
- *
- * We do this only if we are dealing with IRQ() or IRQNoFlags()
- * resource (the legacy ISA resources). With modern ACPI 5 devices
- * using extended IRQ descriptors we take the IRQ configuration
- * from _CRS directly.
*/
- if (legacy && !acpi_get_override_irq(gsi, &t, &p)) {
+ if (!acpi_get_override_irq(gsi, &t, &p)) {
u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
if (triggering != trig || polarity != pol) {
pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi,
- t ? "level" : "edge", p ? "low" : "high");
+ t ? "edge" : "level", p ? "low" : "high");
triggering = trig;
polarity = pol;
}
@@ -379,7 +373,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
}
acpi_dev_get_irqresource(res, irq->interrupts[index],
irq->triggering, irq->polarity,
- irq->sharable, true);
+ irq->sharable);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
ext_irq = &ares->data.extended_irq;
@@ -389,7 +383,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
}
acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
ext_irq->triggering, ext_irq->polarity,
- ext_irq->sharable, false);
+ ext_irq->sharable);
break;
default:
return false;
diff --git a/trunk/drivers/acpi/scan.c b/trunk/drivers/acpi/scan.c
index 27da63061e11..b14ac46948c9 100644
--- a/trunk/drivers/acpi/scan.c
+++ b/trunk/drivers/acpi/scan.c
@@ -2042,7 +2042,6 @@ int __init acpi_scan_init(void)
acpi_lpss_init();
acpi_container_init();
acpi_memory_hotplug_init();
- acpi_dock_init();
mutex_lock(&acpi_scan_lock);
/*
diff --git a/trunk/drivers/ata/libata-acpi.c b/trunk/drivers/ata/libata-acpi.c
index cf4e7020adac..87f2f395d79a 100644
--- a/trunk/drivers/ata/libata-acpi.c
+++ b/trunk/drivers/ata/libata-acpi.c
@@ -156,10 +156,8 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
spin_unlock_irqrestore(ap->lock, flags);
- if (wait) {
+ if (wait)
ata_port_wait_eh(ap);
- flush_work(&ap->hotplug_task.work);
- }
}
static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data)
@@ -216,39 +214,6 @@ static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
.uevent = ata_acpi_ap_uevent,
};
-void ata_acpi_hotplug_init(struct ata_host *host)
-{
- int i;
-
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
- acpi_handle handle;
- struct ata_device *dev;
-
- if (!ap)
- continue;
-
- handle = ata_ap_acpi_handle(ap);
- if (handle) {
- /* we might be on a docking station */
- register_hotplug_dock_device(handle,
- &ata_acpi_ap_dock_ops, ap,
- NULL, NULL);
- }
-
- ata_for_each_dev(dev, &ap->link, ALL) {
- handle = ata_dev_acpi_handle(dev);
- if (!handle)
- continue;
-
- /* we might be on a docking station */
- register_hotplug_dock_device(handle,
- &ata_acpi_dev_dock_ops,
- dev, NULL, NULL);
- }
- }
-}
-
/**
* ata_acpi_dissociate - dissociate ATA host from ACPI objects
* @host: target ATA host
diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c
index adf002a3c584..f2184276539d 100644
--- a/trunk/drivers/ata/libata-core.c
+++ b/trunk/drivers/ata/libata-core.c
@@ -6148,8 +6148,6 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
if (rc)
goto err_tadd;
- ata_acpi_hotplug_init(host);
-
/* set cable, sata_spd_limit and report */
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
diff --git a/trunk/drivers/ata/libata.h b/trunk/drivers/ata/libata.h
index 577d902bc4de..c949dd311b2e 100644
--- a/trunk/drivers/ata/libata.h
+++ b/trunk/drivers/ata/libata.h
@@ -122,7 +122,6 @@ extern int ata_acpi_register(void);
extern void ata_acpi_unregister(void);
extern void ata_acpi_bind(struct ata_device *dev);
extern void ata_acpi_unbind(struct ata_device *dev);
-extern void ata_acpi_hotplug_init(struct ata_host *host);
#else
static inline void ata_acpi_dissociate(struct ata_host *host) { }
static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
@@ -135,7 +134,6 @@ static inline int ata_acpi_register(void) { return 0; }
static inline void ata_acpi_unregister(void) { }
static inline void ata_acpi_bind(struct ata_device *dev) { }
static inline void ata_acpi_unbind(struct ata_device *dev) { }
-static inline void ata_acpi_hotplug_init(struct ata_host *host) {}
#endif
/* libata-scsi.c */
diff --git a/trunk/drivers/base/firmware_class.c b/trunk/drivers/base/firmware_class.c
index 01e21037d8fe..4b1f9265887f 100644
--- a/trunk/drivers/base/firmware_class.c
+++ b/trunk/drivers/base/firmware_class.c
@@ -450,18 +450,8 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
{
struct firmware_buf *buf = fw_priv->buf;
- /*
- * There is a small window in which user can write to 'loading'
- * between loading done and disappearance of 'loading'
- */
- if (test_bit(FW_STATUS_DONE, &buf->status))
- return;
-
set_bit(FW_STATUS_ABORT, &buf->status);
complete_all(&buf->completion);
-
- /* avoid user action after loading abort */
- fw_priv->buf = NULL;
}
#define is_fw_load_aborted(buf) \
@@ -538,12 +528,7 @@ static ssize_t firmware_loading_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct firmware_priv *fw_priv = to_firmware_priv(dev);
- int loading = 0;
-
- mutex_lock(&fw_lock);
- if (fw_priv->buf)
- loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
- mutex_unlock(&fw_lock);
+ int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
return sprintf(buf, "%d\n", loading);
}
@@ -585,12 +570,12 @@ static ssize_t firmware_loading_store(struct device *dev,
const char *buf, size_t count)
{
struct firmware_priv *fw_priv = to_firmware_priv(dev);
- struct firmware_buf *fw_buf;
+ struct firmware_buf *fw_buf = fw_priv->buf;
int loading = simple_strtol(buf, NULL, 10);
int i;
mutex_lock(&fw_lock);
- fw_buf = fw_priv->buf;
+
if (!fw_buf)
goto out;
@@ -792,6 +777,10 @@ static void firmware_class_timeout_work(struct work_struct *work)
struct firmware_priv, timeout_work.work);
mutex_lock(&fw_lock);
+ if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) {
+ mutex_unlock(&fw_lock);
+ return;
+ }
fw_load_abort(fw_priv);
mutex_unlock(&fw_lock);
}
@@ -872,6 +861,8 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
cancel_delayed_work_sync(&fw_priv->timeout_work);
+ fw_priv->buf = NULL;
+
device_remove_file(f_dev, &dev_attr_loading);
err_del_bin_attr:
device_remove_bin_file(f_dev, &firmware_attr_data);
diff --git a/trunk/drivers/block/cryptoloop.c b/trunk/drivers/block/cryptoloop.c
index 99e773cb70d0..8b6bb764b0a3 100644
--- a/trunk/drivers/block/cryptoloop.c
+++ b/trunk/drivers/block/cryptoloop.c
@@ -25,9 +25,9 @@
#include
#include
#include
+#include
#include
#include
-#include "loop.h"
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("loop blockdevice transferfunction adaptor / CryptoAPI");
diff --git a/trunk/drivers/block/loop.c b/trunk/drivers/block/loop.c
index 40e715531aa6..d92d50fd84b7 100644
--- a/trunk/drivers/block/loop.c
+++ b/trunk/drivers/block/loop.c
@@ -63,6 +63,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -75,7 +76,6 @@
#include
#include
#include
-#include "loop.h"
#include
diff --git a/trunk/drivers/block/rbd.c b/trunk/drivers/block/rbd.c
index aff789d6fccd..3063452e55da 100644
--- a/trunk/drivers/block/rbd.c
+++ b/trunk/drivers/block/rbd.c
@@ -1036,16 +1036,12 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
char *name;
u64 segment;
int ret;
- char *name_format;
name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
if (!name)
return NULL;
segment = offset >> rbd_dev->header.obj_order;
- name_format = "%s.%012llx";
- if (rbd_dev->image_format == 2)
- name_format = "%s.%016llx";
- ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
+ ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
rbd_dev->header.object_prefix, segment);
if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
pr_err("error formatting segment name for #%llu (%d)\n",
@@ -2252,17 +2248,13 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
obj_request->pages, length,
offset & ~PAGE_MASK, false, false);
- /*
- * set obj_request->img_request before formatting
- * the osd_request so that it gets the right snapc
- */
- rbd_img_obj_request_add(img_request, obj_request);
if (write_request)
rbd_osd_req_format_write(obj_request);
else
rbd_osd_req_format_read(obj_request);
obj_request->img_offset = img_offset;
+ rbd_img_obj_request_add(img_request, obj_request);
img_offset += length;
resid -= length;
@@ -4247,10 +4239,6 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
down_write(&rbd_dev->header_rwsem);
- ret = rbd_dev_v2_image_size(rbd_dev);
- if (ret)
- goto out;
-
if (first_time) {
ret = rbd_dev_v2_header_onetime(rbd_dev);
if (ret)
@@ -4284,6 +4272,10 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
"is EXPERIMENTAL!");
}
+ ret = rbd_dev_v2_image_size(rbd_dev);
+ if (ret)
+ goto out;
+
if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
if (rbd_dev->mapping.size != rbd_dev->header.image_size)
rbd_dev->mapping.size = rbd_dev->header.image_size;
diff --git a/trunk/drivers/bluetooth/btmrvl_main.c b/trunk/drivers/bluetooth/btmrvl_main.c
index 9a9f51875df5..3a4343b3bd6d 100644
--- a/trunk/drivers/bluetooth/btmrvl_main.c
+++ b/trunk/drivers/bluetooth/btmrvl_main.c
@@ -498,10 +498,6 @@ static int btmrvl_service_main_thread(void *data)
add_wait_queue(&thread->wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop()) {
- BT_DBG("main_thread: break from main thread");
- break;
- }
if (adapter->wakeup_tries ||
((!adapter->int_count) &&
@@ -517,6 +513,11 @@ static int btmrvl_service_main_thread(void *data)
BT_DBG("main_thread woke up");
+ if (kthread_should_stop()) {
+ BT_DBG("main_thread: break from main thread");
+ break;
+ }
+
spin_lock_irqsave(&priv->driver_lock, flags);
if (adapter->int_count) {
adapter->int_count = 0;
diff --git a/trunk/drivers/clk/clk.c b/trunk/drivers/clk/clk.c
index 1144e8c7579d..934cfd18f72d 100644
--- a/trunk/drivers/clk/clk.c
+++ b/trunk/drivers/clk/clk.c
@@ -1955,7 +1955,6 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
/* XXX the notifier code should handle this better */
if (!cn->notifier_head.head) {
srcu_cleanup_notifier_head(&cn->notifier_head);
- list_del(&cn->node);
kfree(cn);
}
diff --git a/trunk/drivers/clk/samsung/clk-exynos5250.c b/trunk/drivers/clk/samsung/clk-exynos5250.c
index 22d7699e7ced..5c97e75924a8 100644
--- a/trunk/drivers/clk/samsung/clk-exynos5250.c
+++ b/trunk/drivers/clk/samsung/clk-exynos5250.c
@@ -155,7 +155,7 @@ static __initdata unsigned long exynos5250_clk_regs[] = {
/* list of all parent clock list */
PNAME(mout_apll_p) = { "fin_pll", "fout_apll", };
-PNAME(mout_cpu_p) = { "mout_apll", "sclk_mpll", };
+PNAME(mout_cpu_p) = { "mout_apll", "mout_mpll", };
PNAME(mout_mpll_fout_p) = { "fout_mplldiv2", "fout_mpll" };
PNAME(mout_mpll_p) = { "fin_pll", "mout_mpll_fout" };
PNAME(mout_bpll_fout_p) = { "fout_bplldiv2", "fout_bpll" };
@@ -208,10 +208,10 @@ struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
};
struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
- MUX_A(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, "mout_apll"),
- MUX_A(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
+ MUX(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1),
+ MUX(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1),
MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1),
- MUX_A(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
+ MUX(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1),
MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1),
MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
@@ -378,7 +378,7 @@ struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
- GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, 0, 0),
GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
GATE(tzpc2, "tzpc2", "aclk66", GATE_IP_PERIS, 8, 0, 0),
diff --git a/trunk/drivers/clk/samsung/clk-pll.c b/trunk/drivers/clk/samsung/clk-pll.c
index 362f12dcd944..89135f6be116 100644
--- a/trunk/drivers/clk/samsung/clk-pll.c
+++ b/trunk/drivers/clk/samsung/clk-pll.c
@@ -111,8 +111,7 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct samsung_clk_pll36xx *pll = to_clk_pll36xx(hw);
- u32 mdiv, pdiv, sdiv, pll_con0, pll_con1;
- s16 kdiv;
+ u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1;
u64 fvco = parent_rate;
pll_con0 = __raw_readl(pll->con_reg);
@@ -120,7 +119,7 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK;
pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK;
sdiv = (pll_con0 >> PLL36XX_SDIV_SHIFT) & PLL36XX_SDIV_MASK;
- kdiv = (s16)(pll_con1 & PLL36XX_KDIV_MASK);
+ kdiv = pll_con1 & PLL36XX_KDIV_MASK;
fvco *= (mdiv << 16) + kdiv;
do_div(fvco, (pdiv << sdiv));
diff --git a/trunk/drivers/clk/spear/spear3xx_clock.c b/trunk/drivers/clk/spear/spear3xx_clock.c
index 080c3c5e33f6..f9ec43fd1320 100644
--- a/trunk/drivers/clk/spear/spear3xx_clock.c
+++ b/trunk/drivers/clk/spear/spear3xx_clock.c
@@ -369,7 +369,7 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
clk_register_clkdev(clk, NULL, "60100000.serial");
}
#else
-static inline void spear320_clk_init(void __iomem *soc_config_base) { }
+static inline void spear320_clk_init(void) { }
#endif
void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base)
diff --git a/trunk/drivers/clk/tegra/clk-tegra30.c b/trunk/drivers/clk/tegra/clk-tegra30.c
index ba99e3844106..c6921f538e28 100644
--- a/trunk/drivers/clk/tegra/clk-tegra30.c
+++ b/trunk/drivers/clk/tegra/clk-tegra30.c
@@ -1598,12 +1598,6 @@ static void __init tegra30_periph_clk_init(void)
clk_register_clkdev(clk, "afi", "tegra-pcie");
clks[afi] = clk;
- /* pciex */
- clk = tegra_clk_register_periph_gate("pciex", "pll_e", 0, clk_base, 0,
- 74, &periph_u_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "pciex", "tegra-pcie");
- clks[pciex] = clk;
-
/* kfuse */
clk = tegra_clk_register_periph_gate("kfuse", "clk_m",
TEGRA_PERIPH_ON_APB,
@@ -1722,6 +1716,11 @@ static void __init tegra30_fixed_clk_init(void)
1, 0, &cml_lock);
clk_register_clkdev(clk, "cml1", NULL);
clks[cml1] = clk;
+
+ /* pciex */
+ clk = clk_register_fixed_rate(NULL, "pciex", "pll_e", 0, 100000000);
+ clk_register_clkdev(clk, "pciex", NULL);
+ clks[pciex] = clk;
}
static void __init tegra30_osc_clk_init(void)
diff --git a/trunk/drivers/cpufreq/cpufreq_ondemand.c b/trunk/drivers/cpufreq/cpufreq_ondemand.c
index 93eb5cbcc1f6..4b9bb5def6f1 100644
--- a/trunk/drivers/cpufreq/cpufreq_ondemand.c
+++ b/trunk/drivers/cpufreq/cpufreq_ondemand.c
@@ -47,8 +47,6 @@ static struct od_ops od_ops;
static struct cpufreq_governor cpufreq_gov_ondemand;
#endif
-static unsigned int default_powersave_bias;
-
static void ondemand_powersave_bias_init_cpu(int cpu)
{
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
@@ -545,7 +543,7 @@ static int od_init(struct dbs_data *dbs_data)
tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
tuners->ignore_nice = 0;
- tuners->powersave_bias = default_powersave_bias;
+ tuners->powersave_bias = 0;
tuners->io_is_busy = should_io_be_busy();
dbs_data->tuners = tuners;
@@ -587,7 +585,6 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
unsigned int cpu;
cpumask_t done;
- default_powersave_bias = powersave_bias;
cpumask_clear(&done);
get_online_cpus();
@@ -596,17 +593,11 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
continue;
policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
- if (!policy)
- continue;
-
- cpumask_or(&done, &done, policy->cpus);
-
- if (policy->governor != &cpufreq_gov_ondemand)
- continue;
-
dbs_data = policy->governor_data;
od_tuners = dbs_data->tuners;
- od_tuners->powersave_bias = default_powersave_bias;
+ od_tuners->powersave_bias = powersave_bias;
+
+ cpumask_or(&done, &done, policy->cpus);
}
put_online_cpus();
}
diff --git a/trunk/drivers/gpio/gpio-omap.c b/trunk/drivers/gpio/gpio-omap.c
index 4a430360af5a..d3f7d2db870f 100644
--- a/trunk/drivers/gpio/gpio-omap.c
+++ b/trunk/drivers/gpio/gpio-omap.c
@@ -1094,9 +1094,6 @@ static int omap_gpio_probe(struct platform_device *pdev)
const struct omap_gpio_platform_data *pdata;
struct resource *res;
struct gpio_bank *bank;
-#ifdef CONFIG_ARCH_OMAP1
- int irq_base;
-#endif
match = of_match_device(of_match_ptr(omap_gpio_match), dev);
@@ -1138,28 +1135,11 @@ static int omap_gpio_probe(struct platform_device *pdev)
pdata->get_context_loss_count;
}
-#ifdef CONFIG_ARCH_OMAP1
- /*
- * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
- * irq_alloc_descs() and irq_domain_add_legacy() and just use a
- * linear IRQ domain mapping for all OMAP platforms.
- */
- irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
- if (irq_base < 0) {
- dev_err(dev, "Couldn't allocate IRQ numbers\n");
- return -ENODEV;
- }
- bank->domain = irq_domain_add_legacy(node, bank->width, irq_base,
- 0, &irq_domain_simple_ops, NULL);
-#else
bank->domain = irq_domain_add_linear(node, bank->width,
&irq_domain_simple_ops, NULL);
-#endif
- if (!bank->domain) {
- dev_err(dev, "Couldn't register an IRQ domain\n");
+ if (!bank->domain)
return -ENODEV;
- }
if (bank->regs->set_dataout && bank->regs->clr_dataout)
bank->set_dataout = _set_gpio_dataout_reg;
diff --git a/trunk/drivers/gpu/drm/drm_prime.c b/trunk/drivers/gpu/drm/drm_prime.c
index 5b7b9110254b..dcde35231e25 100644
--- a/trunk/drivers/gpu/drm/drm_prime.c
+++ b/trunk/drivers/gpu/drm/drm_prime.c
@@ -190,7 +190,8 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
if (ret)
return ERR_PTR(ret);
}
- return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
+ return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
+ 0600);
}
EXPORT_SYMBOL(drm_gem_prime_export);
diff --git a/trunk/drivers/gpu/drm/drm_vm.c b/trunk/drivers/gpu/drm/drm_vm.c
index 67969e25d60f..1d4f7c9fe661 100644
--- a/trunk/drivers/gpu/drm/drm_vm.c
+++ b/trunk/drivers/gpu/drm/drm_vm.c
@@ -617,6 +617,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
offset = drm_core_get_reg_ofs(dev);
+ vma->vm_flags |= VM_IO; /* not in core dump */
vma->vm_page_prot = drm_io_prot(map->type, vma);
if (io_remap_pfn_range(vma, vma->vm_start,
(map->offset + offset) >> PAGE_SHIFT,
diff --git a/trunk/drivers/gpu/drm/i810/i810_dma.c b/trunk/drivers/gpu/drm/i810/i810_dma.c
index ada49eda489f..004ecdfe1b55 100644
--- a/trunk/drivers/gpu/drm/i810/i810_dma.c
+++ b/trunk/drivers/gpu/drm/i810/i810_dma.c
@@ -97,7 +97,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
buf = dev_priv->mmap_buffer;
buf_priv = buf->dev_private;
- vma->vm_flags |= VM_DONTCOPY;
+ vma->vm_flags |= (VM_IO | VM_DONTCOPY);
buf_priv->currently_mapped = I810_BUF_MAPPED;
diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.h b/trunk/drivers/gpu/drm/i915/i915_drv.h
index 9669a0b8b440..b9d00dcf9a2d 100644
--- a/trunk/drivers/gpu/drm/i915/i915_drv.h
+++ b/trunk/drivers/gpu/drm/i915/i915_drv.h
@@ -1697,8 +1697,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
-void i915_gem_restore_fences(struct drm_device *dev);
-
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c
index 9e35dafc5807..970ad17c99ab 100644
--- a/trunk/drivers/gpu/drm/i915/i915_gem.c
+++ b/trunk/drivers/gpu/drm/i915/i915_gem.c
@@ -1801,14 +1801,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
gfp &= ~(__GFP_IO | __GFP_WAIT);
}
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
- st->nents++;
- sg_set_page(sg, page, PAGE_SIZE, 0);
- sg = sg_next(sg);
- continue;
- }
-#endif
+
if (!i || page_to_pfn(page) != last_pfn + 1) {
if (i)
sg = sg_next(sg);
@@ -1819,10 +1812,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
}
last_pfn = page_to_pfn(page);
}
-#ifdef CONFIG_SWIOTLB
- if (!swiotlb_nr_tbl())
-#endif
- sg_mark_end(sg);
+
+ sg_mark_end(sg);
obj->pages = st;
if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -2126,15 +2117,25 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
}
}
-void i915_gem_restore_fences(struct drm_device *dev)
+static void i915_gem_reset_fences(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
- i915_gem_write_fence(dev, i, reg->obj);
+
+ if (reg->obj)
+ i915_gem_object_fence_lost(reg->obj);
+
+ i915_gem_write_fence(dev, i, NULL);
+
+ reg->pin_count = 0;
+ reg->obj = NULL;
+ INIT_LIST_HEAD(®->lru_list);
}
+
+ INIT_LIST_HEAD(&dev_priv->mm.fence_list);
}
void i915_gem_reset(struct drm_device *dev)
@@ -2157,7 +2158,8 @@ void i915_gem_reset(struct drm_device *dev)
obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
}
- i915_gem_restore_fences(dev);
+ /* The fence registers are invalidated so clear them out */
+ i915_gem_reset_fences(dev);
}
/**
@@ -3863,6 +3865,8 @@ i915_gem_idle(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
+ i915_gem_reset_fences(dev);
+
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
* And not confound mm.suspended!
@@ -4189,8 +4193,7 @@ i915_gem_load(struct drm_device *dev)
dev_priv->num_fence_regs = 8;
/* Initialize fence registers to zero */
- INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- i915_gem_restore_fences(dev);
+ i915_gem_reset_fences(dev);
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
diff --git a/trunk/drivers/gpu/drm/i915/i915_suspend.c b/trunk/drivers/gpu/drm/i915/i915_suspend.c
index 369b3d8776ab..41f0fdecfbdc 100644
--- a/trunk/drivers/gpu/drm/i915/i915_suspend.c
+++ b/trunk/drivers/gpu/drm/i915/i915_suspend.c
@@ -384,7 +384,6 @@ int i915_restore_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
- i915_gem_restore_fences(dev);
i915_restore_display(dev);
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
diff --git a/trunk/drivers/gpu/drm/qxl/qxl_ioctl.c b/trunk/drivers/gpu/drm/qxl/qxl_ioctl.c
index a30f29425c21..a4b71b25fa53 100644
--- a/trunk/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/trunk/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -171,11 +171,6 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
return -EINVAL;
- if (!access_ok(VERIFY_READ,
- (void *)(unsigned long)user_cmd.command,
- user_cmd.command_size))
- return -EFAULT;
-
ret = qxl_alloc_release_reserved(qdev,
sizeof(union qxl_release_info) +
user_cmd.command_size,
diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c
index 6948eb88c2b7..0e5341695922 100644
--- a/trunk/drivers/gpu/drm/radeon/r600.c
+++ b/trunk/drivers/gpu/drm/radeon/r600.c
@@ -2687,9 +2687,6 @@ void r600_uvd_rbc_stop(struct radeon_device *rdev)
int r600_uvd_init(struct radeon_device *rdev)
{
int i, j, r;
- /* disable byte swapping */
- u32 lmi_swap_cntl = 0;
- u32 mp_swap_cntl = 0;
/* raise clocks while booting up the VCPU */
radeon_set_uvd_clocks(rdev, 53300, 40000);
@@ -2714,13 +2711,9 @@ int r600_uvd_init(struct radeon_device *rdev)
WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
(1 << 21) | (1 << 9) | (1 << 20));
-#ifdef __BIG_ENDIAN
- /* swap (8 in 32) RB and IB */
- lmi_swap_cntl = 0xa;
- mp_swap_cntl = 0;
-#endif
- WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
- WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
+ /* disable byte swapping */
+ WREG32(UVD_LMI_SWAP_CNTL, 0);
+ WREG32(UVD_MP_SWAP_CNTL, 0);
WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
WREG32(UVD_MPC_SET_MUXA1, 0x0);
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_device.c b/trunk/drivers/gpu/drm/radeon/radeon_device.c
index b0dc0b6cb4e0..189973836cff 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_device.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_device.c
@@ -244,6 +244,16 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
*/
void radeon_wb_disable(struct radeon_device *rdev)
{
+ int r;
+
+ if (rdev->wb.wb_obj) {
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0))
+ return;
+ radeon_bo_kunmap(rdev->wb.wb_obj);
+ radeon_bo_unpin(rdev->wb.wb_obj);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ }
rdev->wb.enabled = false;
}
@@ -259,11 +269,6 @@ void radeon_wb_fini(struct radeon_device *rdev)
{
radeon_wb_disable(rdev);
if (rdev->wb.wb_obj) {
- if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
- radeon_bo_kunmap(rdev->wb.wb_obj);
- radeon_bo_unpin(rdev->wb.wb_obj);
- radeon_bo_unreserve(rdev->wb.wb_obj);
- }
radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
@@ -290,26 +295,26 @@ int radeon_wb_init(struct radeon_device *rdev)
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
}
- r = radeon_bo_reserve(rdev->wb.wb_obj, false);
- if (unlikely(r != 0)) {
- radeon_wb_fini(rdev);
- return r;
- }
- r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
- &rdev->wb.gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->wb.wb_obj);
- dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
- radeon_wb_fini(rdev);
- return r;
- }
- r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ }
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0)) {
+ radeon_wb_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+ &rdev->wb.gpu_addr);
+ if (r) {
radeon_bo_unreserve(rdev->wb.wb_obj);
- if (r) {
- dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
- radeon_wb_fini(rdev);
- return r;
- }
+ dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+ radeon_wb_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+ radeon_wb_fini(rdev);
+ return r;
}
/* clear wb memory */
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_fence.c b/trunk/drivers/gpu/drm/radeon/radeon_fence.c
index ddb8f8e04eb5..5b937dfe6f65 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_fence.c
@@ -63,9 +63,7 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
- if (drv->cpu_addr) {
- *drv->cpu_addr = cpu_to_le32(seq);
- }
+ *drv->cpu_addr = cpu_to_le32(seq);
} else {
WREG32(drv->scratch_reg, seq);
}
@@ -86,11 +84,7 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
u32 seq = 0;
if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
- if (drv->cpu_addr) {
- seq = le32_to_cpu(*drv->cpu_addr);
- } else {
- seq = lower_32_bits(atomic64_read(&drv->last_seq));
- }
+ seq = le32_to_cpu(*drv->cpu_addr);
} else {
seq = RREG32(drv->scratch_reg);
}
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gart.c b/trunk/drivers/gpu/drm/radeon/radeon_gart.c
index 43ec4a401f07..2c1341f63dc5 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_gart.c
@@ -1197,13 +1197,11 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va)
{
- int r = 0;
+ int r;
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&bo_va->vm->mutex);
- if (bo_va->soffset) {
- r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
- }
+ r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
mutex_unlock(&rdev->vm_manager.lock);
list_del(&bo_va->vm_list);
mutex_unlock(&bo_va->vm->mutex);
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_ring.c b/trunk/drivers/gpu/drm/radeon/radeon_ring.c
index 82434018cbe8..e17faa7cf732 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_ring.c
@@ -402,13 +402,6 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
return -ENOMEM;
/* Align requested size with padding so unlock_commit can
* pad safely */
- radeon_ring_free_size(rdev, ring);
- if (ring->ring_free_dw == (ring->ring_size / 4)) {
- /* This is an empty ring update lockup info to avoid
- * false positive.
- */
- radeon_ring_lockup_update(ring);
- }
ndw = (ndw + ring->align_mask) & ~ring->align_mask;
while (ndw > (ring->ring_free_dw - 1)) {
radeon_ring_free_size(rdev, ring);
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_uvd.c b/trunk/drivers/gpu/drm/radeon/radeon_uvd.c
index cad735dd02c6..906e5c0ca3b9 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -159,17 +159,7 @@ int radeon_uvd_suspend(struct radeon_device *rdev)
if (!r) {
radeon_bo_kunmap(rdev->uvd.vcpu_bo);
radeon_bo_unpin(rdev->uvd.vcpu_bo);
- rdev->uvd.cpu_addr = NULL;
- if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
- radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
- }
radeon_bo_unreserve(rdev->uvd.vcpu_bo);
-
- if (rdev->uvd.cpu_addr) {
- radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
- } else {
- rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
- }
}
return r;
}
@@ -188,10 +178,6 @@ int radeon_uvd_resume(struct radeon_device *rdev)
return r;
}
- /* Have been pin in cpu unmap unpin */
- radeon_bo_kunmap(rdev->uvd.vcpu_bo);
- radeon_bo_unpin(rdev->uvd.vcpu_bo);
-
r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
&rdev->uvd.gpu_addr);
if (r) {
@@ -627,19 +613,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
}
/* stitch together an UVD create msg */
- msg[0] = cpu_to_le32(0x00000de4);
- msg[1] = cpu_to_le32(0x00000000);
- msg[2] = cpu_to_le32(handle);
- msg[3] = cpu_to_le32(0x00000000);
- msg[4] = cpu_to_le32(0x00000000);
- msg[5] = cpu_to_le32(0x00000000);
- msg[6] = cpu_to_le32(0x00000000);
- msg[7] = cpu_to_le32(0x00000780);
- msg[8] = cpu_to_le32(0x00000440);
- msg[9] = cpu_to_le32(0x00000000);
- msg[10] = cpu_to_le32(0x01b37000);
+ msg[0] = 0x00000de4;
+ msg[1] = 0x00000000;
+ msg[2] = handle;
+ msg[3] = 0x00000000;
+ msg[4] = 0x00000000;
+ msg[5] = 0x00000000;
+ msg[6] = 0x00000000;
+ msg[7] = 0x00000780;
+ msg[8] = 0x00000440;
+ msg[9] = 0x00000000;
+ msg[10] = 0x01b37000;
for (i = 11; i < 1024; ++i)
- msg[i] = cpu_to_le32(0x0);
+ msg[i] = 0x0;
radeon_bo_kunmap(bo);
radeon_bo_unreserve(bo);
@@ -673,12 +659,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
}
/* stitch together an UVD destroy msg */
- msg[0] = cpu_to_le32(0x00000de4);
- msg[1] = cpu_to_le32(0x00000002);
- msg[2] = cpu_to_le32(handle);
- msg[3] = cpu_to_le32(0x00000000);
+ msg[0] = 0x00000de4;
+ msg[1] = 0x00000002;
+ msg[2] = handle;
+ msg[3] = 0x00000000;
for (i = 4; i < 1024; ++i)
- msg[i] = cpu_to_le32(0x0);
+ msg[i] = 0x0;
radeon_bo_kunmap(bo);
radeon_bo_unreserve(bo);
diff --git a/trunk/drivers/input/joystick/xpad.c b/trunk/drivers/input/joystick/xpad.c
index fa061d46527f..d6cbfe9df218 100644
--- a/trunk/drivers/input/joystick/xpad.c
+++ b/trunk/drivers/input/joystick/xpad.c
@@ -137,7 +137,7 @@ static const struct xpad_device {
{ 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
{ 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
- { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", XTYPE_XBOX360 },
{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
diff --git a/trunk/drivers/input/keyboard/Kconfig b/trunk/drivers/input/keyboard/Kconfig
index 7ac9c9818d55..62a2c0e4cc99 100644
--- a/trunk/drivers/input/keyboard/Kconfig
+++ b/trunk/drivers/input/keyboard/Kconfig
@@ -431,7 +431,6 @@ config KEYBOARD_TEGRA
config KEYBOARD_OPENCORES
tristate "OpenCores Keyboard Controller"
- depends on HAS_IOMEM
help
Say Y here if you want to use the OpenCores Keyboard Controller
http://www.opencores.org/project,keyboardcontroller
diff --git a/trunk/drivers/input/serio/Kconfig b/trunk/drivers/input/serio/Kconfig
index 1bda828f4b55..aebfe3ecb945 100644
--- a/trunk/drivers/input/serio/Kconfig
+++ b/trunk/drivers/input/serio/Kconfig
@@ -205,7 +205,6 @@ config SERIO_XILINX_XPS_PS2
config SERIO_ALTERA_PS2
tristate "Altera UP PS/2 controller"
- depends on HAS_IOMEM
help
Say Y here if you have Altera University Program PS/2 ports.
diff --git a/trunk/drivers/input/tablet/wacom_wac.c b/trunk/drivers/input/tablet/wacom_wac.c
index 384fbcd0cee0..518282da6d85 100644
--- a/trunk/drivers/input/tablet/wacom_wac.c
+++ b/trunk/drivers/input/tablet/wacom_wac.c
@@ -363,7 +363,6 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
case 0x160802: /* Cintiq 13HD Pro Pen */
case 0x180802: /* DTH2242 Pen */
- case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
wacom->tool[idx] = BTN_TOOL_PEN;
break;
@@ -402,7 +401,6 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
case 0x18080a: /* DTH2242 Eraser */
- case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
wacom->tool[idx] = BTN_TOOL_RUBBER;
break;
diff --git a/trunk/drivers/input/touchscreen/cyttsp_core.c b/trunk/drivers/input/touchscreen/cyttsp_core.c
index ae89d2609ab0..8e60437ac85b 100644
--- a/trunk/drivers/input/touchscreen/cyttsp_core.c
+++ b/trunk/drivers/input/touchscreen/cyttsp_core.c
@@ -116,15 +116,6 @@ static int ttsp_send_command(struct cyttsp *ts, u8 cmd)
return ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd);
}
-static int cyttsp_handshake(struct cyttsp *ts)
-{
- if (ts->pdata->use_hndshk)
- return ttsp_send_command(ts,
- ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
-
- return 0;
-}
-
static int cyttsp_load_bl_regs(struct cyttsp *ts)
{
memset(&ts->bl_data, 0, sizeof(ts->bl_data));
@@ -142,7 +133,7 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts)
memcpy(bl_cmd, bl_command, sizeof(bl_command));
if (ts->pdata->bl_keys)
memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS],
- ts->pdata->bl_keys, CY_NUM_BL_KEYS);
+ ts->pdata->bl_keys, sizeof(bl_command));
error = ttsp_write_block_data(ts, CY_REG_BASE,
sizeof(bl_cmd), bl_cmd);
@@ -176,10 +167,6 @@ static int cyttsp_set_operational_mode(struct cyttsp *ts)
if (error)
return error;
- error = cyttsp_handshake(ts);
- if (error)
- return error;
-
return ts->xy_data.act_dist == CY_ACT_DIST_DFLT ? -EIO : 0;
}
@@ -201,10 +188,6 @@ static int cyttsp_set_sysinfo_mode(struct cyttsp *ts)
if (error)
return error;
- error = cyttsp_handshake(ts);
- if (error)
- return error;
-
if (!ts->sysinfo_data.tts_verh && !ts->sysinfo_data.tts_verl)
return -EIO;
@@ -361,9 +344,12 @@ static irqreturn_t cyttsp_irq(int irq, void *handle)
goto out;
/* provide flow control handshake */
- error = cyttsp_handshake(ts);
- if (error)
- goto out;
+ if (ts->pdata->use_hndshk) {
+ error = ttsp_send_command(ts,
+ ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
+ if (error)
+ goto out;
+ }
if (unlikely(ts->state == CY_IDLE_STATE))
goto out;
diff --git a/trunk/drivers/input/touchscreen/cyttsp_core.h b/trunk/drivers/input/touchscreen/cyttsp_core.h
index f1ebde369f86..1aa3c6967e70 100644
--- a/trunk/drivers/input/touchscreen/cyttsp_core.h
+++ b/trunk/drivers/input/touchscreen/cyttsp_core.h
@@ -67,8 +67,8 @@ struct cyttsp_xydata {
/* TTSP System Information interface definition */
struct cyttsp_sysinfo_data {
u8 hst_mode;
- u8 mfg_stat;
u8 mfg_cmd;
+ u8 mfg_stat;
u8 cid[3];
u8 tt_undef1;
u8 uid[8];
diff --git a/trunk/drivers/irqchip/irq-gic.c b/trunk/drivers/irqchip/irq-gic.c
index 19ceaa60e0f4..1760ceb68b7b 100644
--- a/trunk/drivers/irqchip/irq-gic.c
+++ b/trunk/drivers/irqchip/irq-gic.c
@@ -705,7 +705,7 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
static int __cpuinit gic_secondary_init(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
- if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+ if (action == CPU_STARTING)
gic_cpu_init(&gic_data[0]);
return NOTIFY_OK;
}
diff --git a/trunk/drivers/media/Kconfig b/trunk/drivers/media/Kconfig
index 8270388e2a0d..7f5a7cac6dc7 100644
--- a/trunk/drivers/media/Kconfig
+++ b/trunk/drivers/media/Kconfig
@@ -136,9 +136,9 @@ config DVB_NET
# This Kconfig option is used by both PCI and USB drivers
config TTPCI_EEPROM
- tristate
- depends on I2C
- default n
+ tristate
+ depends on I2C
+ default n
source "drivers/media/dvb-core/Kconfig"
@@ -189,12 +189,6 @@ config MEDIA_SUBDRV_AUTOSELECT
If unsure say Y.
-config MEDIA_ATTACH
- bool
- depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT
- depends on MODULES
- default MODULES
-
source "drivers/media/i2c/Kconfig"
source "drivers/media/tuners/Kconfig"
source "drivers/media/dvb-frontends/Kconfig"
diff --git a/trunk/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/trunk/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 9eac5310942f..cb52438e53ac 100644
--- a/trunk/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/trunk/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -956,7 +956,7 @@ static int s5c73m3_oif_enum_frame_interval(struct v4l2_subdev *sd,
if (fie->pad != OIF_SOURCE_PAD)
return -EINVAL;
- if (fie->index >= ARRAY_SIZE(s5c73m3_intervals))
+ if (fie->index > ARRAY_SIZE(s5c73m3_intervals))
return -EINVAL;
mutex_lock(&state->lock);
diff --git a/trunk/drivers/media/pci/cx88/cx88-alsa.c b/trunk/drivers/media/pci/cx88/cx88-alsa.c
index aba5b1c649e6..27d62623274b 100644
--- a/trunk/drivers/media/pci/cx88/cx88-alsa.c
+++ b/trunk/drivers/media/pci/cx88/cx88-alsa.c
@@ -615,7 +615,7 @@ static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
int changed = 0;
u32 old;
- if (core->sd_wm8775)
+ if (core->board.audio_chip == V4L2_IDENT_WM8775)
snd_cx88_wm8775_volume_put(kcontrol, value);
left = value->value.integer.value[0] & 0x3f;
@@ -682,7 +682,8 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
vol ^= bit;
cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol);
/* Pass mute onto any WM8775 */
- if (core->sd_wm8775 && ((1<<6) == bit))
+ if ((core->board.audio_chip == V4L2_IDENT_WM8775) &&
+ ((1<<6) == bit))
wm8775_s_ctrl(core, V4L2_CID_AUDIO_MUTE, 0 != (vol & bit));
ret = 1;
}
@@ -902,7 +903,7 @@ static int cx88_audio_initdev(struct pci_dev *pci,
goto error;
/* If there's a wm8775 then add a Line-In ALC switch */
- if (core->sd_wm8775)
+ if (core->board.audio_chip == V4L2_IDENT_WM8775)
snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip));
strcpy (card->driver, "CX88x");
diff --git a/trunk/drivers/media/pci/cx88/cx88-video.c b/trunk/drivers/media/pci/cx88/cx88-video.c
index c7a9be1065c0..1b00615fd395 100644
--- a/trunk/drivers/media/pci/cx88/cx88-video.c
+++ b/trunk/drivers/media/pci/cx88/cx88-video.c
@@ -385,7 +385,8 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input)
/* The wm8775 module has the "2" route hardwired into
the initialization. Some boards may use different
routes for different inputs. HVR-1300 surely does */
- if (core->sd_wm8775) {
+ if (core->board.audio_chip &&
+ core->board.audio_chip == V4L2_IDENT_WM8775) {
call_all(core, audio, s_routing,
INPUT(input).audioroute, 0, 0);
}
@@ -770,7 +771,8 @@ static int video_open(struct file *file)
cx_write(MO_GP1_IO, core->board.radio.gpio1);
cx_write(MO_GP2_IO, core->board.radio.gpio2);
if (core->board.radio.audioroute) {
- if (core->sd_wm8775) {
+ if(core->board.audio_chip &&
+ core->board.audio_chip == V4L2_IDENT_WM8775) {
call_all(core, audio, s_routing,
core->board.radio.audioroute, 0, 0);
}
@@ -957,7 +959,7 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
u32 value,mask;
/* Pass changes onto any WM8775 */
- if (core->sd_wm8775) {
+ if (core->board.audio_chip == V4L2_IDENT_WM8775) {
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
wm8775_s_ctrl(core, ctrl->id, ctrl->val);
diff --git a/trunk/drivers/media/platform/coda.c b/trunk/drivers/media/platform/coda.c
index 9d1481a60bd9..48b8d7af386d 100644
--- a/trunk/drivers/media/platform/coda.c
+++ b/trunk/drivers/media/platform/coda.c
@@ -576,14 +576,6 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
-static int vidioc_create_bufs(struct file *file, void *priv,
- struct v4l2_create_buffers *create)
-{
- struct coda_ctx *ctx = fh_to_ctx(priv);
-
- return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create);
-}
-
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
@@ -618,7 +610,6 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = {
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_create_bufs = vidioc_create_bufs,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
diff --git a/trunk/drivers/media/platform/davinci/vpbe_display.c b/trunk/drivers/media/platform/davinci/vpbe_display.c
index d0b375cf565f..1802f11e939f 100644
--- a/trunk/drivers/media/platform/davinci/vpbe_display.c
+++ b/trunk/drivers/media/platform/davinci/vpbe_display.c
@@ -916,21 +916,6 @@ static int vpbe_display_s_fmt(struct file *file, void *priv,
other video window */
layer->pix_fmt = *pixfmt;
- if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) {
- struct vpbe_layer *otherlayer;
-
- otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer);
- /* if other layer is available, only
- * claim it, do not configure it
- */
- ret = osd_device->ops.request_layer(osd_device,
- otherlayer->layer_info.id);
- if (ret < 0) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "Display Manager failed to allocate layer\n");
- return -EBUSY;
- }
- }
/* Get osd layer config */
osd_device->ops.get_layer_config(osd_device,
diff --git a/trunk/drivers/media/platform/davinci/vpfe_capture.c b/trunk/drivers/media/platform/davinci/vpfe_capture.c
index 93609091cb23..8c50d3074866 100644
--- a/trunk/drivers/media/platform/davinci/vpfe_capture.c
+++ b/trunk/drivers/media/platform/davinci/vpfe_capture.c
@@ -1837,7 +1837,7 @@ static int vpfe_probe(struct platform_device *pdev)
if (NULL == ccdc_cfg) {
v4l2_err(pdev->dev.driver,
"Memory allocation failed for ccdc_cfg\n");
- goto probe_free_dev_mem;
+ goto probe_free_lock;
}
mutex_lock(&ccdc_lock);
@@ -1991,6 +1991,7 @@ static int vpfe_probe(struct platform_device *pdev)
free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
probe_free_ccdc_cfg_mem:
kfree(ccdc_cfg);
+probe_free_lock:
mutex_unlock(&ccdc_lock);
probe_free_dev_mem:
kfree(vpfe_dev);
diff --git a/trunk/drivers/media/platform/exynos4-is/fimc-is-regs.c b/trunk/drivers/media/platform/exynos4-is/fimc-is-regs.c
index d05eaa2c8490..b0ff67bc1b05 100644
--- a/trunk/drivers/media/platform/exynos4-is/fimc-is-regs.c
+++ b/trunk/drivers/media/platform/exynos4-is/fimc-is-regs.c
@@ -174,7 +174,7 @@ int fimc_is_hw_change_mode(struct fimc_is *is)
HIC_CAPTURE_STILL, HIC_CAPTURE_VIDEO,
};
- if (WARN_ON(is->config_index >= ARRAY_SIZE(cmd)))
+ if (WARN_ON(is->config_index > ARRAY_SIZE(cmd)))
return -EINVAL;
mcuctl_write(cmd[is->config_index], is, MCUCTL_REG_ISSR(0));
diff --git a/trunk/drivers/media/platform/exynos4-is/fimc-is.c b/trunk/drivers/media/platform/exynos4-is/fimc-is.c
index 0741945b79ed..47c6363d04e2 100644
--- a/trunk/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/trunk/drivers/media/platform/exynos4-is/fimc-is.c
@@ -48,6 +48,7 @@ static char *fimc_is_clocks[ISS_CLKS_MAX] = {
[ISS_CLK_LITE0] = "lite0",
[ISS_CLK_LITE1] = "lite1",
[ISS_CLK_MPLL] = "mpll",
+ [ISS_CLK_SYSREG] = "sysreg",
[ISS_CLK_ISP] = "isp",
[ISS_CLK_DRC] = "drc",
[ISS_CLK_FD] = "fd",
@@ -70,6 +71,7 @@ static void fimc_is_put_clocks(struct fimc_is *is)
for (i = 0; i < ISS_CLKS_MAX; i++) {
if (IS_ERR(is->clocks[i]))
continue;
+ clk_unprepare(is->clocks[i]);
clk_put(is->clocks[i]);
is->clocks[i] = ERR_PTR(-EINVAL);
}
@@ -88,6 +90,12 @@ static int fimc_is_get_clocks(struct fimc_is *is)
ret = PTR_ERR(is->clocks[i]);
goto err;
}
+ ret = clk_prepare(is->clocks[i]);
+ if (ret < 0) {
+ clk_put(is->clocks[i]);
+ is->clocks[i] = ERR_PTR(-EINVAL);
+ goto err;
+ }
}
return 0;
@@ -95,7 +103,7 @@ static int fimc_is_get_clocks(struct fimc_is *is)
fimc_is_put_clocks(is);
dev_err(&is->pdev->dev, "failed to get clock: %s\n",
fimc_is_clocks[i]);
- return ret;
+ return -ENXIO;
}
static int fimc_is_setup_clocks(struct fimc_is *is)
@@ -136,7 +144,7 @@ int fimc_is_enable_clocks(struct fimc_is *is)
for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
if (IS_ERR(is->clocks[i]))
continue;
- ret = clk_prepare_enable(is->clocks[i]);
+ ret = clk_enable(is->clocks[i]);
if (ret < 0) {
dev_err(&is->pdev->dev, "clock %s enable failed\n",
fimc_is_clocks[i]);
@@ -155,7 +163,7 @@ void fimc_is_disable_clocks(struct fimc_is *is)
for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
if (!IS_ERR(is->clocks[i])) {
- clk_disable_unprepare(is->clocks[i]);
+ clk_disable(is->clocks[i]);
pr_debug("disabled clock: %s\n", fimc_is_clocks[i]);
}
}
@@ -318,11 +326,6 @@ int fimc_is_start_firmware(struct fimc_is *is)
struct device *dev = &is->pdev->dev;
int ret;
- if (is->fw.f_w == NULL) {
- dev_err(dev, "firmware is not loaded\n");
- return -EINVAL;
- }
-
memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size);
wmb();
@@ -834,11 +837,23 @@ static int fimc_is_probe(struct platform_device *pdev)
goto err_clk;
}
pm_runtime_enable(dev);
-
+ /*
+ * Enable only the ISP power domain, keep FIMC-IS clocks off until
+ * the whole clock tree is configured. The ISP power domain needs
+	 * to be active in order to access any CMU_ISP clock registers.
+ */
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_irq;
+ ret = fimc_is_setup_clocks(is);
+ pm_runtime_put_sync(dev);
+
+ if (ret < 0)
+ goto err_irq;
+
+ is->clk_init = true;
+
is->alloc_ctx = vb2_dma_contig_init_ctx(dev);
if (IS_ERR(is->alloc_ctx)) {
ret = PTR_ERR(is->alloc_ctx);
@@ -860,8 +875,6 @@ static int fimc_is_probe(struct platform_device *pdev)
if (ret < 0)
goto err_dfs;
- pm_runtime_put_sync(dev);
-
dev_dbg(dev, "FIMC-IS registered successfully\n");
return 0;
@@ -881,11 +894,9 @@ static int fimc_is_probe(struct platform_device *pdev)
static int fimc_is_runtime_resume(struct device *dev)
{
struct fimc_is *is = dev_get_drvdata(dev);
- int ret;
- ret = fimc_is_setup_clocks(is);
- if (ret)
- return ret;
+ if (!is->clk_init)
+ return 0;
return fimc_is_enable_clocks(is);
}
@@ -894,7 +905,9 @@ static int fimc_is_runtime_suspend(struct device *dev)
{
struct fimc_is *is = dev_get_drvdata(dev);
- fimc_is_disable_clocks(is);
+ if (is->clk_init)
+ fimc_is_disable_clocks(is);
+
return 0;
}
@@ -928,8 +941,7 @@ static int fimc_is_remove(struct platform_device *pdev)
vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
fimc_is_put_clocks(is);
fimc_is_debugfs_remove(is);
- if (is->fw.f_w)
- release_firmware(is->fw.f_w);
+ release_firmware(is->fw.f_w);
fimc_is_free_cpu_memory(is);
return 0;
diff --git a/trunk/drivers/media/platform/exynos4-is/fimc-is.h b/trunk/drivers/media/platform/exynos4-is/fimc-is.h
index d7db133b493f..f5275a5b0156 100644
--- a/trunk/drivers/media/platform/exynos4-is/fimc-is.h
+++ b/trunk/drivers/media/platform/exynos4-is/fimc-is.h
@@ -73,6 +73,7 @@ enum {
ISS_CLK_LITE0,
ISS_CLK_LITE1,
ISS_CLK_MPLL,
+ ISS_CLK_SYSREG,
ISS_CLK_ISP,
ISS_CLK_DRC,
ISS_CLK_FD,
@@ -264,6 +265,7 @@ struct fimc_is {
spinlock_t slock;
struct clk *clocks[ISS_CLKS_MAX];
+ bool clk_init;
void __iomem *regs;
void __iomem *pmu_regs;
int irq;
diff --git a/trunk/drivers/media/platform/exynos4-is/fimc-isp.c b/trunk/drivers/media/platform/exynos4-is/fimc-isp.c
index 7ede30b5910f..d63947f7b302 100644
--- a/trunk/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/trunk/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -138,7 +138,7 @@ static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd,
return 0;
}
- mf->colorspace = V4L2_COLORSPACE_SRGB;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
mutex_lock(&isp->subdev_lock);
__is_get_frame_size(is, &cur_fmt);
@@ -194,7 +194,7 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
v4l2_dbg(1, debug, sd, "%s: pad%d: code: 0x%x, %dx%d\n",
__func__, fmt->pad, mf->code, mf->width, mf->height);
- mf->colorspace = V4L2_COLORSPACE_SRGB;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
mutex_lock(&isp->subdev_lock);
__isp_subdev_try_format(isp, fmt);
diff --git a/trunk/drivers/media/platform/exynos4-is/mipi-csis.c b/trunk/drivers/media/platform/exynos4-is/mipi-csis.c
index 254d70fe762a..a2eda9d5ac87 100644
--- a/trunk/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/trunk/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -746,7 +746,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
node = v4l2_of_get_next_endpoint(node, NULL);
if (!node) {
dev_err(&pdev->dev, "No port node at %s\n",
- pdev->dev.of_node->full_name);
+ node->full_name);
return -EINVAL;
}
/* Get port node and validate MIPI-CSI channel id. */
diff --git a/trunk/drivers/media/platform/s3c-camif/camif-core.h b/trunk/drivers/media/platform/s3c-camif/camif-core.h
index 35d2fcdc0036..261134baa655 100644
--- a/trunk/drivers/media/platform/s3c-camif/camif-core.h
+++ b/trunk/drivers/media/platform/s3c-camif/camif-core.h
@@ -229,7 +229,7 @@ struct camif_vp {
unsigned int state;
u16 fmt_flags;
u8 id;
- u16 rotation;
+ u8 rotation;
u8 hflip;
u8 vflip;
unsigned int offset;
diff --git a/trunk/drivers/media/platform/s5p-jpeg/Makefile b/trunk/drivers/media/platform/s5p-jpeg/Makefile
index d18cb5edd2d5..ddc2900d88a2 100644
--- a/trunk/drivers/media/platform/s5p-jpeg/Makefile
+++ b/trunk/drivers/media/platform/s5p-jpeg/Makefile
@@ -1,2 +1,2 @@
s5p-jpeg-objs := jpeg-core.o
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) := s5p-jpeg.o
diff --git a/trunk/drivers/media/platform/s5p-mfc/Makefile b/trunk/drivers/media/platform/s5p-mfc/Makefile
index 15f59b324fef..379008c6d09a 100644
--- a/trunk/drivers/media/platform/s5p-mfc/Makefile
+++ b/trunk/drivers/media/platform/s5p-mfc/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o
s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o
s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o
s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o
diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc.c b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc.c
index d12faa691af8..01f9ae0dadb0 100644
--- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -397,7 +397,7 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
leave_handle_frame:
spin_unlock_irqrestore(&dev->irqlock, flags);
if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
- || ctx->dst_queue_cnt < ctx->pb_count)
+ || ctx->dst_queue_cnt < ctx->dpb_count)
clear_work_bit(ctx);
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
wake_up_ctx(ctx, reason, err);
@@ -473,7 +473,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx);
- ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
+ ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
dev);
ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
dev);
@@ -562,7 +562,7 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf *mb_entry;
- mfc_debug(2, "Stream completed\n");
+ mfc_debug(2, "Stream completed");
s5p_mfc_clear_int_flags(dev);
ctx->int_type = reason;
@@ -1362,6 +1362,7 @@ static struct s5p_mfc_variant mfc_drvdata_v5 = {
.port_num = MFC_NUM_PORTS,
.buf_size = &buf_size_v5,
.buf_align = &mfc_buf_align_v5,
+ .mclk_name = "sclk_mfc",
.fw_name = "s5p-mfc.fw",
};
@@ -1388,6 +1389,7 @@ static struct s5p_mfc_variant mfc_drvdata_v6 = {
.port_num = MFC_NUM_PORTS_V6,
.buf_size = &buf_size_v6,
.buf_align = &mfc_buf_align_v6,
+ .mclk_name = "aclk_333",
.fw_name = "s5p-mfc-v6.fw",
};
diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index ef4074cd5316..202d1d7a37a8 100644
--- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -138,7 +138,6 @@ enum s5p_mfc_inst_state {
MFCINST_INIT = 100,
MFCINST_GOT_INST,
MFCINST_HEAD_PARSED,
- MFCINST_HEAD_PRODUCED,
MFCINST_BUFS_SET,
MFCINST_RUNNING,
MFCINST_FINISHING,
@@ -232,6 +231,7 @@ struct s5p_mfc_variant {
unsigned int port_num;
struct s5p_mfc_buf_size *buf_size;
struct s5p_mfc_buf_align *buf_align;
+ char *mclk_name;
char *fw_name;
};
@@ -438,7 +438,7 @@ struct s5p_mfc_enc_params {
u32 rc_framerate_num;
u32 rc_framerate_denom;
- struct {
+ union {
struct s5p_mfc_h264_enc_params h264;
struct s5p_mfc_mpeg4_enc_params mpeg4;
} codec;
@@ -602,7 +602,7 @@ struct s5p_mfc_ctx {
int after_packed_pb;
int sei_fp_parse;
- int pb_count;
+ int dpb_count;
int total_dpb_count;
int mv_count;
/* Buffers */
diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index dc1fc94a488d..2e5f30b40dea 100644
--- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -38,7 +38,7 @@ int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev)
dev->fw_virt_addr = dma_alloc_coherent(dev->mem_dev_l, dev->fw_size,
&dev->bank1, GFP_KERNEL);
- if (IS_ERR_OR_NULL(dev->fw_virt_addr)) {
+ if (IS_ERR(dev->fw_virt_addr)) {
dev->fw_virt_addr = NULL;
mfc_err("Allocating bitprocessor buffer failed\n");
return -ENOMEM;
diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
index 8e608f5aa0d7..bd5cd4ae993c 100644
--- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
+++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
@@ -30,8 +30,8 @@ extern int debug;
#define mfc_debug(level, fmt, args...)
#endif
-#define mfc_debug_enter() mfc_debug(5, "enter\n")
-#define mfc_debug_leave() mfc_debug(5, "leave\n")
+#define mfc_debug_enter() mfc_debug(5, "enter")
+#define mfc_debug_leave() mfc_debug(5, "leave")
#define mfc_err(fmt, args...) \
do { \
diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 00b07032f4f0..4af53bd2f182 100644
--- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -210,11 +210,11 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
/* Context is to decode a frame */
if (ctx->src_queue_cnt >= 1 &&
ctx->state == MFCINST_RUNNING &&
- ctx->dst_queue_cnt >= ctx->pb_count)
+ ctx->dst_queue_cnt >= ctx->dpb_count)
return 1;
/* Context is to return last frame */
if (ctx->state == MFCINST_FINISHING &&
- ctx->dst_queue_cnt >= ctx->pb_count)
+ ctx->dst_queue_cnt >= ctx->dpb_count)
return 1;
/* Context is to set buffers */
if (ctx->src_queue_cnt >= 1 &&
@@ -224,7 +224,7 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
/* Resolution change */
if ((ctx->state == MFCINST_RES_CHANGE_INIT ||
ctx->state == MFCINST_RES_CHANGE_FLUSH) &&
- ctx->dst_queue_cnt >= ctx->pb_count)
+ ctx->dst_queue_cnt >= ctx->dpb_count)
return 1;
if (ctx->state == MFCINST_RES_CHANGE_END &&
ctx->src_queue_cnt >= 1)
@@ -537,7 +537,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
mfc_err("vb2_reqbufs on capture failed\n");
return ret;
}
- if (reqbufs->count < ctx->pb_count) {
+ if (reqbufs->count < ctx->dpb_count) {
mfc_err("Not enough buffers allocated\n");
reqbufs->count = 0;
s5p_mfc_clock_on();
@@ -751,7 +751,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
if (ctx->state >= MFCINST_HEAD_PARSED &&
ctx->state < MFCINST_ABORT) {
- ctrl->val = ctx->pb_count;
+ ctrl->val = ctx->dpb_count;
break;
} else if (ctx->state != MFCINST_INIT) {
v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
@@ -763,7 +763,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0);
if (ctx->state >= MFCINST_HEAD_PARSED &&
ctx->state < MFCINST_ABORT) {
- ctrl->val = ctx->pb_count;
+ ctrl->val = ctx->dpb_count;
} else {
v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
return -EINVAL;
@@ -924,10 +924,10 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
/* Output plane count is 2 - one for Y and one for CbCr */
*plane_count = 2;
/* Setup buffer count */
- if (*buf_count < ctx->pb_count)
- *buf_count = ctx->pb_count;
- if (*buf_count > ctx->pb_count + MFC_MAX_EXTRA_DPB)
- *buf_count = ctx->pb_count + MFC_MAX_EXTRA_DPB;
+ if (*buf_count < ctx->dpb_count)
+ *buf_count = ctx->dpb_count;
+ if (*buf_count > ctx->dpb_count + MFC_MAX_EXTRA_DPB)
+ *buf_count = ctx->dpb_count + MFC_MAX_EXTRA_DPB;
if (*buf_count > MFC_MAX_BUFFERS)
*buf_count = MFC_MAX_BUFFERS;
} else {
diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 2549967b2f85..4f6b553c4b2d 100644
--- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -592,7 +592,7 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
return 1;
/* context is ready to encode a frame */
if ((ctx->state == MFCINST_RUNNING ||
- ctx->state == MFCINST_HEAD_PRODUCED) &&
+ ctx->state == MFCINST_HEAD_PARSED) &&
ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
return 1;
/* context is ready to encode remaining frames */
@@ -649,7 +649,6 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
struct s5p_mfc_enc_params *p = &ctx->enc_params;
struct s5p_mfc_buf *dst_mb;
unsigned long flags;
- unsigned int enc_pb_count;
if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) {
spin_lock_irqsave(&dev->irqlock, flags);
@@ -662,20 +661,19 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE);
spin_unlock_irqrestore(&dev->irqlock, flags);
}
-
- if (!IS_MFCV6(dev)) {
+ if (IS_MFCV6(dev)) {
+ ctx->state = MFCINST_HEAD_PARSED; /* for INIT_BUFFER cmd */
+ } else {
ctx->state = MFCINST_RUNNING;
if (s5p_mfc_ctx_ready(ctx))
set_work_bit_irqsave(ctx);
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
- } else {
- enc_pb_count = s5p_mfc_hw_call(dev->mfc_ops,
- get_enc_dpb_count, dev);
- if (ctx->pb_count < enc_pb_count)
- ctx->pb_count = enc_pb_count;
- ctx->state = MFCINST_HEAD_PRODUCED;
}
+ if (IS_MFCV6(dev))
+ ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops,
+ get_enc_dpb_count, dev);
+
return 0;
}
@@ -719,9 +717,9 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev);
strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev);
- mfc_debug(2, "Encoded slice type: %d\n", slice_type);
- mfc_debug(2, "Encoded stream size: %d\n", strm_size);
- mfc_debug(2, "Display order: %d\n",
+ mfc_debug(2, "Encoded slice type: %d", slice_type);
+ mfc_debug(2, "Encoded stream size: %d", strm_size);
+ mfc_debug(2, "Display order: %d",
mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT));
spin_lock_irqsave(&dev->irqlock, flags);
if (slice_type >= 0) {
@@ -1057,13 +1055,15 @@ static int vidioc_reqbufs(struct file *file, void *priv,
}
ctx->capture_state = QUEUE_BUFS_REQUESTED;
- ret = s5p_mfc_hw_call(ctx->dev->mfc_ops,
- alloc_codec_buffers, ctx);
- if (ret) {
- mfc_err("Failed to allocate encoding buffers\n");
- reqbufs->count = 0;
- ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
- return -ENOMEM;
+ if (!IS_MFCV6(dev)) {
+ ret = s5p_mfc_hw_call(ctx->dev->mfc_ops,
+ alloc_codec_buffers, ctx);
+ if (ret) {
+ mfc_err("Failed to allocate encoding buffers\n");
+ reqbufs->count = 0;
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ return -ENOMEM;
+ }
}
} else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
if (ctx->output_state != QUEUE_FREE) {
@@ -1071,19 +1071,6 @@ static int vidioc_reqbufs(struct file *file, void *priv,
ctx->output_state);
return -EINVAL;
}
-
- if (IS_MFCV6(dev)) {
- /* Check for min encoder buffers */
- if (ctx->pb_count &&
- (reqbufs->count < ctx->pb_count)) {
- reqbufs->count = ctx->pb_count;
- mfc_debug(2, "Minimum %d output buffers needed\n",
- ctx->pb_count);
- } else {
- ctx->pb_count = reqbufs->count;
- }
- }
-
ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
if (ret != 0) {
mfc_err("error in vb2_reqbufs() for E(S)\n");
@@ -1546,14 +1533,14 @@ int vidioc_encoder_cmd(struct file *file, void *priv,
spin_lock_irqsave(&dev->irqlock, flags);
if (list_empty(&ctx->src_queue)) {
- mfc_debug(2, "EOS: empty src queue, entering finishing state\n");
+ mfc_debug(2, "EOS: empty src queue, entering finishing state");
ctx->state = MFCINST_FINISHING;
if (s5p_mfc_ctx_ready(ctx))
set_work_bit_irqsave(ctx);
spin_unlock_irqrestore(&dev->irqlock, flags);
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
} else {
- mfc_debug(2, "EOS: marking last buffer of stream\n");
+ mfc_debug(2, "EOS: marking last buffer of stream");
buf = list_entry(ctx->src_queue.prev,
struct s5p_mfc_buf, list);
if (buf->flags & MFC_BUF_FLAG_USED)
@@ -1622,9 +1609,9 @@ static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb)
mfc_err("failed to get plane cookie\n");
return -EINVAL;
}
- mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx\n",
- vb->v4l2_buf.index, i,
- vb2_dma_contig_plane_dma_addr(vb, i));
+ mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx",
+ vb->v4l2_buf.index, i,
+ vb2_dma_contig_plane_dma_addr(vb, i));
}
return 0;
}
@@ -1773,27 +1760,11 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
struct s5p_mfc_dev *dev = ctx->dev;
- if (IS_MFCV6(dev) && (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
-
- if ((ctx->state == MFCINST_GOT_INST) &&
- (dev->curr_ctx == ctx->num) && dev->hw_lock) {
- s5p_mfc_wait_for_done_ctx(ctx,
- S5P_MFC_R2H_CMD_SEQ_DONE_RET,
- 0);
- }
-
- if (ctx->src_bufs_cnt < ctx->pb_count) {
- mfc_err("Need minimum %d OUTPUT buffers\n",
- ctx->pb_count);
- return -EINVAL;
- }
- }
-
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
/* If context is ready then dev = work->data;schedule it to run */
if (s5p_mfc_ctx_ready(ctx))
set_work_bit_irqsave(ctx);
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
-
return 0;
}
@@ -1949,7 +1920,6 @@ int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
if (controls[i].is_volatile && ctx->ctrls[i])
ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE;
}
- v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
return 0;
}
diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 368582b091bf..0af05a2d1cd4 100644
--- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -1275,8 +1275,8 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
s5p_mfc_clean_ctx_int_flags(ctx);
- mfc_debug(2, "encoding buffer with index=%d state=%d\n",
- src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
+ mfc_debug(2, "encoding buffer with index=%d state=%d",
+ src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
s5p_mfc_encode_one_frame_v5(ctx);
return 0;
}
diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 66f0d042357f..7e76fce2e524 100644
--- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -62,6 +62,12 @@ static void s5p_mfc_release_dec_desc_buffer_v6(struct s5p_mfc_ctx *ctx)
/* NOP */
}
+static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev)
+{
+ /* NOP */
+ return -1;
+}
+
/* Allocate codec buffers */
static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
{
@@ -161,7 +167,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
ctx->bank1.size =
ctx->scratch_buf_size + ctx->tmv_buffer_size +
- (ctx->pb_count * (ctx->luma_dpb_size +
+ (ctx->dpb_count * (ctx->luma_dpb_size +
ctx->chroma_dpb_size + ctx->me_buffer_size));
ctx->bank2.size = 0;
break;
@@ -175,7 +181,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
ctx->bank1.size =
ctx->scratch_buf_size + ctx->tmv_buffer_size +
- (ctx->pb_count * (ctx->luma_dpb_size +
+ (ctx->dpb_count * (ctx->luma_dpb_size +
ctx->chroma_dpb_size + ctx->me_buffer_size));
ctx->bank2.size = 0;
break;
@@ -192,6 +198,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
}
BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
}
+
return 0;
}
@@ -442,8 +449,8 @@ static int s5p_mfc_set_enc_stream_buffer_v6(struct s5p_mfc_ctx *ctx,
WRITEL(addr, S5P_FIMV_E_STREAM_BUFFER_ADDR_V6); /* 16B align */
WRITEL(size, S5P_FIMV_E_STREAM_BUFFER_SIZE_V6);
- mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d\n",
- addr, size);
+ mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d",
+ addr, size);
return 0;
}
@@ -456,8 +463,8 @@ static void s5p_mfc_set_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
WRITEL(y_addr, S5P_FIMV_E_SOURCE_LUMA_ADDR_V6); /* 256B align */
WRITEL(c_addr, S5P_FIMV_E_SOURCE_CHROMA_ADDR_V6);
- mfc_debug(2, "enc src y buf addr: 0x%08lx\n", y_addr);
- mfc_debug(2, "enc src c buf addr: 0x%08lx\n", c_addr);
+ mfc_debug(2, "enc src y buf addr: 0x%08lx", y_addr);
+ mfc_debug(2, "enc src c buf addr: 0x%08lx", c_addr);
}
static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
@@ -472,8 +479,8 @@ static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
enc_recon_y_addr = READL(S5P_FIMV_E_RECON_LUMA_DPB_ADDR_V6);
enc_recon_c_addr = READL(S5P_FIMV_E_RECON_CHROMA_DPB_ADDR_V6);
- mfc_debug(2, "recon y addr: 0x%08lx\n", enc_recon_y_addr);
- mfc_debug(2, "recon c addr: 0x%08lx\n", enc_recon_c_addr);
+ mfc_debug(2, "recon y addr: 0x%08lx", enc_recon_y_addr);
+ mfc_debug(2, "recon c addr: 0x%08lx", enc_recon_c_addr);
}
/* Set encoding ref & codec buffer */
@@ -490,7 +497,7 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx)
mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1);
- for (i = 0; i < ctx->pb_count; i++) {
+ for (i = 0; i < ctx->dpb_count; i++) {
WRITEL(buf_addr1, S5P_FIMV_E_LUMA_DPB_V6 + (4 * i));
buf_addr1 += ctx->luma_dpb_size;
WRITEL(buf_addr1, S5P_FIMV_E_CHROMA_DPB_V6 + (4 * i));
@@ -513,7 +520,7 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx)
buf_size1 -= ctx->tmv_buffer_size;
mfc_debug(2, "Buf1: %u, buf_size1: %d (ref frames %d)\n",
- buf_addr1, buf_size1, ctx->pb_count);
+ buf_addr1, buf_size1, ctx->dpb_count);
if (buf_size1 < 0) {
mfc_debug(2, "Not enough memory has been allocated.\n");
return -ENOMEM;
@@ -1424,8 +1431,8 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
- mfc_debug(2, "enc src y addr: 0x%08lx\n", src_y_addr);
- mfc_debug(2, "enc src c addr: 0x%08lx\n", src_c_addr);
+ mfc_debug(2, "enc src y addr: 0x%08lx", src_y_addr);
+ mfc_debug(2, "enc src c addr: 0x%08lx", src_c_addr);
s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr);
@@ -1515,6 +1522,22 @@ static inline int s5p_mfc_run_init_enc_buffers(struct s5p_mfc_ctx *ctx)
struct s5p_mfc_dev *dev = ctx->dev;
int ret;
+ ret = s5p_mfc_alloc_codec_buffers_v6(ctx);
+ if (ret) {
+ mfc_err("Failed to allocate encoding buffers.\n");
+ return -ENOMEM;
+ }
+
+ /* Header was generated now starting processing
+ * First set the reference frame buffers
+ */
+ if (ctx->capture_state != QUEUE_BUFS_REQUESTED) {
+ mfc_err("It seems that destination buffers were not\n"
+ "requested. MFC requires that header should be generated\n"
+ "before allocating codec buffer.\n");
+ return -EAGAIN;
+ }
+
dev->curr_ctx = ctx->num;
s5p_mfc_clean_ctx_int_flags(ctx);
ret = s5p_mfc_set_enc_ref_buffer_v6(ctx);
@@ -1559,7 +1582,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
mfc_debug(1, "Seting new context to %p\n", ctx);
/* Got context to run in ctx */
mfc_debug(1, "ctx->dst_queue_cnt=%d ctx->dpb_count=%d ctx->src_queue_cnt=%d\n",
- ctx->dst_queue_cnt, ctx->pb_count, ctx->src_queue_cnt);
+ ctx->dst_queue_cnt, ctx->dpb_count, ctx->src_queue_cnt);
mfc_debug(1, "ctx->state=%d\n", ctx->state);
/* Last frame has already been sent to MFC
* Now obtaining frames from MFC buffer */
@@ -1624,7 +1647,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
case MFCINST_GOT_INST:
s5p_mfc_run_init_enc(ctx);
break;
- case MFCINST_HEAD_PRODUCED:
+ case MFCINST_HEAD_PARSED: /* Only for MFC6.x */
ret = s5p_mfc_run_init_enc_buffers(ctx);
break;
default:
@@ -1707,7 +1730,7 @@ static int s5p_mfc_get_dspl_status_v6(struct s5p_mfc_dev *dev)
return mfc_read(dev, S5P_FIMV_D_DISPLAY_STATUS_V6);
}
-static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev)
+static int s5p_mfc_get_decoded_status_v6(struct s5p_mfc_dev *dev)
{
return mfc_read(dev, S5P_FIMV_D_DECODED_STATUS_V6);
}
diff --git a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 11d5f1dada32..6aa38a56aaf2 100644
--- a/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/trunk/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -50,6 +50,19 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
goto err_p_ip_clk;
}
+ pm->clock = clk_get(&dev->plat_dev->dev, dev->variant->mclk_name);
+ if (IS_ERR(pm->clock)) {
+ mfc_err("Failed to get MFC clock\n");
+ ret = PTR_ERR(pm->clock);
+ goto err_g_ip_clk_2;
+ }
+
+ ret = clk_prepare(pm->clock);
+ if (ret) {
+ mfc_err("Failed to prepare MFC clock\n");
+ goto err_p_ip_clk_2;
+ }
+
atomic_set(&pm->power, 0);
#ifdef CONFIG_PM_RUNTIME
pm->device = &dev->plat_dev->dev;
@@ -59,6 +72,10 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
atomic_set(&clk_ref, 0);
#endif
return 0;
+err_p_ip_clk_2:
+ clk_put(pm->clock);
+err_g_ip_clk_2:
+ clk_unprepare(pm->clock_gate);
err_p_ip_clk:
clk_put(pm->clock_gate);
err_g_ip_clk:
@@ -69,6 +86,8 @@ void s5p_mfc_final_pm(struct s5p_mfc_dev *dev)
{
clk_unprepare(pm->clock_gate);
clk_put(pm->clock_gate);
+ clk_unprepare(pm->clock);
+ clk_put(pm->clock);
#ifdef CONFIG_PM_RUNTIME
pm_runtime_disable(pm->device);
#endif
@@ -79,7 +98,7 @@ int s5p_mfc_clock_on(void)
int ret;
#ifdef CLK_DEBUG
atomic_inc(&clk_ref);
- mfc_debug(3, "+ %d\n", atomic_read(&clk_ref));
+ mfc_debug(3, "+ %d", atomic_read(&clk_ref));
#endif
ret = clk_enable(pm->clock_gate);
return ret;
@@ -89,7 +108,7 @@ void s5p_mfc_clock_off(void)
{
#ifdef CLK_DEBUG
atomic_dec(&clk_ref);
- mfc_debug(3, "- %d\n", atomic_read(&clk_ref));
+ mfc_debug(3, "- %d", atomic_read(&clk_ref));
#endif
clk_disable(pm->clock_gate);
}
diff --git a/trunk/drivers/media/platform/sh_veu.c b/trunk/drivers/media/platform/sh_veu.c
index 59a9deefb242..0b32cc3f6a47 100644
--- a/trunk/drivers/media/platform/sh_veu.c
+++ b/trunk/drivers/media/platform/sh_veu.c
@@ -905,11 +905,11 @@ static int sh_veu_queue_setup(struct vb2_queue *vq,
if (ftmp.fmt.pix.width != pix->width ||
ftmp.fmt.pix.height != pix->height)
return -EINVAL;
- size = pix->bytesperline ? pix->bytesperline * pix->height * fmt->depth / fmt->ydepth :
- pix->width * pix->height * fmt->depth / fmt->ydepth;
+ size = pix->bytesperline ? pix->bytesperline * pix->height :
+ pix->width * pix->height * fmt->depth >> 3;
} else {
vfmt = sh_veu_get_vfmt(veu, vq->type);
- size = vfmt->bytesperline * vfmt->frame.height * vfmt->fmt->depth / vfmt->fmt->ydepth;
+ size = vfmt->bytesperline * vfmt->frame.height;
}
if (count < 2)
@@ -1033,6 +1033,8 @@ static int sh_veu_release(struct file *file)
dev_dbg(veu->dev, "Releasing instance %p\n", veu_file);
+ pm_runtime_put(veu->dev);
+
if (veu_file == veu->capture) {
veu->capture = NULL;
vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE));
@@ -1048,8 +1050,6 @@ static int sh_veu_release(struct file *file)
veu->m2m_ctx = NULL;
}
- pm_runtime_put(veu->dev);
-
kfree(veu_file);
return 0;
@@ -1138,7 +1138,10 @@ static irqreturn_t sh_veu_isr(int irq, void *dev_id)
veu->xaction++;
- return IRQ_WAKE_THREAD;
+ if (!veu->aborting)
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_HANDLED;
}
static int sh_veu_probe(struct platform_device *pdev)
diff --git a/trunk/drivers/media/platform/soc_camera/soc_camera.c b/trunk/drivers/media/platform/soc_camera/soc_camera.c
index 3a4efbdc7668..eea832c5fd01 100644
--- a/trunk/drivers/media/platform/soc_camera/soc_camera.c
+++ b/trunk/drivers/media/platform/soc_camera/soc_camera.c
@@ -643,9 +643,9 @@ static int soc_camera_close(struct file *file)
if (ici->ops->init_videobuf2)
vb2_queue_release(&icd->vb2_vidq);
- __soc_camera_power_off(icd);
-
ici->ops->remove(icd);
+
+ __soc_camera_power_off(icd);
}
if (icd->streamer == file)
diff --git a/trunk/drivers/media/radio/Kconfig b/trunk/drivers/media/radio/Kconfig
index d529ba788f41..c0beee2fa37c 100644
--- a/trunk/drivers/media/radio/Kconfig
+++ b/trunk/drivers/media/radio/Kconfig
@@ -22,7 +22,6 @@ config RADIO_SI476X
tristate "Silicon Laboratories Si476x I2C FM Radio"
depends on I2C && VIDEO_V4L2
depends on MFD_SI476X_CORE
- depends on SND_SOC
select SND_SOC_SI476X
---help---
Choose Y here if you have this FM radio chip.
diff --git a/trunk/drivers/media/radio/radio-si476x.c b/trunk/drivers/media/radio/radio-si476x.c
index 9dc8bafe6486..9430c6a29937 100644
--- a/trunk/drivers/media/radio/radio-si476x.c
+++ b/trunk/drivers/media/radio/radio-si476x.c
@@ -44,7 +44,7 @@
#define FREQ_MUL (10000000 / 625)
-#define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0x80 & (status))
+#define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0b10000000 & (status))
#define DRIVER_NAME "si476x-radio"
#define DRIVER_CARD "SI476x AM/FM Receiver"
diff --git a/trunk/drivers/media/tuners/Kconfig b/trunk/drivers/media/tuners/Kconfig
index 15665debc572..f6768cad001a 100644
--- a/trunk/drivers/media/tuners/Kconfig
+++ b/trunk/drivers/media/tuners/Kconfig
@@ -1,3 +1,23 @@
+config MEDIA_ATTACH
+ bool "Load and attach frontend and tuner driver modules as needed"
+ depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT
+ depends on MODULES
+ default y if !EXPERT
+ help
+ Remove the static dependency of DVB card drivers on all
+ frontend modules for all possible card variants. Instead,
+ allow the card drivers to only load the frontend modules
+ they require.
+
+ Also, tuner module will automatically load a tuner driver
+ when needed, for analog mode.
+
+ This saves several KBytes of memory.
+
+ Note: You will need module-init-tools v3.2 or later for this feature.
+
+ If unsure say Y.
+
# Analog TV tuners, auto-loaded via tuner.ko
config MEDIA_TUNER
tristate
diff --git a/trunk/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/trunk/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 2cc8ec70e3b6..22015fe1a0f3 100644
--- a/trunk/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/trunk/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -376,7 +376,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
struct rtl28xxu_req req_mxl5007t = {0xd9c0, CMD_I2C_RD, 1, buf};
struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf};
struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf};
- struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf};
+ struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 5, buf};
dev_dbg(&d->udev->dev, "%s:\n", __func__);
@@ -481,9 +481,9 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
goto found;
}
- /* check R820T ID register; reg=00 val=69 */
+ /* check R820T by reading tuner stats at I2C addr 0x1a */
ret = rtl28xxu_ctrl_msg(d, &req_r820t);
- if (ret == 0 && buf[0] == 0x69) {
+ if (ret == 0) {
priv->tuner = TUNER_RTL2832_R820T;
priv->tuner_name = "R820T";
goto found;
diff --git a/trunk/drivers/media/usb/gspca/sonixb.c b/trunk/drivers/media/usb/gspca/sonixb.c
index d7ff3b9687c5..3fe207e038c7 100644
--- a/trunk/drivers/media/usb/gspca/sonixb.c
+++ b/trunk/drivers/media/usb/gspca/sonixb.c
@@ -1159,13 +1159,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
regs[0x01] = 0x44; /* Select 24 Mhz clock */
regs[0x12] = 0x02; /* Set hstart to 2 */
}
- break;
- case SENSOR_PAS202:
- /* For some unknown reason we need to increase hstart by 1 on
- the sn9c103, otherwise we get wrong colors (bayer shift). */
- if (sd->bridge == BRIDGE_103)
- regs[0x12] += 1;
- break;
}
/* Disable compression when the raw bayer format has been selected */
if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW)
diff --git a/trunk/drivers/media/usb/pwc/pwc.h b/trunk/drivers/media/usb/pwc/pwc.h
index 81b017a554bc..7a6a0d39c2c6 100644
--- a/trunk/drivers/media/usb/pwc/pwc.h
+++ b/trunk/drivers/media/usb/pwc/pwc.h
@@ -226,7 +226,7 @@ struct pwc_device
struct list_head queued_bufs;
spinlock_t queued_bufs_lock; /* Protects queued_bufs */
- /* If taking both locks vb_queue_lock must always be locked first! */
+ /* Note if taking both locks v4l2_lock must always be locked first! */
struct mutex v4l2_lock; /* Protects everything else */
struct mutex vb_queue_lock; /* Protects vb_queue and capt_file */
diff --git a/trunk/drivers/media/v4l2-core/v4l2-ctrls.c b/trunk/drivers/media/v4l2-core/v4l2-ctrls.c
index fccd08b66d1a..ebb8e48619a2 100644
--- a/trunk/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/trunk/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1835,8 +1835,6 @@ bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl)
{
if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_TX)
return true;
- if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_RX)
- return true;
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
case V4L2_CID_AUDIO_VOLUME:
diff --git a/trunk/drivers/media/v4l2-core/v4l2-ioctl.c b/trunk/drivers/media/v4l2-core/v4l2-ioctl.c
index 7658586fe5f4..f81bda1a48ec 100644
--- a/trunk/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/trunk/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -243,6 +243,7 @@ static void v4l_print_format(const void *arg, bool write_only)
const struct v4l2_vbi_format *vbi;
const struct v4l2_sliced_vbi_format *sliced;
const struct v4l2_window *win;
+ const struct v4l2_clip *clip;
unsigned i;
pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -252,7 +253,7 @@ static void v4l_print_format(const void *arg, bool write_only)
pix = &p->fmt.pix;
pr_cont(", width=%u, height=%u, "
"pixelformat=%c%c%c%c, field=%s, "
- "bytesperline=%u, sizeimage=%u, colorspace=%d\n",
+ "bytesperline=%u sizeimage=%u, colorspace=%d\n",
pix->width, pix->height,
(pix->pixelformat & 0xff),
(pix->pixelformat >> 8) & 0xff,
@@ -283,14 +284,20 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
win = &p->fmt.win;
- /* Note: we can't print the clip list here since the clips
- * pointer is a userspace pointer, not a kernelspace
- * pointer. */
- pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, chromakey=0x%08x, clipcount=%u, clips=%p, bitmap=%p, global_alpha=0x%02x\n",
- win->w.width, win->w.height, win->w.left, win->w.top,
+ pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, "
+ "chromakey=0x%08x, bitmap=%p, "
+ "global_alpha=0x%02x\n",
+ win->w.width, win->w.height,
+ win->w.left, win->w.top,
prt_names(win->field, v4l2_field_names),
- win->chromakey, win->clipcount, win->clips,
- win->bitmap, win->global_alpha);
+ win->chromakey, win->bitmap, win->global_alpha);
+ clip = win->clips;
+ for (i = 0; i < win->clipcount; i++) {
+ printk(KERN_DEBUG "clip %u: wxh=%dx%d, x,y=%d,%d\n",
+ i, clip->c.width, clip->c.height,
+ clip->c.left, clip->c.top);
+ clip = clip->next;
+ }
break;
case V4L2_BUF_TYPE_VBI_CAPTURE:
case V4L2_BUF_TYPE_VBI_OUTPUT:
@@ -325,7 +332,7 @@ static void v4l_print_framebuffer(const void *arg, bool write_only)
pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, "
"height=%u, pixelformat=%c%c%c%c, "
- "bytesperline=%u, sizeimage=%u, colorspace=%d\n",
+ "bytesperline=%u sizeimage=%u, colorspace=%d\n",
p->capability, p->flags, p->base,
p->fmt.width, p->fmt.height,
(p->fmt.pixelformat & 0xff),
@@ -346,7 +353,7 @@ static void v4l_print_modulator(const void *arg, bool write_only)
const struct v4l2_modulator *p = arg;
if (write_only)
- pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans);
+ pr_cont("index=%u, txsubchans=0x%x", p->index, p->txsubchans);
else
pr_cont("index=%u, name=%.*s, capability=0x%x, "
"rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
@@ -438,13 +445,13 @@ static void v4l_print_buffer(const void *arg, bool write_only)
for (i = 0; i < p->length; ++i) {
plane = &p->m.planes[i];
printk(KERN_DEBUG
- "plane %d: bytesused=%d, data_offset=0x%08x, "
+ "plane %d: bytesused=%d, data_offset=0x%08x "
"offset/userptr=0x%lx, length=%d\n",
i, plane->bytesused, plane->data_offset,
plane->m.userptr, plane->length);
}
} else {
- pr_cont(", bytesused=%d, offset/userptr=0x%lx, length=%d\n",
+ pr_cont("bytesused=%d, offset/userptr=0x%lx, length=%d\n",
p->bytesused, p->m.userptr, p->length);
}
@@ -497,8 +504,6 @@ static void v4l_print_streamparm(const void *arg, bool write_only)
c->capability, c->outputmode,
c->timeperframe.numerator, c->timeperframe.denominator,
c->extendedmode, c->writebuffers);
- } else {
- pr_cont("\n");
}
}
@@ -729,11 +734,11 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only)
p->type);
switch (p->type) {
case V4L2_FRMSIZE_TYPE_DISCRETE:
- pr_cont(", wxh=%ux%u\n",
+ pr_cont(" wxh=%ux%u\n",
p->discrete.width, p->discrete.height);
break;
case V4L2_FRMSIZE_TYPE_STEPWISE:
- pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n",
+ pr_cont(" min=%ux%u, max=%ux%u, step=%ux%u\n",
p->stepwise.min_width, p->stepwise.min_height,
p->stepwise.step_width, p->stepwise.step_height,
p->stepwise.max_width, p->stepwise.max_height);
@@ -759,12 +764,12 @@ static void v4l_print_frmivalenum(const void *arg, bool write_only)
p->width, p->height, p->type);
switch (p->type) {
case V4L2_FRMIVAL_TYPE_DISCRETE:
- pr_cont(", fps=%d/%d\n",
+ pr_cont(" fps=%d/%d\n",
p->discrete.numerator,
p->discrete.denominator);
break;
case V4L2_FRMIVAL_TYPE_STEPWISE:
- pr_cont(", min=%d/%d, max=%d/%d, step=%d/%d\n",
+ pr_cont(" min=%d/%d, max=%d/%d, step=%d/%d\n",
p->stepwise.min.numerator,
p->stepwise.min.denominator,
p->stepwise.max.numerator,
@@ -802,8 +807,8 @@ static void v4l_print_event(const void *arg, bool write_only)
pr_cont("value64=%lld, ", c->value64);
else
pr_cont("value=%d, ", c->value);
- pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, "
- "default_value=%d\n",
+ pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d,"
+ " default_value=%d\n",
c->flags, c->minimum, c->maximum,
c->step, c->default_value);
break;
@@ -840,7 +845,7 @@ static void v4l_print_freq_band(const void *arg, bool write_only)
const struct v4l2_frequency_band *p = arg;
pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, "
- "rangelow=%u, rangehigh=%u, modulation=0x%x\n",
+ "rangelow=%u, rangehigh=%u, modulation=0x%x\n",
p->tuner, p->type, p->index,
p->capability, p->rangelow,
p->rangehigh, p->modulation);
diff --git a/trunk/drivers/media/v4l2-core/v4l2-mem2mem.c b/trunk/drivers/media/v4l2-core/v4l2-mem2mem.c
index e96497f7c3ed..66f599fcb829 100644
--- a/trunk/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/trunk/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -205,7 +205,7 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
struct v4l2_m2m_dev *m2m_dev;
- unsigned long flags_job, flags_out, flags_cap;
+ unsigned long flags_job, flags;
m2m_dev = m2m_ctx->m2m_dev;
dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
@@ -223,26 +223,23 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
return;
}
- spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
+ spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
- flags_out);
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("No input buffers available\n");
return;
}
- spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
+ spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
- flags_cap);
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
- flags_out);
+ spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("No output buffers available\n");
return;
}
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
+ spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
if (m2m_dev->m2m_ops->job_ready
&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
@@ -374,20 +371,6 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
-/**
- * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
- * on the type
- */
-int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
- struct v4l2_create_buffers *create)
-{
- struct vb2_queue *vq;
-
- vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
- return vb2_create_bufs(vq, create);
-}
-EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
-
/**
* v4l2_m2m_expbuf() - export a source or destination buffer, depending on
* the type
@@ -503,10 +486,8 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
if (m2m_ctx->m2m_dev->m2m_ops->unlock)
m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
- if (list_empty(&src_q->done_list))
- poll_wait(file, &src_q->done_wq, wait);
- if (list_empty(&dst_q->done_list))
- poll_wait(file, &dst_q->done_wq, wait);
+ poll_wait(file, &src_q->done_wq, wait);
+ poll_wait(file, &dst_q->done_wq, wait);
if (m2m_ctx->m2m_dev->m2m_ops->lock)
m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
diff --git a/trunk/drivers/media/v4l2-core/videobuf2-core.c b/trunk/drivers/media/v4l2-core/videobuf2-core.c
index e3bdc3be91e1..7d833eefaf4e 100644
--- a/trunk/drivers/media/v4l2-core/videobuf2-core.c
+++ b/trunk/drivers/media/v4l2-core/videobuf2-core.c
@@ -2014,8 +2014,7 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
if (list_empty(&q->queued_list))
return res | POLLERR;
- if (list_empty(&q->done_list))
- poll_wait(file, &q->done_wq, wait);
+ poll_wait(file, &q->done_wq, wait);
/*
* Take first buffer available for dequeuing.
diff --git a/trunk/drivers/mfd/tps6586x.c b/trunk/drivers/mfd/tps6586x.c
index 4b93ed4d5cd6..721b9186a5d1 100644
--- a/trunk/drivers/mfd/tps6586x.c
+++ b/trunk/drivers/mfd/tps6586x.c
@@ -107,7 +107,7 @@ static struct mfd_cell tps6586x_cell[] = {
.name = "tps6586x-gpio",
},
{
- .name = "tps6586x-regulator",
+ .name = "tps6586x-pmic",
},
{
.name = "tps6586x-rtc",
diff --git a/trunk/drivers/misc/mei/init.c b/trunk/drivers/misc/mei/init.c
index f580d30bb784..713d89fedc46 100644
--- a/trunk/drivers/misc/mei/init.c
+++ b/trunk/drivers/misc/mei/init.c
@@ -197,8 +197,6 @@ void mei_stop(struct mei_device *dev)
{
dev_dbg(&dev->pdev->dev, "stopping the device.\n");
- flush_scheduled_work();
-
mutex_lock(&dev->device_lock);
cancel_delayed_work(&dev->timer_work);
@@ -212,6 +210,8 @@ void mei_stop(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
+ flush_scheduled_work();
+
mei_watchdog_unregister(dev);
}
EXPORT_SYMBOL_GPL(mei_stop);
diff --git a/trunk/drivers/misc/mei/nfc.c b/trunk/drivers/misc/mei/nfc.c
index d0c6907dfd92..3adf8a70f26e 100644
--- a/trunk/drivers/misc/mei/nfc.c
+++ b/trunk/drivers/misc/mei/nfc.c
@@ -142,8 +142,6 @@ static void mei_nfc_free(struct mei_nfc_dev *ndev)
mei_cl_unlink(ndev->cl_info);
kfree(ndev->cl_info);
}
-
- memset(ndev, 0, sizeof(struct mei_nfc_dev));
}
static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
diff --git a/trunk/drivers/misc/mei/pci-me.c b/trunk/drivers/misc/mei/pci-me.c
index 0f268329bd3a..a727464e9c3f 100644
--- a/trunk/drivers/misc/mei/pci-me.c
+++ b/trunk/drivers/misc/mei/pci-me.c
@@ -325,7 +325,6 @@ static int mei_me_pci_resume(struct device *device)
mutex_lock(&dev->device_lock);
dev->dev_state = MEI_DEV_POWER_UP;
- mei_clear_interrupts(dev);
mei_reset(dev, 1);
mutex_unlock(&dev->device_lock);
diff --git a/trunk/drivers/net/bonding/bond_main.c b/trunk/drivers/net/bonding/bond_main.c
index f97569613526..29b846cbfb48 100644
--- a/trunk/drivers/net/bonding/bond_main.c
+++ b/trunk/drivers/net/bonding/bond_main.c
@@ -764,8 +764,8 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
struct net_device *bond_dev, *vlan_dev, *upper_dev;
struct vlan_entry *vlan;
- read_lock(&bond->lock);
rcu_read_lock();
+ read_lock(&bond->lock);
bond_dev = bond->dev;
@@ -787,19 +787,12 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
if (vlan_dev)
__bond_resend_igmp_join_requests(vlan_dev);
}
- rcu_read_unlock();
- /* We use curr_slave_lock to protect against concurrent access to
- * igmp_retrans from multiple running instances of this function and
- * bond_change_active_slave
- */
- write_lock_bh(&bond->curr_slave_lock);
- if (bond->igmp_retrans > 1) {
- bond->igmp_retrans--;
+ if (--bond->igmp_retrans > 0)
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
- }
- write_unlock_bh(&bond->curr_slave_lock);
+
read_unlock(&bond->lock);
+ rcu_read_unlock();
}
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
@@ -1964,10 +1957,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
err_undo_flags:
bond_compute_features(bond);
- /* Enslave of first slave has failed and we need to fix master's mac */
- if (bond->slave_cnt == 0 &&
- ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
- eth_hw_addr_random(bond_dev);
return res;
}
@@ -2413,8 +2402,7 @@ static void bond_miimon_commit(struct bonding *bond)
pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
bond->dev->name, slave->dev->name,
- slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
- slave->duplex ? "full" : "half");
+ slave->speed, slave->duplex ? "full" : "half");
/* notify ad that the link status has changed */
if (bond->params.mode == BOND_MODE_8023AD)
diff --git a/trunk/drivers/net/bonding/bonding.h b/trunk/drivers/net/bonding/bonding.h
index f989e1529a29..2baec24388b1 100644
--- a/trunk/drivers/net/bonding/bonding.h
+++ b/trunk/drivers/net/bonding/bonding.h
@@ -225,7 +225,7 @@ struct bonding {
rwlock_t curr_slave_lock;
u8 send_peer_notif;
s8 setup_by_slave;
- u8 igmp_retrans;
+ s8 igmp_retrans;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_entry;
char proc_file_name[IFNAMSIZ];
diff --git a/trunk/drivers/net/can/usb/usb_8dev.c b/trunk/drivers/net/can/usb/usb_8dev.c
index cbd388eea682..6e15ef08f301 100644
--- a/trunk/drivers/net/can/usb/usb_8dev.c
+++ b/trunk/drivers/net/can/usb/usb_8dev.c
@@ -977,7 +977,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
err = usb_8dev_cmd_version(priv, &version);
if (err) {
netdev_err(netdev, "can't get firmware version\n");
- goto cleanup_unregister_candev;
+ goto cleanup_cmd_msg_buffer;
} else {
netdev_info(netdev,
"firmware: %d.%d, hardware: %d.%d\n",
@@ -989,9 +989,6 @@ static int usb_8dev_probe(struct usb_interface *intf,
return 0;
-cleanup_unregister_candev:
- unregister_netdev(priv->netdev);
-
cleanup_cmd_msg_buffer:
kfree(priv->cmd_msg_buffer);
diff --git a/trunk/drivers/net/ethernet/atheros/Kconfig b/trunk/drivers/net/ethernet/atheros/Kconfig
index ad6aa1e98348..36d6abd1cfff 100644
--- a/trunk/drivers/net/ethernet/atheros/Kconfig
+++ b/trunk/drivers/net/ethernet/atheros/Kconfig
@@ -67,22 +67,4 @@ config ATL1C
To compile this driver as a module, choose M here. The module
will be called atl1c.
-config ALX
- tristate "Qualcomm Atheros AR816x/AR817x support"
- depends on PCI
- select CRC32
- select NET_CORE
- select MDIO
- help
- This driver supports the Qualcomm Atheros L1F ethernet adapter,
- i.e. the following chipsets:
-
- 1969:1091 - AR8161 Gigabit Ethernet
- 1969:1090 - AR8162 Fast Ethernet
- 1969:10A1 - AR8171 Gigabit Ethernet
- 1969:10A0 - AR8172 Fast Ethernet
-
- To compile this driver as a module, choose M here. The module
- will be called alx.
-
endif # NET_VENDOR_ATHEROS
diff --git a/trunk/drivers/net/ethernet/atheros/Makefile b/trunk/drivers/net/ethernet/atheros/Makefile
index 5cf1c65bbce9..e7e76fb576ff 100644
--- a/trunk/drivers/net/ethernet/atheros/Makefile
+++ b/trunk/drivers/net/ethernet/atheros/Makefile
@@ -6,4 +6,3 @@ obj-$(CONFIG_ATL1) += atlx/
obj-$(CONFIG_ATL2) += atlx/
obj-$(CONFIG_ATL1E) += atl1e/
obj-$(CONFIG_ATL1C) += atl1c/
-obj-$(CONFIG_ALX) += alx/
diff --git a/trunk/drivers/net/ethernet/atheros/alx/Makefile b/trunk/drivers/net/ethernet/atheros/alx/Makefile
deleted file mode 100644
index 5901fa407d52..000000000000
--- a/trunk/drivers/net/ethernet/atheros/alx/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_ALX) += alx.o
-alx-objs := main.o ethtool.o hw.o
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/trunk/drivers/net/ethernet/atheros/alx/alx.h b/trunk/drivers/net/ethernet/atheros/alx/alx.h
deleted file mode 100644
index 50b3ae2b143d..000000000000
--- a/trunk/drivers/net/ethernet/atheros/alx/alx.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2013 Johannes Berg
- *
- * This file is free software: you may copy, redistribute and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation, either version 2 of the License, or (at your
- * option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see .
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- *
- * Copyright (c) 2012 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _ALX_H_
-#define _ALX_H_
-
-#include
-#include
-#include
-#include
-#include "hw.h"
-
-#define ALX_WATCHDOG_TIME (5 * HZ)
-
-struct alx_buffer {
- struct sk_buff *skb;
- DEFINE_DMA_UNMAP_ADDR(dma);
- DEFINE_DMA_UNMAP_LEN(size);
-};
-
-struct alx_rx_queue {
- struct alx_rrd *rrd;
- dma_addr_t rrd_dma;
-
- struct alx_rfd *rfd;
- dma_addr_t rfd_dma;
-
- struct alx_buffer *bufs;
-
- u16 write_idx, read_idx;
- u16 rrd_read_idx;
-};
-#define ALX_RX_ALLOC_THRESH 32
-
-struct alx_tx_queue {
- struct alx_txd *tpd;
- dma_addr_t tpd_dma;
- struct alx_buffer *bufs;
- u16 write_idx, read_idx;
-};
-
-#define ALX_DEFAULT_TX_WORK 128
-
-enum alx_device_quirks {
- ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
-};
-
-struct alx_priv {
- struct net_device *dev;
-
- struct alx_hw hw;
-
- /* all descriptor memory */
- struct {
- dma_addr_t dma;
- void *virt;
- int size;
- } descmem;
-
- /* protect int_mask updates */
- spinlock_t irq_lock;
- u32 int_mask;
-
- int tx_ringsz;
- int rx_ringsz;
- int rxbuf_size;
-
- struct napi_struct napi;
- struct alx_tx_queue txq;
- struct alx_rx_queue rxq;
-
- struct work_struct link_check_wk;
- struct work_struct reset_wk;
-
- u16 msg_enable;
-
- bool msi;
-};
-
-extern const struct ethtool_ops alx_ethtool_ops;
-extern const char alx_drv_name[];
-
-#endif
diff --git a/trunk/drivers/net/ethernet/atheros/alx/ethtool.c b/trunk/drivers/net/ethernet/atheros/alx/ethtool.c
deleted file mode 100644
index 6fa2aec2bc81..000000000000
--- a/trunk/drivers/net/ethernet/atheros/alx/ethtool.c
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Copyright (c) 2013 Johannes Berg
- *
- * This file is free software: you may copy, redistribute and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation, either version 2 of the License, or (at your
- * option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see .
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- *
- * Copyright (c) 2012 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "alx.h"
-#include "reg.h"
-#include "hw.h"
-
-
-static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
-{
- struct alx_priv *alx = netdev_priv(netdev);
- struct alx_hw *hw = &alx->hw;
-
- ecmd->supported = SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_TP |
- SUPPORTED_Pause;
- if (alx_hw_giga(hw))
- ecmd->supported |= SUPPORTED_1000baseT_Full;
-
- ecmd->advertising = ADVERTISED_TP;
- if (hw->adv_cfg & ADVERTISED_Autoneg)
- ecmd->advertising |= hw->adv_cfg;
-
- ecmd->port = PORT_TP;
- ecmd->phy_address = 0;
- if (hw->adv_cfg & ADVERTISED_Autoneg)
- ecmd->autoneg = AUTONEG_ENABLE;
- else
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->transceiver = XCVR_INTERNAL;
-
- if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) {
- if (hw->flowctrl & ALX_FC_RX) {
- ecmd->advertising |= ADVERTISED_Pause;
-
- if (!(hw->flowctrl & ALX_FC_TX))
- ecmd->advertising |= ADVERTISED_Asym_Pause;
- } else if (hw->flowctrl & ALX_FC_TX) {
- ecmd->advertising |= ADVERTISED_Asym_Pause;
- }
- }
-
- if (hw->link_speed != SPEED_UNKNOWN) {
- ethtool_cmd_speed_set(ecmd,
- hw->link_speed - hw->link_speed % 10);
- ecmd->duplex = hw->link_speed % 10;
- } else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
- }
-
- return 0;
-}
-
-static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
-{
- struct alx_priv *alx = netdev_priv(netdev);
- struct alx_hw *hw = &alx->hw;
- u32 adv_cfg;
-
- ASSERT_RTNL();
-
- if (ecmd->autoneg == AUTONEG_ENABLE) {
- if (ecmd->advertising & ADVERTISED_1000baseT_Half)
- return -EINVAL;
- adv_cfg = ecmd->advertising | ADVERTISED_Autoneg;
- } else {
- int speed = ethtool_cmd_speed(ecmd);
-
- switch (speed + ecmd->duplex) {
- case SPEED_10 + DUPLEX_HALF:
- adv_cfg = ADVERTISED_10baseT_Half;
- break;
- case SPEED_10 + DUPLEX_FULL:
- adv_cfg = ADVERTISED_10baseT_Full;
- break;
- case SPEED_100 + DUPLEX_HALF:
- adv_cfg = ADVERTISED_100baseT_Half;
- break;
- case SPEED_100 + DUPLEX_FULL:
- adv_cfg = ADVERTISED_100baseT_Full;
- break;
- default:
- return -EINVAL;
- }
- }
-
- hw->adv_cfg = adv_cfg;
- return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl);
-}
-
-static void alx_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct alx_priv *alx = netdev_priv(netdev);
- struct alx_hw *hw = &alx->hw;
-
- if (hw->flowctrl & ALX_FC_ANEG &&
- hw->adv_cfg & ADVERTISED_Autoneg)
- pause->autoneg = AUTONEG_ENABLE;
- else
- pause->autoneg = AUTONEG_DISABLE;
-
- if (hw->flowctrl & ALX_FC_TX)
- pause->tx_pause = 1;
- else
- pause->tx_pause = 0;
-
- if (hw->flowctrl & ALX_FC_RX)
- pause->rx_pause = 1;
- else
- pause->rx_pause = 0;
-}
-
-
-static int alx_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct alx_priv *alx = netdev_priv(netdev);
- struct alx_hw *hw = &alx->hw;
- int err = 0;
- bool reconfig_phy = false;
- u8 fc = 0;
-
- if (pause->tx_pause)
- fc |= ALX_FC_TX;
- if (pause->rx_pause)
- fc |= ALX_FC_RX;
- if (pause->autoneg)
- fc |= ALX_FC_ANEG;
-
- ASSERT_RTNL();
-
- /* restart auto-neg for auto-mode */
- if (hw->adv_cfg & ADVERTISED_Autoneg) {
- if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG))
- reconfig_phy = true;
- if (fc & hw->flowctrl & ALX_FC_ANEG &&
- (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
- reconfig_phy = true;
- }
-
- if (reconfig_phy) {
- err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc);
- return err;
- }
-
- /* flow control on mac */
- if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
- alx_cfg_mac_flowcontrol(hw, fc);
-
- hw->flowctrl = fc;
-
- return 0;
-}
-
-static u32 alx_get_msglevel(struct net_device *netdev)
-{
- struct alx_priv *alx = netdev_priv(netdev);
-
- return alx->msg_enable;
-}
-
-static void alx_set_msglevel(struct net_device *netdev, u32 data)
-{
- struct alx_priv *alx = netdev_priv(netdev);
-
- alx->msg_enable = data;
-}
-
-static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
- struct alx_priv *alx = netdev_priv(netdev);
- struct alx_hw *hw = &alx->hw;
-
- wol->supported = WAKE_MAGIC | WAKE_PHY;
- wol->wolopts = 0;
-
- if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
- wol->wolopts |= WAKE_MAGIC;
- if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY)
- wol->wolopts |= WAKE_PHY;
-}
-
-static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
- struct alx_priv *alx = netdev_priv(netdev);
- struct alx_hw *hw = &alx->hw;
-
- if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
- WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
- return -EOPNOTSUPP;
-
- hw->sleep_ctrl = 0;
-
- if (wol->wolopts & WAKE_MAGIC)
- hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC;
- if (wol->wolopts & WAKE_PHY)
- hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY;
-
- device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl);
-
- return 0;
-}
-
-static void alx_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct alx_priv *alx = netdev_priv(netdev);
-
- strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev),
- sizeof(drvinfo->bus_info));
-}
-
-const struct ethtool_ops alx_ethtool_ops = {
- .get_settings = alx_get_settings,
- .set_settings = alx_set_settings,
- .get_pauseparam = alx_get_pauseparam,
- .set_pauseparam = alx_set_pauseparam,
- .get_drvinfo = alx_get_drvinfo,
- .get_msglevel = alx_get_msglevel,
- .set_msglevel = alx_set_msglevel,
- .get_wol = alx_get_wol,
- .set_wol = alx_set_wol,
- .get_link = ethtool_op_get_link,
-};
diff --git a/trunk/drivers/net/ethernet/atheros/alx/hw.c b/trunk/drivers/net/ethernet/atheros/alx/hw.c
deleted file mode 100644
index 220a16ad0e49..000000000000
--- a/trunk/drivers/net/ethernet/atheros/alx/hw.c
+++ /dev/null
@@ -1,1226 +0,0 @@
-/*
- * Copyright (c) 2013 Johannes Berg
- *
- * This file is free software: you may copy, redistribute and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation, either version 2 of the License, or (at your
- * option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see .
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- *
- * Copyright (c) 2012 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#include
-#include
-#include
-#include
-#include "reg.h"
-#include "hw.h"
-
-static inline bool alx_is_rev_a(u8 rev)
-{
- return rev == ALX_REV_A0 || rev == ALX_REV_A1;
-}
-
-static int alx_wait_mdio_idle(struct alx_hw *hw)
-{
- u32 val;
- int i;
-
- for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
- val = alx_read_mem32(hw, ALX_MDIO);
- if (!(val & ALX_MDIO_BUSY))
- return 0;
- udelay(10);
- }
-
- return -ETIMEDOUT;
-}
-
-static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev,
- u16 reg, u16 *phy_data)
-{
- u32 val, clk_sel;
- int err;
-
- *phy_data = 0;
-
- /* use slow clock when it's in hibernation status */
- clk_sel = hw->link_speed != SPEED_UNKNOWN ?
- ALX_MDIO_CLK_SEL_25MD4 :
- ALX_MDIO_CLK_SEL_25MD128;
-
- if (ext) {
- val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
- reg << ALX_MDIO_EXTN_REG_SHIFT;
- alx_write_mem32(hw, ALX_MDIO_EXTN, val);
-
- val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START |
- ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ |
- clk_sel << ALX_MDIO_CLK_SEL_SHIFT;
- } else {
- val = ALX_MDIO_SPRES_PRMBL |
- clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
- reg << ALX_MDIO_REG_SHIFT |
- ALX_MDIO_START | ALX_MDIO_OP_READ;
- }
- alx_write_mem32(hw, ALX_MDIO, val);
-
- err = alx_wait_mdio_idle(hw);
- if (err)
- return err;
- val = alx_read_mem32(hw, ALX_MDIO);
- *phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA);
- return 0;
-}
-
-static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev,
- u16 reg, u16 phy_data)
-{
- u32 val, clk_sel;
-
- /* use slow clock when it's in hibernation status */
- clk_sel = hw->link_speed != SPEED_UNKNOWN ?
- ALX_MDIO_CLK_SEL_25MD4 :
- ALX_MDIO_CLK_SEL_25MD128;
-
- if (ext) {
- val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
- reg << ALX_MDIO_EXTN_REG_SHIFT;
- alx_write_mem32(hw, ALX_MDIO_EXTN, val);
-
- val = ALX_MDIO_SPRES_PRMBL |
- clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
- phy_data << ALX_MDIO_DATA_SHIFT |
- ALX_MDIO_START | ALX_MDIO_MODE_EXT;
- } else {
- val = ALX_MDIO_SPRES_PRMBL |
- clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
- reg << ALX_MDIO_REG_SHIFT |
- phy_data << ALX_MDIO_DATA_SHIFT |
- ALX_MDIO_START;
- }
- alx_write_mem32(hw, ALX_MDIO, val);
-
- return alx_wait_mdio_idle(hw);
-}
-
-static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
-{
- return alx_read_phy_core(hw, false, 0, reg, phy_data);
-}
-
-static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
-{
- return alx_write_phy_core(hw, false, 0, reg, phy_data);
-}
-
-static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
-{
- return alx_read_phy_core(hw, true, dev, reg, pdata);
-}
-
-static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
-{
- return alx_write_phy_core(hw, true, dev, reg, data);
-}
-
-static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
-{
- int err;
-
- err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
- if (err)
- return err;
-
- return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata);
-}
-
-static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
-{
- int err;
-
- err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
- if (err)
- return err;
-
- return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data);
-}
-
-int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
-{
- int err;
-
- spin_lock(&hw->mdio_lock);
- err = __alx_read_phy_reg(hw, reg, phy_data);
- spin_unlock(&hw->mdio_lock);
-
- return err;
-}
-
-int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
-{
- int err;
-
- spin_lock(&hw->mdio_lock);
- err = __alx_write_phy_reg(hw, reg, phy_data);
- spin_unlock(&hw->mdio_lock);
-
- return err;
-}
-
-int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
-{
- int err;
-
- spin_lock(&hw->mdio_lock);
- err = __alx_read_phy_ext(hw, dev, reg, pdata);
- spin_unlock(&hw->mdio_lock);
-
- return err;
-}
-
-int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
-{
- int err;
-
- spin_lock(&hw->mdio_lock);
- err = __alx_write_phy_ext(hw, dev, reg, data);
- spin_unlock(&hw->mdio_lock);
-
- return err;
-}
-
-static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
-{
- int err;
-
- spin_lock(&hw->mdio_lock);
- err = __alx_read_phy_dbg(hw, reg, pdata);
- spin_unlock(&hw->mdio_lock);
-
- return err;
-}
-
-static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
-{
- int err;
-
- spin_lock(&hw->mdio_lock);
- err = __alx_write_phy_dbg(hw, reg, data);
- spin_unlock(&hw->mdio_lock);
-
- return err;
-}
-
-static u16 alx_get_phy_config(struct alx_hw *hw)
-{
- u32 val;
- u16 phy_val;
-
- val = alx_read_mem32(hw, ALX_PHY_CTRL);
- /* phy in reset */
- if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0)
- return ALX_DRV_PHY_UNKNOWN;
-
- val = alx_read_mem32(hw, ALX_DRV);
- val = ALX_GET_FIELD(val, ALX_DRV_PHY);
- if (ALX_DRV_PHY_UNKNOWN == val)
- return ALX_DRV_PHY_UNKNOWN;
-
- alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val);
- if (ALX_PHY_INITED == phy_val)
- return val;
-
- return ALX_DRV_PHY_UNKNOWN;
-}
-
-static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val)
-{
- u32 read;
- int i;
-
- for (i = 0; i < ALX_SLD_MAX_TO; i++) {
- read = alx_read_mem32(hw, reg);
- if ((read & wait) == 0) {
- if (val)
- *val = read;
- return true;
- }
- mdelay(1);
- }
-
- return false;
-}
-
-static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr)
-{
- u32 mac0, mac1;
-
- mac0 = alx_read_mem32(hw, ALX_STAD0);
- mac1 = alx_read_mem32(hw, ALX_STAD1);
-
- /* addr should be big-endian */
- *(__be32 *)(addr + 2) = cpu_to_be32(mac0);
- *(__be16 *)addr = cpu_to_be16(mac1);
-
- return is_valid_ether_addr(addr);
-}
-
-int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
-{
- u32 val;
-
- /* try to get it from register first */
- if (alx_read_macaddr(hw, addr))
- return 0;
-
- /* try to load from efuse */
- if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val))
- return -EIO;
- alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START);
- if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL))
- return -EIO;
- if (alx_read_macaddr(hw, addr))
- return 0;
-
- /* try to load from flash/eeprom (if present) */
- val = alx_read_mem32(hw, ALX_EFLD);
- if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) {
- if (!alx_wait_reg(hw, ALX_EFLD,
- ALX_EFLD_STAT | ALX_EFLD_START, &val))
- return -EIO;
- alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START);
- if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL))
- return -EIO;
- if (alx_read_macaddr(hw, addr))
- return 0;
- }
-
- return -EIO;
-}
-
-void alx_set_macaddr(struct alx_hw *hw, const u8 *addr)
-{
- u32 val;
-
- /* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */
- val = be32_to_cpu(*(__be32 *)(addr + 2));
- alx_write_mem32(hw, ALX_STAD0, val);
- val = be16_to_cpu(*(__be16 *)addr);
- alx_write_mem32(hw, ALX_STAD1, val);
-}
-
-static void alx_enable_osc(struct alx_hw *hw)
-{
- u32 val;
-
- /* rising edge */
- val = alx_read_mem32(hw, ALX_MISC);
- alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN);
- alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
-}
-
-static void alx_reset_osc(struct alx_hw *hw, u8 rev)
-{
- u32 val, val2;
-
- /* clear Internal OSC settings, switching OSC by hw itself */
- val = alx_read_mem32(hw, ALX_MISC3);
- alx_write_mem32(hw, ALX_MISC3,
- (val & ~ALX_MISC3_25M_BY_SW) |
- ALX_MISC3_25M_NOTO_INTNL);
-
- /* 25M clk from chipset may be unstable 1s after de-assert of
- * PERST, driver need re-calibrate before enter Sleep for WoL
- */
- val = alx_read_mem32(hw, ALX_MISC);
- if (rev >= ALX_REV_B0) {
- /* restore over current protection def-val,
- * this val could be reset by MAC-RST
- */
- ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF);
- /* a 0->1 change will update the internal val of osc */
- val &= ~ALX_MISC_INTNLOSC_OPEN;
- alx_write_mem32(hw, ALX_MISC, val);
- alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
- /* hw will automatically dis OSC after cab. */
- val2 = alx_read_mem32(hw, ALX_MSIC2);
- val2 &= ~ALX_MSIC2_CALB_START;
- alx_write_mem32(hw, ALX_MSIC2, val2);
- alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START);
- } else {
- val &= ~ALX_MISC_INTNLOSC_OPEN;
- /* disable isolate for rev A devices */
- if (alx_is_rev_a(rev))
- val &= ~ALX_MISC_ISO_EN;
-
- alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
- alx_write_mem32(hw, ALX_MISC, val);
- }
-
- udelay(20);
-}
-
-static int alx_stop_mac(struct alx_hw *hw)
-{
- u32 rxq, txq, val;
- u16 i;
-
- rxq = alx_read_mem32(hw, ALX_RXQ0);
- alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
- txq = alx_read_mem32(hw, ALX_TXQ0);
- alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN);
-
- udelay(40);
-
- hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
- alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
-
- for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
- val = alx_read_mem32(hw, ALX_MAC_STS);
- if (!(val & ALX_MAC_STS_IDLE))
- return 0;
- udelay(10);
- }
-
- return -ETIMEDOUT;
-}
-
-int alx_reset_mac(struct alx_hw *hw)
-{
- u32 val, pmctrl;
- int i, ret;
- u8 rev;
- bool a_cr;
-
- pmctrl = 0;
- rev = alx_hw_revision(hw);
- a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw);
-
- /* disable all interrupts, RXQ/TXQ */
- alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF);
- alx_write_mem32(hw, ALX_IMR, 0);
- alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
-
- ret = alx_stop_mac(hw);
- if (ret)
- return ret;
-
- /* mac reset workaroud */
- alx_write_mem32(hw, ALX_RFD_PIDX, 1);
-
- /* dis l0s/l1 before mac reset */
- if (a_cr) {
- pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
- if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
- alx_write_mem32(hw, ALX_PMCTRL,
- pmctrl & ~(ALX_PMCTRL_L1_EN |
- ALX_PMCTRL_L0S_EN));
- }
-
- /* reset whole mac safely */
- val = alx_read_mem32(hw, ALX_MASTER);
- alx_write_mem32(hw, ALX_MASTER,
- val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS);
-
- /* make sure it's real idle */
- udelay(10);
- for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
- val = alx_read_mem32(hw, ALX_RFD_PIDX);
- if (val == 0)
- break;
- udelay(10);
- }
- for (; i < ALX_DMA_MAC_RST_TO; i++) {
- val = alx_read_mem32(hw, ALX_MASTER);
- if ((val & ALX_MASTER_DMA_MAC_RST) == 0)
- break;
- udelay(10);
- }
- if (i == ALX_DMA_MAC_RST_TO)
- return -EIO;
- udelay(10);
-
- if (a_cr) {
- alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS);
- /* restore l0s / l1 */
- if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
- alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
- }
-
- alx_reset_osc(hw, rev);
-
- /* clear Internal OSC settings, switching OSC by hw itself,
- * disable isolate for rev A devices
- */
- val = alx_read_mem32(hw, ALX_MISC3);
- alx_write_mem32(hw, ALX_MISC3,
- (val & ~ALX_MISC3_25M_BY_SW) |
- ALX_MISC3_25M_NOTO_INTNL);
- val = alx_read_mem32(hw, ALX_MISC);
- val &= ~ALX_MISC_INTNLOSC_OPEN;
- if (alx_is_rev_a(rev))
- val &= ~ALX_MISC_ISO_EN;
- alx_write_mem32(hw, ALX_MISC, val);
- udelay(20);
-
- /* driver control speed/duplex, hash-alg */
- alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
-
- val = alx_read_mem32(hw, ALX_SERDES);
- alx_write_mem32(hw, ALX_SERDES,
- val | ALX_SERDES_MACCLK_SLWDWN |
- ALX_SERDES_PHYCLK_SLWDWN);
-
- return 0;
-}
-
-void alx_reset_phy(struct alx_hw *hw)
-{
- int i;
- u32 val;
- u16 phy_val;
-
- /* (DSP)reset PHY core */
- val = alx_read_mem32(hw, ALX_PHY_CTRL);
- val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ |
- ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN |
- ALX_PHY_CTRL_CLS);
- val |= ALX_PHY_CTRL_RST_ANALOG;
-
- val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN);
- alx_write_mem32(hw, ALX_PHY_CTRL, val);
- udelay(10);
- alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT);
-
- for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++)
- udelay(10);
-
- /* phy power saving & hib */
- alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF);
- alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL,
- ALX_SYSMODCTRL_IECHOADJ_DEF);
- alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS,
- ALX_VDRVBIAS_DEF);
-
- /* EEE advertisement */
- val = alx_read_mem32(hw, ALX_LPI_CTRL);
- alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN);
- alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0);
-
- /* phy power saving */
- alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF);
- alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF);
- alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF);
- alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF);
- alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
- alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
- phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN);
- /* rtl8139c, 120m issue */
- alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78,
- ALX_MIIEXT_NLP78_120M_DEF);
- alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10,
- ALX_MIIEXT_S3DIG10_DEF);
-
- if (hw->lnk_patch) {
- /* Turn off half amplitude */
- alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
- &phy_val);
- alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
- phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT);
- /* Turn off Green feature */
- alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
- alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
- phy_val | ALX_GREENCFG2_BP_GREEN);
- /* Turn off half Bias */
- alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
- &phy_val);
- alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
- phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS);
- }
-
- /* set phy interrupt mask */
- alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN);
-}
-
-#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
-
-void alx_reset_pcie(struct alx_hw *hw)
-{
- u8 rev = alx_hw_revision(hw);
- u32 val;
- u16 val16;
-
- /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
- pci_read_config_word(hw->pdev, PCI_COMMAND, &val16);
- if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) {
- val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(hw->pdev, PCI_COMMAND, val16);
- }
-
- /* clear WoL setting/status */
- val = alx_read_mem32(hw, ALX_WOL0);
- alx_write_mem32(hw, ALX_WOL0, 0);
-
- val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
- alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN);
-
- /* mask some pcie error bits */
- val = alx_read_mem32(hw, ALX_UE_SVRT);
- val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR);
- alx_write_mem32(hw, ALX_UE_SVRT, val);
-
- /* wol 25M & pclk */
- val = alx_read_mem32(hw, ALX_MASTER);
- if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) {
- if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
- (val & ALX_MASTER_PCLKSEL_SRDS) == 0)
- alx_write_mem32(hw, ALX_MASTER,
- val | ALX_MASTER_PCLKSEL_SRDS |
- ALX_MASTER_WAKEN_25M);
- } else {
- if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
- (val & ALX_MASTER_PCLKSEL_SRDS) != 0)
- alx_write_mem32(hw, ALX_MASTER,
- (val & ~ALX_MASTER_PCLKSEL_SRDS) |
- ALX_MASTER_WAKEN_25M);
- }
-
- /* ASPM setting */
- alx_enable_aspm(hw, true, true);
-
- udelay(10);
-}
-
-void alx_start_mac(struct alx_hw *hw)
-{
- u32 mac, txq, rxq;
-
- rxq = alx_read_mem32(hw, ALX_RXQ0);
- alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
- txq = alx_read_mem32(hw, ALX_TXQ0);
- alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);
-
- mac = hw->rx_ctrl;
- if (hw->link_speed % 10 == DUPLEX_FULL)
- mac |= ALX_MAC_CTRL_FULLD;
- else
- mac &= ~ALX_MAC_CTRL_FULLD;
- ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
- hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
- ALX_MAC_CTRL_SPEED_10_100);
- mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
- hw->rx_ctrl = mac;
- alx_write_mem32(hw, ALX_MAC_CTRL, mac);
-}
-
-void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc)
-{
- if (fc & ALX_FC_RX)
- hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN;
- else
- hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN;
-
- if (fc & ALX_FC_TX)
- hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN;
- else
- hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN;
-
- alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
-}
-
-void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
-{
- u32 pmctrl;
- u8 rev = alx_hw_revision(hw);
-
- pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
-
- ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER,
- ALX_PMCTRL_LCKDET_TIMER_DEF);
- pmctrl |= ALX_PMCTRL_RCVR_WT_1US |
- ALX_PMCTRL_L1_CLKSW_EN |
- ALX_PMCTRL_L1_SRDSRX_PWD;
- ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF);
- ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US);
- pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN |
- ALX_PMCTRL_L1_SRDSPLL_EN |
- ALX_PMCTRL_L1_BUFSRX_EN |
- ALX_PMCTRL_SADLY_EN |
- ALX_PMCTRL_HOTRST_WTEN|
- ALX_PMCTRL_L0S_EN |
- ALX_PMCTRL_L1_EN |
- ALX_PMCTRL_ASPM_FCEN |
- ALX_PMCTRL_TXL1_AFTER_L0S |
- ALX_PMCTRL_RXL1_AFTER_L0S);
- if (alx_is_rev_a(rev) && alx_hw_with_cr(hw))
- pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN;
-
- if (l0s_en)
- pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN);
- if (l1_en)
- pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN);
-
- alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
-}
-
-
-static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg)
-{
- u32 cfg = 0;
-
- if (ethadv_cfg & ADVERTISED_Autoneg) {
- cfg |= ALX_DRV_PHY_AUTO;
- if (ethadv_cfg & ADVERTISED_10baseT_Half)
- cfg |= ALX_DRV_PHY_10;
- if (ethadv_cfg & ADVERTISED_10baseT_Full)
- cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
- if (ethadv_cfg & ADVERTISED_100baseT_Half)
- cfg |= ALX_DRV_PHY_100;
- if (ethadv_cfg & ADVERTISED_100baseT_Full)
- cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
- if (ethadv_cfg & ADVERTISED_1000baseT_Half)
- cfg |= ALX_DRV_PHY_1000;
- if (ethadv_cfg & ADVERTISED_1000baseT_Full)
- cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
- if (ethadv_cfg & ADVERTISED_Pause)
- cfg |= ADVERTISE_PAUSE_CAP;
- if (ethadv_cfg & ADVERTISED_Asym_Pause)
- cfg |= ADVERTISE_PAUSE_ASYM;
- } else {
- switch (ethadv_cfg) {
- case ADVERTISED_10baseT_Half:
- cfg |= ALX_DRV_PHY_10;
- break;
- case ADVERTISED_100baseT_Half:
- cfg |= ALX_DRV_PHY_100;
- break;
- case ADVERTISED_10baseT_Full:
- cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
- break;
- case ADVERTISED_100baseT_Full:
- cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
- break;
- }
- }
-
- return cfg;
-}
-
-int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl)
-{
- u16 adv, giga, cr;
- u32 val;
- int err = 0;
-
- alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0);
- val = alx_read_mem32(hw, ALX_DRV);
- ALX_SET_FIELD(val, ALX_DRV_PHY, 0);
-
- if (ethadv & ADVERTISED_Autoneg) {
- adv = ADVERTISE_CSMA;
- adv |= ethtool_adv_to_mii_adv_t(ethadv);
-
- if (flowctrl & ALX_FC_ANEG) {
- if (flowctrl & ALX_FC_RX) {
- adv |= ADVERTISED_Pause;
- if (!(flowctrl & ALX_FC_TX))
- adv |= ADVERTISED_Asym_Pause;
- } else if (flowctrl & ALX_FC_TX) {
- adv |= ADVERTISED_Asym_Pause;
- }
- }
- giga = 0;
- if (alx_hw_giga(hw))
- giga = ethtool_adv_to_mii_ctrl1000_t(ethadv);
-
- cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
-
- if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) ||
- alx_write_phy_reg(hw, MII_CTRL1000, giga) ||
- alx_write_phy_reg(hw, MII_BMCR, cr))
- err = -EBUSY;
- } else {
- cr = BMCR_RESET;
- if (ethadv == ADVERTISED_100baseT_Half ||
- ethadv == ADVERTISED_100baseT_Full)
- cr |= BMCR_SPEED100;
- if (ethadv == ADVERTISED_10baseT_Full ||
- ethadv == ADVERTISED_100baseT_Full)
- cr |= BMCR_FULLDPLX;
-
- err = alx_write_phy_reg(hw, MII_BMCR, cr);
- }
-
- if (!err) {
- alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED);
- val |= ethadv_to_hw_cfg(hw, ethadv);
- }
-
- alx_write_mem32(hw, ALX_DRV, val);
-
- return err;
-}
-
-
-void alx_post_phy_link(struct alx_hw *hw)
-{
- u16 phy_val, len, agc;
- u8 revid = alx_hw_revision(hw);
- bool adj_th = revid == ALX_REV_B0;
- int speed;
-
- if (hw->link_speed == SPEED_UNKNOWN)
- speed = SPEED_UNKNOWN;
- else
- speed = hw->link_speed - hw->link_speed % 10;
-
- if (revid != ALX_REV_B0 && !alx_is_rev_a(revid))
- return;
-
- /* 1000BT/AZ, wrong cable length */
- if (speed != SPEED_UNKNOWN) {
- alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6,
- &phy_val);
- len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN);
- alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val);
- agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA);
-
- if ((speed == SPEED_1000 &&
- (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G ||
- (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) ||
- (speed == SPEED_100 &&
- (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M ||
- (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) {
- alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
- ALX_AZ_ANADECT_LONG);
- alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
- &phy_val);
- alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
- phy_val | ALX_AFE_10BT_100M_TH);
- } else {
- alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
- ALX_AZ_ANADECT_DEF);
- alx_read_phy_ext(hw, ALX_MIIEXT_ANEG,
- ALX_MIIEXT_AFE, &phy_val);
- alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
- phy_val & ~ALX_AFE_10BT_100M_TH);
- }
-
- /* threshold adjust */
- if (adj_th && hw->lnk_patch) {
- if (speed == SPEED_100) {
- alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
- ALX_MSE16DB_UP);
- } else if (speed == SPEED_1000) {
- /*
- * Giga link threshold, raise the tolerance of
- * noise 50%
- */
- alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
- &phy_val);
- ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
- ALX_MSE20DB_TH_HI);
- alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
- phy_val);
- }
- }
- } else {
- alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
- &phy_val);
- alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
- phy_val & ~ALX_AFE_10BT_100M_TH);
-
- if (adj_th && hw->lnk_patch) {
- alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
- ALX_MSE16DB_DOWN);
- alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val);
- ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
- ALX_MSE20DB_TH_DEF);
- alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val);
- }
- }
-}
-
-
-/* NOTE:
- * 1. phy link must be established before calling this function
- * 2. wol option (pattern,magic,link,etc.) is configed before call it.
- */
-int alx_pre_suspend(struct alx_hw *hw, int speed)
-{
- u32 master, mac, phy, val;
- int err = 0;
-
- master = alx_read_mem32(hw, ALX_MASTER);
- master &= ~ALX_MASTER_PCLKSEL_SRDS;
- mac = hw->rx_ctrl;
- /* 10/100 half */
- ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100);
- mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
-
- phy = alx_read_mem32(hw, ALX_PHY_CTRL);
- phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS);
- phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE |
- ALX_PHY_CTRL_HIB_EN;
-
- /* without any activity */
- if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) {
- err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
- if (err)
- return err;
- phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN;
- } else {
- if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS))
- mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN;
- if (hw->sleep_ctrl & ALX_SLEEP_CIFS)
- mac |= ALX_MAC_CTRL_TX_EN;
- if (speed % 10 == DUPLEX_FULL)
- mac |= ALX_MAC_CTRL_FULLD;
- if (speed >= SPEED_1000)
- ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
- ALX_MAC_CTRL_SPEED_1000);
- phy |= ALX_PHY_CTRL_DSPRST_OUT;
- err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG,
- ALX_MIIEXT_S3DIG10,
- ALX_MIIEXT_S3DIG10_SL);
- if (err)
- return err;
- }
-
- alx_enable_osc(hw);
- hw->rx_ctrl = mac;
- alx_write_mem32(hw, ALX_MASTER, master);
- alx_write_mem32(hw, ALX_MAC_CTRL, mac);
- alx_write_mem32(hw, ALX_PHY_CTRL, phy);
-
- /* set val of PDLL D3PLLOFF */
- val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
- val |= ALX_PDLL_TRNS1_D3PLLOFF_EN;
- alx_write_mem32(hw, ALX_PDLL_TRNS1, val);
-
- return 0;
-}
-
-bool alx_phy_configured(struct alx_hw *hw)
-{
- u32 cfg, hw_cfg;
-
- cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg);
- cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY);
- hw_cfg = alx_get_phy_config(hw);
-
- if (hw_cfg == ALX_DRV_PHY_UNKNOWN)
- return false;
-
- return cfg == hw_cfg;
-}
-
-int alx_get_phy_link(struct alx_hw *hw, int *speed)
-{
- struct pci_dev *pdev = hw->pdev;
- u16 bmsr, giga;
- int err;
-
- err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
- if (err)
- return err;
-
- err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
- if (err)
- return err;
-
- if (!(bmsr & BMSR_LSTATUS)) {
- *speed = SPEED_UNKNOWN;
- return 0;
- }
-
- /* speed/duplex result is saved in PHY Specific Status Register */
- err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga);
- if (err)
- return err;
-
- if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED))
- goto wrong_speed;
-
- switch (giga & ALX_GIGA_PSSR_SPEED) {
- case ALX_GIGA_PSSR_1000MBS:
- *speed = SPEED_1000;
- break;
- case ALX_GIGA_PSSR_100MBS:
- *speed = SPEED_100;
- break;
- case ALX_GIGA_PSSR_10MBS:
- *speed = SPEED_10;
- break;
- default:
- goto wrong_speed;
- }
-
- *speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF;
- return 1;
-
-wrong_speed:
- dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga);
- return -EINVAL;
-}
-
-int alx_clear_phy_intr(struct alx_hw *hw)
-{
- u16 isr;
-
- /* clear interrupt status by reading it */
- return alx_read_phy_reg(hw, ALX_MII_ISR, &isr);
-}
-
-int alx_config_wol(struct alx_hw *hw)
-{
- u32 wol = 0;
- int err = 0;
-
- /* turn on magic packet event */
- if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
- wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN;
-
- /* turn on link up event */
- if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) {
- wol |= ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK;
- /* only link up can wake up */
- err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP);
- }
- alx_write_mem32(hw, ALX_WOL0, wol);
-
- return err;
-}
-
-void alx_disable_rss(struct alx_hw *hw)
-{
- u32 ctrl = alx_read_mem32(hw, ALX_RXQ0);
-
- ctrl &= ~ALX_RXQ0_RSS_HASH_EN;
- alx_write_mem32(hw, ALX_RXQ0, ctrl);
-}
-
-void alx_configure_basic(struct alx_hw *hw)
-{
- u32 val, raw_mtu, max_payload;
- u16 val16;
- u8 chip_rev = alx_hw_revision(hw);
-
- alx_set_macaddr(hw, hw->mac_addr);
-
- alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL);
-
- /* idle timeout to switch clk_125M */
- if (chip_rev >= ALX_REV_B0)
- alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER,
- ALX_IDLE_DECISN_TIMER_DEF);
-
- alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL);
-
- val = alx_read_mem32(hw, ALX_MASTER);
- val |= ALX_MASTER_IRQMOD2_EN |
- ALX_MASTER_IRQMOD1_EN |
- ALX_MASTER_SYSALVTIMER_EN;
- alx_write_mem32(hw, ALX_MASTER, val);
- alx_write_mem32(hw, ALX_IRQ_MODU_TIMER,
- (hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT);
- /* intr re-trig timeout */
- alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO);
- /* tpd threshold to trig int */
- alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd);
- alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt);
-
- raw_mtu = hw->mtu + ETH_HLEN;
- alx_write_mem32(hw, ALX_MTU, raw_mtu + 8);
- if (raw_mtu > ALX_MTU_JUMBO_TH)
- hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE;
-
- if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH)
- val = (raw_mtu + 8 + 7) >> 3;
- else
- val = ALX_TXQ1_JUMBO_TSO_TH >> 3;
- alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN);
-
- max_payload = pcie_get_readrq(hw->pdev) >> 8;
- /*
- * if BIOS had changed the default dma read max length,
- * restore it to default value
- */
- if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN)
- pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN);
-
- val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT |
- ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN |
- ALX_TXQ0_SUPT_IPOPT |
- ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT;
- alx_write_mem32(hw, ALX_TXQ0, val);
- val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT |
- ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT |
- ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT |
- ALX_HQTPD_BURST_EN;
- alx_write_mem32(hw, ALX_HQTPD, val);
-
- /* rxq, flow control */
- val = alx_read_mem32(hw, ALX_SRAM5);
- val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3;
- if (val > ALX_SRAM_RXF_LEN_8K) {
- val16 = ALX_MTU_STD_ALGN >> 3;
- val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3;
- } else {
- val16 = ALX_MTU_STD_ALGN >> 3;
- val = (val - ALX_MTU_STD_ALGN) >> 3;
- }
- alx_write_mem32(hw, ALX_RXQ2,
- val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT |
- val << ALX_RXQ2_RXF_XON_THRESH_SHIFT);
- val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT |
- ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT |
- ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT |
- ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN |
- ALX_RXQ0_IPV6_PARSE_EN;
-
- if (alx_hw_giga(hw))
- ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH,
- ALX_RXQ0_ASPM_THRESH_100M);
-
- alx_write_mem32(hw, ALX_RXQ0, val);
-
- val = alx_read_mem32(hw, ALX_DMA);
- val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT |
- ALX_DMA_RREQ_PRI_DATA |
- max_payload << ALX_DMA_RREQ_BLEN_SHIFT |
- ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT |
- ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT |
- (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT;
- alx_write_mem32(hw, ALX_DMA, val);
-
- /* default multi-tx-q weights */
- val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT |
- 4 << ALX_WRR_PRI0_SHIFT |
- 4 << ALX_WRR_PRI1_SHIFT |
- 4 << ALX_WRR_PRI2_SHIFT |
- 4 << ALX_WRR_PRI3_SHIFT;
- alx_write_mem32(hw, ALX_WRR, val);
-}
-
-static inline u32 alx_speed_to_ethadv(int speed)
-{
- switch (speed) {
- case SPEED_1000 + DUPLEX_FULL:
- return ADVERTISED_1000baseT_Full;
- case SPEED_100 + DUPLEX_FULL:
- return ADVERTISED_100baseT_Full;
- case SPEED_100 + DUPLEX_HALF:
- return ADVERTISED_10baseT_Half;
- case SPEED_10 + DUPLEX_FULL:
- return ADVERTISED_10baseT_Full;
- case SPEED_10 + DUPLEX_HALF:
- return ADVERTISED_10baseT_Half;
- default:
- return 0;
- }
-}
-
-int alx_select_powersaving_speed(struct alx_hw *hw, int *speed)
-{
- int i, err, spd;
- u16 lpa;
-
- err = alx_get_phy_link(hw, &spd);
- if (err < 0)
- return err;
-
- if (spd == SPEED_UNKNOWN)
- return 0;
-
- err = alx_read_phy_reg(hw, MII_LPA, &lpa);
- if (err)
- return err;
-
- if (!(lpa & LPA_LPACK)) {
- *speed = spd;
- return 0;
- }
-
- if (lpa & LPA_10FULL)
- *speed = SPEED_10 + DUPLEX_FULL;
- else if (lpa & LPA_10HALF)
- *speed = SPEED_10 + DUPLEX_HALF;
- else if (lpa & LPA_100FULL)
- *speed = SPEED_100 + DUPLEX_FULL;
- else
- *speed = SPEED_100 + DUPLEX_HALF;
-
- if (*speed != spd) {
- err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
- if (err)
- return err;
- err = alx_setup_speed_duplex(hw,
- alx_speed_to_ethadv(*speed) |
- ADVERTISED_Autoneg,
- ALX_FC_ANEG | ALX_FC_RX |
- ALX_FC_TX);
- if (err)
- return err;
-
- /* wait for linkup */
- for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {
- int speed2;
-
- msleep(100);
-
- err = alx_get_phy_link(hw, &speed2);
- if (err < 0)
- return err;
- if (speed2 != SPEED_UNKNOWN)
- break;
- }
- if (i == ALX_MAX_SETUP_LNK_CYCLE)
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-bool alx_get_phy_info(struct alx_hw *hw)
-{
- u16 devs1, devs2;
-
- if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) ||
- alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1]))
- return false;
-
- /* since we haven't PMA/PMD status2 register, we can't
- * use mdio45_probe function for prtad and mmds.
- * use fixed MMD3 to get mmds.
- */
- if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) ||
- alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2))
- return false;
- hw->mdio.mmds = devs1 | devs2 << 16;
-
- return true;
-}
diff --git a/trunk/drivers/net/ethernet/atheros/alx/hw.h b/trunk/drivers/net/ethernet/atheros/alx/hw.h
deleted file mode 100644
index 65e723d2172a..000000000000
--- a/trunk/drivers/net/ethernet/atheros/alx/hw.h
+++ /dev/null
@@ -1,499 +0,0 @@
-/*
- * Copyright (c) 2013 Johannes Berg
- *
- * This file is free software: you may copy, redistribute and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation, either version 2 of the License, or (at your
- * option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see .
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- *
- * Copyright (c) 2012 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef ALX_HW_H_
-#define ALX_HW_H_
-#include
-#include
-#include
-#include "reg.h"
-
-/* Transmit Packet Descriptor, contains 4 32-bit words.
- *
- * 31 16 0
- * +----------------+----------------+
- * | vlan-tag | buf length |
- * +----------------+----------------+
- * | Word 1 |
- * +----------------+----------------+
- * | Word 2: buf addr lo |
- * +----------------+----------------+
- * | Word 3: buf addr hi |
- * +----------------+----------------+
- *
- * Word 2 and 3 combine to form a 64-bit buffer address
- *
- * Word 1 has three forms, depending on the state of bit 8/12/13:
- * if bit8 =='1', the definition is just for custom checksum offload.
- * if bit8 == '0' && bit12 == '1' && bit13 == '1', the *FIRST* descriptor
- * for the skb is special for LSO V2, Word 2 become total skb length ,
- * Word 3 is meaningless.
- * other condition, the definition is for general skb or ip/tcp/udp
- * checksum or LSO(TSO) offload.
- *
- * Here is the depiction:
- *
- * 0-+ 0-+
- * 1 | 1 |
- * 2 | 2 |
- * 3 | Payload offset 3 | L4 header offset
- * 4 | (7:0) 4 | (7:0)
- * 5 | 5 |
- * 6 | 6 |
- * 7-+ 7-+
- * 8 Custom csum enable = 1 8 Custom csum enable = 0
- * 9 General IPv4 checksum 9 General IPv4 checksum
- * 10 General TCP checksum 10 General TCP checksum
- * 11 General UDP checksum 11 General UDP checksum
- * 12 Large Send Segment enable 12 Large Send Segment enable
- * 13 Large Send Segment type 13 Large Send Segment type
- * 14 VLAN tagged 14 VLAN tagged
- * 15 Insert VLAN tag 15 Insert VLAN tag
- * 16 IPv4 packet 16 IPv4 packet
- * 17 Ethernet frame type 17 Ethernet frame type
- * 18-+ 18-+
- * 19 | 19 |
- * 20 | 20 |
- * 21 | Custom csum offset 21 |
- * 22 | (25:18) 22 |
- * 23 | 23 | MSS (30:18)
- * 24 | 24 |
- * 25-+ 25 |
- * 26-+ 26 |
- * 27 | 27 |
- * 28 | Reserved 28 |
- * 29 | 29 |
- * 30-+ 30-+
- * 31 End of packet 31 End of packet
- */
-struct alx_txd {
- __le16 len;
- __le16 vlan_tag;
- __le32 word1;
- union {
- __le64 addr;
- struct {
- __le32 pkt_len;
- __le32 resvd;
- } l;
- } adrl;
-} __packed;
-
-/* tpd word 1 */
-#define TPD_CXSUMSTART_MASK 0x00FF
-#define TPD_CXSUMSTART_SHIFT 0
-#define TPD_L4HDROFFSET_MASK 0x00FF
-#define TPD_L4HDROFFSET_SHIFT 0
-#define TPD_CXSUM_EN_MASK 0x0001
-#define TPD_CXSUM_EN_SHIFT 8
-#define TPD_IP_XSUM_MASK 0x0001
-#define TPD_IP_XSUM_SHIFT 9
-#define TPD_TCP_XSUM_MASK 0x0001
-#define TPD_TCP_XSUM_SHIFT 10
-#define TPD_UDP_XSUM_MASK 0x0001
-#define TPD_UDP_XSUM_SHIFT 11
-#define TPD_LSO_EN_MASK 0x0001
-#define TPD_LSO_EN_SHIFT 12
-#define TPD_LSO_V2_MASK 0x0001
-#define TPD_LSO_V2_SHIFT 13
-#define TPD_VLTAGGED_MASK 0x0001
-#define TPD_VLTAGGED_SHIFT 14
-#define TPD_INS_VLTAG_MASK 0x0001
-#define TPD_INS_VLTAG_SHIFT 15
-#define TPD_IPV4_MASK 0x0001
-#define TPD_IPV4_SHIFT 16
-#define TPD_ETHTYPE_MASK 0x0001
-#define TPD_ETHTYPE_SHIFT 17
-#define TPD_CXSUMOFFSET_MASK 0x00FF
-#define TPD_CXSUMOFFSET_SHIFT 18
-#define TPD_MSS_MASK 0x1FFF
-#define TPD_MSS_SHIFT 18
-#define TPD_EOP_MASK 0x0001
-#define TPD_EOP_SHIFT 31
-
-#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK)
-
-/* Receive Free Descriptor */
-struct alx_rfd {
- __le64 addr; /* data buffer address, length is
- * declared in register --- every
- * buffer has the same size
- */
-} __packed;
-
-/* Receive Return Descriptor, contains 4 32-bit words.
- *
- * 31 16 0
- * +----------------+----------------+
- * | Word 0 |
- * +----------------+----------------+
- * | Word 1: RSS Hash value |
- * +----------------+----------------+
- * | Word 2 |
- * +----------------+----------------+
- * | Word 3 |
- * +----------------+----------------+
- *
- * Word 0 depiction & Word 2 depiction:
- *
- * 0--+ 0--+
- * 1 | 1 |
- * 2 | 2 |
- * 3 | 3 |
- * 4 | 4 |
- * 5 | 5 |
- * 6 | 6 |
- * 7 | IP payload checksum 7 | VLAN tag
- * 8 | (15:0) 8 | (15:0)
- * 9 | 9 |
- * 10 | 10 |
- * 11 | 11 |
- * 12 | 12 |
- * 13 | 13 |
- * 14 | 14 |
- * 15-+ 15-+
- * 16-+ 16-+
- * 17 | Number of RFDs 17 |
- * 18 | (19:16) 18 |
- * 19-+ 19 | Protocol ID
- * 20-+ 20 | (23:16)
- * 21 | 21 |
- * 22 | 22 |
- * 23 | 23-+
- * 24 | 24 | Reserved
- * 25 | Start index of RFD-ring 25-+
- * 26 | (31:20) 26 | RSS Q-num (27:25)
- * 27 | 27-+
- * 28 | 28-+
- * 29 | 29 | RSS Hash algorithm
- * 30 | 30 | (31:28)
- * 31-+ 31-+
- *
- * Word 3 depiction:
- *
- * 0--+
- * 1 |
- * 2 |
- * 3 |
- * 4 |
- * 5 |
- * 6 |
- * 7 | Packet length (include FCS)
- * 8 | (13:0)
- * 9 |
- * 10 |
- * 11 |
- * 12 |
- * 13-+
- * 14 L4 Header checksum error
- * 15 IPv4 checksum error
- * 16 VLAN tagged
- * 17-+
- * 18 | Protocol ID (19:17)
- * 19-+
- * 20 Receive error summary
- * 21 FCS(CRC) error
- * 22 Frame alignment error
- * 23 Truncated packet
- * 24 Runt packet
- * 25 Incomplete packet due to insufficient rx-desc
- * 26 Broadcast packet
- * 27 Multicast packet
- * 28 Ethernet type (EII or 802.3)
- * 29 FIFO overflow
- * 30 Length error (for 802.3, length field mismatch with actual len)
- * 31 Updated, indicate to driver that this RRD is refreshed.
- */
-struct alx_rrd {
- __le32 word0;
- __le32 rss_hash;
- __le32 word2;
- __le32 word3;
-} __packed;
-
-/* rrd word 0 */
-#define RRD_XSUM_MASK 0xFFFF
-#define RRD_XSUM_SHIFT 0
-#define RRD_NOR_MASK 0x000F
-#define RRD_NOR_SHIFT 16
-#define RRD_SI_MASK 0x0FFF
-#define RRD_SI_SHIFT 20
-
-/* rrd word 2 */
-#define RRD_VLTAG_MASK 0xFFFF
-#define RRD_VLTAG_SHIFT 0
-#define RRD_PID_MASK 0x00FF
-#define RRD_PID_SHIFT 16
-/* non-ip packet */
-#define RRD_PID_NONIP 0
-/* ipv4(only) */
-#define RRD_PID_IPV4 1
-/* tcp/ipv6 */
-#define RRD_PID_IPV6TCP 2
-/* tcp/ipv4 */
-#define RRD_PID_IPV4TCP 3
-/* udp/ipv6 */
-#define RRD_PID_IPV6UDP 4
-/* udp/ipv4 */
-#define RRD_PID_IPV4UDP 5
-/* ipv6(only) */
-#define RRD_PID_IPV6 6
-/* LLDP packet */
-#define RRD_PID_LLDP 7
-/* 1588 packet */
-#define RRD_PID_1588 8
-#define RRD_RSSQ_MASK 0x0007
-#define RRD_RSSQ_SHIFT 25
-#define RRD_RSSALG_MASK 0x000F
-#define RRD_RSSALG_SHIFT 28
-#define RRD_RSSALG_TCPV6 0x1
-#define RRD_RSSALG_IPV6 0x2
-#define RRD_RSSALG_TCPV4 0x4
-#define RRD_RSSALG_IPV4 0x8
-
-/* rrd word 3 */
-#define RRD_PKTLEN_MASK 0x3FFF
-#define RRD_PKTLEN_SHIFT 0
-#define RRD_ERR_L4_MASK 0x0001
-#define RRD_ERR_L4_SHIFT 14
-#define RRD_ERR_IPV4_MASK 0x0001
-#define RRD_ERR_IPV4_SHIFT 15
-#define RRD_VLTAGGED_MASK 0x0001
-#define RRD_VLTAGGED_SHIFT 16
-#define RRD_OLD_PID_MASK 0x0007
-#define RRD_OLD_PID_SHIFT 17
-#define RRD_ERR_RES_MASK 0x0001
-#define RRD_ERR_RES_SHIFT 20
-#define RRD_ERR_FCS_MASK 0x0001
-#define RRD_ERR_FCS_SHIFT 21
-#define RRD_ERR_FAE_MASK 0x0001
-#define RRD_ERR_FAE_SHIFT 22
-#define RRD_ERR_TRUNC_MASK 0x0001
-#define RRD_ERR_TRUNC_SHIFT 23
-#define RRD_ERR_RUNT_MASK 0x0001
-#define RRD_ERR_RUNT_SHIFT 24
-#define RRD_ERR_ICMP_MASK 0x0001
-#define RRD_ERR_ICMP_SHIFT 25
-#define RRD_BCAST_MASK 0x0001
-#define RRD_BCAST_SHIFT 26
-#define RRD_MCAST_MASK 0x0001
-#define RRD_MCAST_SHIFT 27
-#define RRD_ETHTYPE_MASK 0x0001
-#define RRD_ETHTYPE_SHIFT 28
-#define RRD_ERR_FIFOV_MASK 0x0001
-#define RRD_ERR_FIFOV_SHIFT 29
-#define RRD_ERR_LEN_MASK 0x0001
-#define RRD_ERR_LEN_SHIFT 30
-#define RRD_UPDATED_MASK 0x0001
-#define RRD_UPDATED_SHIFT 31
-
-
-#define ALX_MAX_SETUP_LNK_CYCLE 50
-
-/* for FlowControl */
-#define ALX_FC_RX 0x01
-#define ALX_FC_TX 0x02
-#define ALX_FC_ANEG 0x04
-
-/* for sleep control */
-#define ALX_SLEEP_WOL_PHY 0x00000001
-#define ALX_SLEEP_WOL_MAGIC 0x00000002
-#define ALX_SLEEP_CIFS 0x00000004
-#define ALX_SLEEP_ACTIVE (ALX_SLEEP_WOL_PHY | \
- ALX_SLEEP_WOL_MAGIC | \
- ALX_SLEEP_CIFS)
-
-/* for RSS hash type */
-#define ALX_RSS_HASH_TYPE_IPV4 0x1
-#define ALX_RSS_HASH_TYPE_IPV4_TCP 0x2
-#define ALX_RSS_HASH_TYPE_IPV6 0x4
-#define ALX_RSS_HASH_TYPE_IPV6_TCP 0x8
-#define ALX_RSS_HASH_TYPE_ALL (ALX_RSS_HASH_TYPE_IPV4 | \
- ALX_RSS_HASH_TYPE_IPV4_TCP | \
- ALX_RSS_HASH_TYPE_IPV6 | \
- ALX_RSS_HASH_TYPE_IPV6_TCP)
-#define ALX_DEF_RXBUF_SIZE 1536
-#define ALX_MAX_JUMBO_PKT_SIZE (9*1024)
-#define ALX_MAX_TSO_PKT_SIZE (7*1024)
-#define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE
-#define ALX_MIN_FRAME_SIZE 68
-#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
-
-#define ALX_MAX_RX_QUEUES 8
-#define ALX_MAX_TX_QUEUES 4
-#define ALX_MAX_HANDLED_INTRS 5
-
-#define ALX_ISR_MISC (ALX_ISR_PCIE_LNKDOWN | \
- ALX_ISR_DMAW | \
- ALX_ISR_DMAR | \
- ALX_ISR_SMB | \
- ALX_ISR_MANU | \
- ALX_ISR_TIMER)
-
-#define ALX_ISR_FATAL (ALX_ISR_PCIE_LNKDOWN | \
- ALX_ISR_DMAW | ALX_ISR_DMAR)
-
-#define ALX_ISR_ALERT (ALX_ISR_RXF_OV | \
- ALX_ISR_TXF_UR | \
- ALX_ISR_RFD_UR)
-
-#define ALX_ISR_ALL_QUEUES (ALX_ISR_TX_Q0 | \
- ALX_ISR_TX_Q1 | \
- ALX_ISR_TX_Q2 | \
- ALX_ISR_TX_Q3 | \
- ALX_ISR_RX_Q0 | \
- ALX_ISR_RX_Q1 | \
- ALX_ISR_RX_Q2 | \
- ALX_ISR_RX_Q3 | \
- ALX_ISR_RX_Q4 | \
- ALX_ISR_RX_Q5 | \
- ALX_ISR_RX_Q6 | \
- ALX_ISR_RX_Q7)
-
-/* maximum interrupt vectors for msix */
-#define ALX_MAX_MSIX_INTRS 16
-
-#define ALX_GET_FIELD(_data, _field) \
- (((_data) >> _field ## _SHIFT) & _field ## _MASK)
-
-#define ALX_SET_FIELD(_data, _field, _value) do { \
- (_data) &= ~(_field ## _MASK << _field ## _SHIFT); \
- (_data) |= ((_value) & _field ## _MASK) << _field ## _SHIFT;\
- } while (0)
-
-struct alx_hw {
- struct pci_dev *pdev;
- u8 __iomem *hw_addr;
-
- /* current & permanent mac addr */
- u8 mac_addr[ETH_ALEN];
- u8 perm_addr[ETH_ALEN];
-
- u16 mtu;
- u16 imt;
- u8 dma_chnl;
- u8 max_dma_chnl;
- /* tpd threshold to trig INT */
- u32 ith_tpd;
- u32 rx_ctrl;
- u32 mc_hash[2];
-
- u32 smb_timer;
- /* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */
- int link_speed;
-
- /* auto-neg advertisement or force mode config */
- u32 adv_cfg;
- u8 flowctrl;
-
- u32 sleep_ctrl;
-
- spinlock_t mdio_lock;
- struct mdio_if_info mdio;
- u16 phy_id[2];
-
- /* PHY link patch flag */
- bool lnk_patch;
-};
-
-static inline int alx_hw_revision(struct alx_hw *hw)
-{
- return hw->pdev->revision >> ALX_PCI_REVID_SHIFT;
-}
-
-static inline bool alx_hw_with_cr(struct alx_hw *hw)
-{
- return hw->pdev->revision & 1;
-}
-
-static inline bool alx_hw_giga(struct alx_hw *hw)
-{
- return hw->pdev->device & 1;
-}
-
-static inline void alx_write_mem8(struct alx_hw *hw, u32 reg, u8 val)
-{
- writeb(val, hw->hw_addr + reg);
-}
-
-static inline void alx_write_mem16(struct alx_hw *hw, u32 reg, u16 val)
-{
- writew(val, hw->hw_addr + reg);
-}
-
-static inline u16 alx_read_mem16(struct alx_hw *hw, u32 reg)
-{
- return readw(hw->hw_addr + reg);
-}
-
-static inline void alx_write_mem32(struct alx_hw *hw, u32 reg, u32 val)
-{
- writel(val, hw->hw_addr + reg);
-}
-
-static inline u32 alx_read_mem32(struct alx_hw *hw, u32 reg)
-{
- return readl(hw->hw_addr + reg);
-}
-
-static inline void alx_post_write(struct alx_hw *hw)
-{
- readl(hw->hw_addr);
-}
-
-int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr);
-void alx_reset_phy(struct alx_hw *hw);
-void alx_reset_pcie(struct alx_hw *hw);
-void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en);
-int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl);
-void alx_post_phy_link(struct alx_hw *hw);
-int alx_pre_suspend(struct alx_hw *hw, int speed);
-int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data);
-int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data);
-int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata);
-int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data);
-int alx_get_phy_link(struct alx_hw *hw, int *speed);
-int alx_clear_phy_intr(struct alx_hw *hw);
-int alx_config_wol(struct alx_hw *hw);
-void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc);
-void alx_start_mac(struct alx_hw *hw);
-int alx_reset_mac(struct alx_hw *hw);
-void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
-bool alx_phy_configured(struct alx_hw *hw);
-void alx_configure_basic(struct alx_hw *hw);
-void alx_disable_rss(struct alx_hw *hw);
-int alx_select_powersaving_speed(struct alx_hw *hw, int *speed);
-bool alx_get_phy_info(struct alx_hw *hw);
-
-#endif
diff --git a/trunk/drivers/net/ethernet/atheros/alx/main.c b/trunk/drivers/net/ethernet/atheros/alx/main.c
deleted file mode 100644
index 418de8b13165..000000000000
--- a/trunk/drivers/net/ethernet/atheros/alx/main.c
+++ /dev/null
@@ -1,1625 +0,0 @@
-/*
- * Copyright (c) 2013 Johannes Berg
- *
- * This file is free software: you may copy, redistribute and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation, either version 2 of the License, or (at your
- * option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see .
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- *
- * Copyright (c) 2012 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "alx.h"
-#include "hw.h"
-#include "reg.h"
-
-const char alx_drv_name[] = "alx";
-
-
-static void alx_free_txbuf(struct alx_priv *alx, int entry)
-{
- struct alx_buffer *txb = &alx->txq.bufs[entry];
-
- if (dma_unmap_len(txb, size)) {
- dma_unmap_single(&alx->hw.pdev->dev,
- dma_unmap_addr(txb, dma),
- dma_unmap_len(txb, size),
- DMA_TO_DEVICE);
- dma_unmap_len_set(txb, size, 0);
- }
-
- if (txb->skb) {
- dev_kfree_skb_any(txb->skb);
- txb->skb = NULL;
- }
-}
-
-static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
-{
- struct alx_rx_queue *rxq = &alx->rxq;
- struct sk_buff *skb;
- struct alx_buffer *cur_buf;
- dma_addr_t dma;
- u16 cur, next, count = 0;
-
- next = cur = rxq->write_idx;
- if (++next == alx->rx_ringsz)
- next = 0;
- cur_buf = &rxq->bufs[cur];
-
- while (!cur_buf->skb && next != rxq->read_idx) {
- struct alx_rfd *rfd = &rxq->rfd[cur];
-
- skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
- if (!skb)
- break;
- dma = dma_map_single(&alx->hw.pdev->dev,
- skb->data, alx->rxbuf_size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
- dev_kfree_skb(skb);
- break;
- }
-
- /* Unfortunately, RX descriptor buffers must be 4-byte
- * aligned, so we can't use IP alignment.
- */
- if (WARN_ON(dma & 3)) {
- dev_kfree_skb(skb);
- break;
- }
-
- cur_buf->skb = skb;
- dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
- dma_unmap_addr_set(cur_buf, dma, dma);
- rfd->addr = cpu_to_le64(dma);
-
- cur = next;
- if (++next == alx->rx_ringsz)
- next = 0;
- cur_buf = &rxq->bufs[cur];
- count++;
- }
-
- if (count) {
- /* flush all updates before updating hardware */
- wmb();
- rxq->write_idx = cur;
- alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
- }
-
- return count;
-}
-
-static inline int alx_tpd_avail(struct alx_priv *alx)
-{
- struct alx_tx_queue *txq = &alx->txq;
-
- if (txq->write_idx >= txq->read_idx)
- return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
- return txq->read_idx - txq->write_idx - 1;
-}
-
-static bool alx_clean_tx_irq(struct alx_priv *alx)
-{
- struct alx_tx_queue *txq = &alx->txq;
- u16 hw_read_idx, sw_read_idx;
- unsigned int total_bytes = 0, total_packets = 0;
- int budget = ALX_DEFAULT_TX_WORK;
-
- sw_read_idx = txq->read_idx;
- hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);
-
- if (sw_read_idx != hw_read_idx) {
- while (sw_read_idx != hw_read_idx && budget > 0) {
- struct sk_buff *skb;
-
- skb = txq->bufs[sw_read_idx].skb;
- if (skb) {
- total_bytes += skb->len;
- total_packets++;
- budget--;
- }
-
- alx_free_txbuf(alx, sw_read_idx);
-
- if (++sw_read_idx == alx->tx_ringsz)
- sw_read_idx = 0;
- }
- txq->read_idx = sw_read_idx;
-
- netdev_completed_queue(alx->dev, total_packets, total_bytes);
- }
-
- if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
- alx_tpd_avail(alx) > alx->tx_ringsz/4)
- netif_wake_queue(alx->dev);
-
- return sw_read_idx == hw_read_idx;
-}
-
-static void alx_schedule_link_check(struct alx_priv *alx)
-{
- schedule_work(&alx->link_check_wk);
-}
-
-static void alx_schedule_reset(struct alx_priv *alx)
-{
- schedule_work(&alx->reset_wk);
-}
-
-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
-{
- struct alx_rx_queue *rxq = &alx->rxq;
- struct alx_rrd *rrd;
- struct alx_buffer *rxb;
- struct sk_buff *skb;
- u16 length, rfd_cleaned = 0;
-
- while (budget > 0) {
- rrd = &rxq->rrd[rxq->rrd_read_idx];
- if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
- break;
- rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);
-
- if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
- RRD_SI) != rxq->read_idx ||
- ALX_GET_FIELD(le32_to_cpu(rrd->word0),
- RRD_NOR) != 1) {
- alx_schedule_reset(alx);
- return 0;
- }
-
- rxb = &rxq->bufs[rxq->read_idx];
- dma_unmap_single(&alx->hw.pdev->dev,
- dma_unmap_addr(rxb, dma),
- dma_unmap_len(rxb, size),
- DMA_FROM_DEVICE);
- dma_unmap_len_set(rxb, size, 0);
- skb = rxb->skb;
- rxb->skb = NULL;
-
- if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
- rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
- rrd->word3 = 0;
- dev_kfree_skb_any(skb);
- goto next_pkt;
- }
-
- length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
- RRD_PKTLEN) - ETH_FCS_LEN;
- skb_put(skb, length);
- skb->protocol = eth_type_trans(skb, alx->dev);
-
- skb_checksum_none_assert(skb);
- if (alx->dev->features & NETIF_F_RXCSUM &&
- !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
- cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
- switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
- RRD_PID)) {
- case RRD_PID_IPV6UDP:
- case RRD_PID_IPV4UDP:
- case RRD_PID_IPV4TCP:
- case RRD_PID_IPV6TCP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- }
- }
-
- napi_gro_receive(&alx->napi, skb);
- budget--;
-
-next_pkt:
- if (++rxq->read_idx == alx->rx_ringsz)
- rxq->read_idx = 0;
- if (++rxq->rrd_read_idx == alx->rx_ringsz)
- rxq->rrd_read_idx = 0;
-
- if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
- rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
- }
-
- if (rfd_cleaned)
- alx_refill_rx_ring(alx, GFP_ATOMIC);
-
- return budget > 0;
-}
-
-static int alx_poll(struct napi_struct *napi, int budget)
-{
- struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
- struct alx_hw *hw = &alx->hw;
- bool complete = true;
- unsigned long flags;
-
- complete = alx_clean_tx_irq(alx) &&
- alx_clean_rx_irq(alx, budget);
-
- if (!complete)
- return 1;
-
- napi_complete(&alx->napi);
-
- /* enable interrupt */
- spin_lock_irqsave(&alx->irq_lock, flags);
- alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
- alx_write_mem32(hw, ALX_IMR, alx->int_mask);
- spin_unlock_irqrestore(&alx->irq_lock, flags);
-
- alx_post_write(hw);
-
- return 0;
-}
-
-static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
-{
- struct alx_hw *hw = &alx->hw;
- bool write_int_mask = false;
-
- spin_lock(&alx->irq_lock);
-
- /* ACK interrupt */
- alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
- intr &= alx->int_mask;
-
- if (intr & ALX_ISR_FATAL) {
- netif_warn(alx, hw, alx->dev,
- "fatal interrupt 0x%x, resetting\n", intr);
- alx_schedule_reset(alx);
- goto out;
- }
-
- if (intr & ALX_ISR_ALERT)
- netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);
-
- if (intr & ALX_ISR_PHY) {
- /* suppress PHY interrupt, because the source
- * is from PHY internal. only the internal status
- * is cleared, the interrupt status could be cleared.
- */
- alx->int_mask &= ~ALX_ISR_PHY;
- write_int_mask = true;
- alx_schedule_link_check(alx);
- }
-
- if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
- napi_schedule(&alx->napi);
- /* mask rx/tx interrupt, enable them when napi complete */
- alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
- write_int_mask = true;
- }
-
- if (write_int_mask)
- alx_write_mem32(hw, ALX_IMR, alx->int_mask);
-
- alx_write_mem32(hw, ALX_ISR, 0);
-
- out:
- spin_unlock(&alx->irq_lock);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t alx_intr_msi(int irq, void *data)
-{
- struct alx_priv *alx = data;
-
- return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
-}
-
-static irqreturn_t alx_intr_legacy(int irq, void *data)
-{
- struct alx_priv *alx = data;
- struct alx_hw *hw = &alx->hw;
- u32 intr;
-
- intr = alx_read_mem32(hw, ALX_ISR);
-
- if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
- return IRQ_NONE;
-
- return alx_intr_handle(alx, intr);
-}
-
-static void alx_init_ring_ptrs(struct alx_priv *alx)
-{
- struct alx_hw *hw = &alx->hw;
- u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
-
- alx->rxq.read_idx = 0;
- alx->rxq.write_idx = 0;
- alx->rxq.rrd_read_idx = 0;
- alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
- alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
- alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
- alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
- alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
- alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);
-
- alx->txq.read_idx = 0;
- alx->txq.write_idx = 0;
- alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
- alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
- alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
-
- /* load these pointers into the chip */
- alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
-}
-
-static void alx_free_txring_buf(struct alx_priv *alx)
-{
- struct alx_tx_queue *txq = &alx->txq;
- int i;
-
- if (!txq->bufs)
- return;
-
- for (i = 0; i < alx->tx_ringsz; i++)
- alx_free_txbuf(alx, i);
-
- memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
- memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
- txq->write_idx = 0;
- txq->read_idx = 0;
-
- netdev_reset_queue(alx->dev);
-}
-
-static void alx_free_rxring_buf(struct alx_priv *alx)
-{
- struct alx_rx_queue *rxq = &alx->rxq;
- struct alx_buffer *cur_buf;
- u16 i;
-
- if (rxq == NULL)
- return;
-
- for (i = 0; i < alx->rx_ringsz; i++) {
- cur_buf = rxq->bufs + i;
- if (cur_buf->skb) {
- dma_unmap_single(&alx->hw.pdev->dev,
- dma_unmap_addr(cur_buf, dma),
- dma_unmap_len(cur_buf, size),
- DMA_FROM_DEVICE);
- dev_kfree_skb(cur_buf->skb);
- cur_buf->skb = NULL;
- dma_unmap_len_set(cur_buf, size, 0);
- dma_unmap_addr_set(cur_buf, dma, 0);
- }
- }
-
- rxq->write_idx = 0;
- rxq->read_idx = 0;
- rxq->rrd_read_idx = 0;
-}
-
-static void alx_free_buffers(struct alx_priv *alx)
-{
- alx_free_txring_buf(alx);
- alx_free_rxring_buf(alx);
-}
-
-static int alx_reinit_rings(struct alx_priv *alx)
-{
- alx_free_buffers(alx);
-
- alx_init_ring_ptrs(alx);
-
- if (!alx_refill_rx_ring(alx, GFP_KERNEL))
- return -ENOMEM;
-
- return 0;
-}
-
-static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
-{
- u32 crc32, bit, reg;
-
- crc32 = ether_crc(ETH_ALEN, addr);
- reg = (crc32 >> 31) & 0x1;
- bit = (crc32 >> 26) & 0x1F;
-
- mc_hash[reg] |= BIT(bit);
-}
-
-static void __alx_set_rx_mode(struct net_device *netdev)
-{
- struct alx_priv *alx = netdev_priv(netdev);
- struct alx_hw *hw = &alx->hw;
- struct netdev_hw_addr *ha;
- u32 mc_hash[2] = {};
-
- if (!(netdev->flags & IFF_ALLMULTI)) {
- netdev_for_each_mc_addr(ha, netdev)
- alx_add_mc_addr(hw, ha->addr, mc_hash);
-
- alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
- alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
- }
-
- hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
- if (netdev->flags & IFF_PROMISC)
- hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
- if (netdev->flags & IFF_ALLMULTI)
- hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;
-
- alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
-}
-
-static void alx_set_rx_mode(struct net_device *netdev)
-{
- __alx_set_rx_mode(netdev);
-}
-
-static int alx_set_mac_address(struct net_device *netdev, void *data)
-{
- struct alx_priv *alx = netdev_priv(netdev);
- struct alx_hw *hw = &alx->hw;
- struct sockaddr *addr = data;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- if (netdev->addr_assign_type & NET_ADDR_RANDOM)
- netdev->addr_assign_type ^= NET_ADDR_RANDOM;
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
- alx_set_macaddr(hw, hw->mac_addr);
-
- return 0;
-}
-
-static int alx_alloc_descriptors(struct alx_priv *alx)
-{
- alx->txq.bufs = kcalloc(alx->tx_ringsz,
- sizeof(struct alx_buffer),
- GFP_KERNEL);
- if (!alx->txq.bufs)
- return -ENOMEM;
-
- alx->rxq.bufs = kcalloc(alx->rx_ringsz,
- sizeof(struct alx_buffer),
- GFP_KERNEL);
- if (!alx->rxq.bufs)
- goto out_free;
-
- /* physical tx/rx ring descriptors
- *
- * Allocate them as a single chunk because they must not cross a
- * 4G boundary (hardware has a single register for high 32 bits
- * of addresses only)
- */
- alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
- sizeof(struct alx_rrd) * alx->rx_ringsz +
- sizeof(struct alx_rfd) * alx->rx_ringsz;
- alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
- alx->descmem.size,
- &alx->descmem.dma,
- GFP_KERNEL);
- if (!alx->descmem.virt)
- goto out_free;
-
- alx->txq.tpd = (void *)alx->descmem.virt;
- alx->txq.tpd_dma = alx->descmem.dma;
-
- /* alignment requirement for next block */
- BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
-
- alx->rxq.rrd =
- (void *)((u8 *)alx->descmem.virt +
- sizeof(struct alx_txd) * alx->tx_ringsz);
- alx->rxq.rrd_dma = alx->descmem.dma +
- sizeof(struct alx_txd) * alx->tx_ringsz;
-
- /* alignment requirement for next block */
- BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);
-
- alx->rxq.rfd =
- (void *)((u8 *)alx->descmem.virt +
- sizeof(struct alx_txd) * alx->tx_ringsz +
- sizeof(struct alx_rrd) * alx->rx_ringsz);
- alx->rxq.rfd_dma = alx->descmem.dma +
- sizeof(struct alx_txd) * alx->tx_ringsz +
- sizeof(struct alx_rrd) * alx->rx_ringsz;
-
- return 0;
-out_free:
- kfree(alx->txq.bufs);
- kfree(alx->rxq.bufs);
- return -ENOMEM;
-}
-
-static int alx_alloc_rings(struct alx_priv *alx)
-{
- int err;
-
- err = alx_alloc_descriptors(alx);
- if (err)
- return err;
-
- alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
- alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
- alx->tx_ringsz = alx->tx_ringsz;
-
- netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);
-
- alx_reinit_rings(alx);
- return 0;
-}
-
-static void alx_free_rings(struct alx_priv *alx)
-{
- netif_napi_del(&alx->napi);
- alx_free_buffers(alx);
-
- kfree(alx->txq.bufs);
- kfree(alx->rxq.bufs);
-
- dma_free_coherent(&alx->hw.pdev->dev,
- alx->descmem.size,
- alx->descmem.virt,
- alx->descmem.dma);
-}
-
-static void alx_config_vector_mapping(struct alx_priv *alx)
-{
- struct alx_hw *hw = &alx->hw;
-
- alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
- alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
- alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
-}
-
-static void alx_irq_enable(struct alx_priv *alx)
-{
- struct alx_hw *hw = &alx->hw;
-
- /* level-1 interrupt switch */
- alx_write_mem32(hw, ALX_ISR, 0);
- alx_write_mem32(hw, ALX_IMR, alx->int_mask);
- alx_post_write(hw);
-}
-
-static void alx_irq_disable(struct alx_priv *alx)
-{
- struct alx_hw *hw = &alx->hw;
-
- alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
- alx_write_mem32(hw, ALX_IMR, 0);
- alx_post_write(hw);
-
- synchronize_irq(alx->hw.pdev->irq);
-}
-
-static int alx_request_irq(struct alx_priv *alx)
-{
- struct pci_dev *pdev = alx->hw.pdev;
- struct alx_hw *hw = &alx->hw;
- int err;
- u32 msi_ctrl;
-
- msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
-
- if (!pci_enable_msi(alx->hw.pdev)) {
- alx->msi = true;
-
- alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
- msi_ctrl | ALX_MSI_MASK_SEL_LINE);
- err = request_irq(pdev->irq, alx_intr_msi, 0,
- alx->dev->name, alx);
- if (!err)
- goto out;
- /* fall back to legacy interrupt */
- pci_disable_msi(alx->hw.pdev);
- }
-
- alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
- err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
- alx->dev->name, alx);
-out:
- if (!err)
- alx_config_vector_mapping(alx);
- return err;
-}
-
-static void alx_free_irq(struct alx_priv *alx)
-{
- struct pci_dev *pdev = alx->hw.pdev;
-
- free_irq(pdev->irq, alx);
-
- if (alx->msi) {
- pci_disable_msi(alx->hw.pdev);
- alx->msi = false;
- }
-}
-
-static int alx_identify_hw(struct alx_priv *alx)
-{
- struct alx_hw *hw = &alx->hw;
- int rev = alx_hw_revision(hw);
-
- if (rev > ALX_REV_C0)
- return -EINVAL;
-
- hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;
-
- return 0;
-}
-
-static int alx_init_sw(struct alx_priv *alx)
-{
- struct pci_dev *pdev = alx->hw.pdev;
- struct alx_hw *hw = &alx->hw;
- int err;
-
- err = alx_identify_hw(alx);
- if (err) {
- dev_err(&pdev->dev, "unrecognized chip, aborting\n");
- return err;
- }
-
- alx->hw.lnk_patch =
- pdev->device == ALX_DEV_ID_AR8161 &&
- pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
- pdev->subsystem_device == 0x0091 &&
- pdev->revision == 0;
-
- hw->smb_timer = 400;
- hw->mtu = alx->dev->mtu;
- alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8);
- alx->tx_ringsz = 256;
- alx->rx_ringsz = 512;
- hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY;
- hw->imt = 200;
- alx->int_mask = ALX_ISR_MISC;
- hw->dma_chnl = hw->max_dma_chnl;
- hw->ith_tpd = alx->tx_ringsz / 3;
- hw->link_speed = SPEED_UNKNOWN;
- hw->adv_cfg = ADVERTISED_Autoneg |
- ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_1000baseT_Full;
- hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;
-
- hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
- ALX_MAC_CTRL_MHASH_ALG_HI5B |
- ALX_MAC_CTRL_BRD_EN |
- ALX_MAC_CTRL_PCRCE |
- ALX_MAC_CTRL_CRCE |
- ALX_MAC_CTRL_RXFC_EN |
- ALX_MAC_CTRL_TXFC_EN |
- 7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;
-
- return err;
-}
-
-
-static netdev_features_t alx_fix_features(struct net_device *netdev,
- netdev_features_t features)
-{
- if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
- features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
-
- return features;
-}
-
-static void alx_netif_stop(struct alx_priv *alx)
-{
- alx->dev->trans_start = jiffies;
- if (netif_carrier_ok(alx->dev)) {
- netif_carrier_off(alx->dev);
- netif_tx_disable(alx->dev);
- napi_disable(&alx->napi);
- }
-}
-
-static void alx_halt(struct alx_priv *alx)
-{
- struct alx_hw *hw = &alx->hw;
-
- alx_netif_stop(alx);
- hw->link_speed = SPEED_UNKNOWN;
-
- alx_reset_mac(hw);
-
- /* disable l0s/l1 */
- alx_enable_aspm(hw, false, false);
- alx_irq_disable(alx);
- alx_free_buffers(alx);
-}
-
-static void alx_configure(struct alx_priv *alx)
-{
- struct alx_hw *hw = &alx->hw;
-
- alx_configure_basic(hw);
- alx_disable_rss(hw);
- __alx_set_rx_mode(alx->dev);
-
- alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
-}
-
-static void alx_activate(struct alx_priv *alx)
-{
- /* hardware setting lost, restore it */
- alx_reinit_rings(alx);
- alx_configure(alx);
-
- /* clear old interrupts */
- alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
-
- alx_irq_enable(alx);
-
- alx_schedule_link_check(alx);
-}
-
-static void alx_reinit(struct alx_priv *alx)
-{
- ASSERT_RTNL();
-
- alx_halt(alx);
- alx_activate(alx);
-}
-
-static int alx_change_mtu(struct net_device *netdev, int mtu)
-{
- struct alx_priv *alx = netdev_priv(netdev);
- int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-
- if ((max_frame < ALX_MIN_FRAME_SIZE) ||
- (max_frame > ALX_MAX_FRAME_SIZE))
- return -EINVAL;
-
- if (netdev->mtu == mtu)
- return 0;
-
- netdev->mtu = mtu;
- alx->hw.mtu = mtu;
- alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ?
- ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE;
- netdev_update_features(netdev);
- if (netif_running(netdev))
- alx_reinit(alx);
- return 0;
-}
-
-static void alx_netif_start(struct alx_priv *alx)
-{
- netif_tx_wake_all_queues(alx->dev);
- napi_enable(&alx->napi);
- netif_carrier_on(alx->dev);
-}
-
-static int __alx_open(struct alx_priv *alx, bool resume)
-{
- int err;
-
- if (!resume)
- netif_carrier_off(alx->dev);
-
- err = alx_alloc_rings(alx);
- if (err)
- return err;
-
- alx_configure(alx);
-
- err = alx_request_irq(alx);
- if (err)
- goto out_free_rings;
-
- /* clear old interrupts */
- alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
-
- alx_irq_enable(alx);
-
- if (!resume)
- netif_tx_start_all_queues(alx->dev);
-
- alx_schedule_link_check(alx);
- return 0;
-
-out_free_rings:
- alx_free_rings(alx);
- return err;
-}
-
-static void __alx_stop(struct alx_priv *alx)
-{
- alx_halt(alx);
- alx_free_irq(alx);
- alx_free_rings(alx);
-}
-
-static const char *alx_speed_desc(u16 speed)
-{
- switch (speed) {
- case SPEED_1000 + DUPLEX_FULL:
- return "1 Gbps Full";
- case SPEED_100 + DUPLEX_FULL:
- return "100 Mbps Full";
- case SPEED_100 + DUPLEX_HALF:
- return "100 Mbps Half";
- case SPEED_10 + DUPLEX_FULL:
- return "10 Mbps Full";
- case SPEED_10 + DUPLEX_HALF:
- return "10 Mbps Half";
- default:
- return "Unknown speed";
- }
-}
-
-static void alx_check_link(struct alx_priv *alx)
-{
- struct alx_hw *hw = &alx->hw;
- unsigned long flags;
- int speed, old_speed;
- int err;
-
- /* clear PHY internal interrupt status, otherwise the main
- * interrupt status will be asserted forever
- */
- alx_clear_phy_intr(hw);
-
- err = alx_get_phy_link(hw, &speed);
- if (err < 0)
- goto reset;
-
- spin_lock_irqsave(&alx->irq_lock, flags);
- alx->int_mask |= ALX_ISR_PHY;
- alx_write_mem32(hw, ALX_IMR, alx->int_mask);
- spin_unlock_irqrestore(&alx->irq_lock, flags);
-
- old_speed = hw->link_speed;
-
- if (old_speed == speed)
- return;
- hw->link_speed = speed;
-
- if (speed != SPEED_UNKNOWN) {
- netif_info(alx, link, alx->dev,
- "NIC Up: %s\n", alx_speed_desc(speed));
- alx_post_phy_link(hw);
- alx_enable_aspm(hw, true, true);
- alx_start_mac(hw);
-
- if (old_speed == SPEED_UNKNOWN)
- alx_netif_start(alx);
- } else {
- /* link is now down */
- alx_netif_stop(alx);
- netif_info(alx, link, alx->dev, "Link Down\n");
- err = alx_reset_mac(hw);
- if (err)
- goto reset;
- alx_irq_disable(alx);
-
- /* MAC reset causes all HW settings to be lost, restore all */
- err = alx_reinit_rings(alx);
- if (err)
- goto reset;
- alx_configure(alx);
- alx_enable_aspm(hw, false, true);
- alx_post_phy_link(hw);
- alx_irq_enable(alx);
- }
-
- return;
-
-reset:
- alx_schedule_reset(alx);
-}
-
-static int alx_open(struct net_device *netdev)
-{
- return __alx_open(netdev_priv(netdev), false);
-}
-
-static int alx_stop(struct net_device *netdev)
-{
- __alx_stop(netdev_priv(netdev));
- return 0;
-}
-
-static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en)
-{
- struct alx_priv *alx = pci_get_drvdata(pdev);
- struct net_device *netdev = alx->dev;
- struct alx_hw *hw = &alx->hw;
- int err, speed;
-
- netif_device_detach(netdev);
-
- if (netif_running(netdev))
- __alx_stop(alx);
-
-#ifdef CONFIG_PM_SLEEP
- err = pci_save_state(pdev);
- if (err)
- return err;
-#endif
-
- err = alx_select_powersaving_speed(hw, &speed);
- if (err)
- return err;
- err = alx_clear_phy_intr(hw);
- if (err)
- return err;
- err = alx_pre_suspend(hw, speed);
- if (err)
- return err;
- err = alx_config_wol(hw);
- if (err)
- return err;
-
- *wol_en = false;
- if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) {
- netif_info(alx, wol, netdev,
- "wol: ctrl=%X, speed=%X\n",
- hw->sleep_ctrl, speed);
- device_set_wakeup_enable(&pdev->dev, true);
- *wol_en = true;
- }
-
- pci_disable_device(pdev);
-
- return 0;
-}
-
-static void alx_shutdown(struct pci_dev *pdev)
-{
- int err;
- bool wol_en;
-
- err = __alx_shutdown(pdev, &wol_en);
- if (!err) {
- pci_wake_from_d3(pdev, wol_en);
- pci_set_power_state(pdev, PCI_D3hot);
- } else {
- dev_err(&pdev->dev, "shutdown fail %d\n", err);
- }
-}
-
-static void alx_link_check(struct work_struct *work)
-{
- struct alx_priv *alx;
-
- alx = container_of(work, struct alx_priv, link_check_wk);
-
- rtnl_lock();
- alx_check_link(alx);
- rtnl_unlock();
-}
-
-static void alx_reset(struct work_struct *work)
-{
- struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
-
- rtnl_lock();
- alx_reinit(alx);
- rtnl_unlock();
-}
-
-static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
-{
- u8 cso, css;
-
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
-
- cso = skb_checksum_start_offset(skb);
- if (cso & 1)
- return -EINVAL;
-
- css = cso + skb->csum_offset;
- first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
- first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
- first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);
-
- return 0;
-}
-
-static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
-{
- struct alx_tx_queue *txq = &alx->txq;
- struct alx_txd *tpd, *first_tpd;
- dma_addr_t dma;
- int maplen, f, first_idx = txq->write_idx;
-
- first_tpd = &txq->tpd[txq->write_idx];
- tpd = first_tpd;
-
- maplen = skb_headlen(skb);
- dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&alx->hw.pdev->dev, dma))
- goto err_dma;
-
- dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
- dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
-
- tpd->adrl.addr = cpu_to_le64(dma);
- tpd->len = cpu_to_le16(maplen);
-
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
- struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[f];
-
- if (++txq->write_idx == alx->tx_ringsz)
- txq->write_idx = 0;
- tpd = &txq->tpd[txq->write_idx];
-
- tpd->word1 = first_tpd->word1;
-
- maplen = skb_frag_size(frag);
- dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
- maplen, DMA_TO_DEVICE);
- if (dma_mapping_error(&alx->hw.pdev->dev, dma))
- goto err_dma;
- dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
- dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
-
- tpd->adrl.addr = cpu_to_le64(dma);
- tpd->len = cpu_to_le16(maplen);
- }
-
- /* last TPD, set EOP flag and store skb */
- tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
- txq->bufs[txq->write_idx].skb = skb;
-
- if (++txq->write_idx == alx->tx_ringsz)
- txq->write_idx = 0;
-
- return 0;
-
-err_dma:
- f = first_idx;
- while (f != txq->write_idx) {
- alx_free_txbuf(alx, f);
- if (++f == alx->tx_ringsz)
- f = 0;
- }
- return -ENOMEM;
-}
-
-static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
-{
- struct alx_priv *alx = netdev_priv(netdev);
- struct alx_tx_queue *txq = &alx->txq;
- struct alx_txd *first;
- int tpdreq = skb_shinfo(skb)->nr_frags + 1;
-
- if (alx_tpd_avail(alx) < tpdreq) {
- netif_stop_queue(alx->dev);
- goto drop;
- }
-
- first = &txq->tpd[txq->write_idx];
- memset(first, 0, sizeof(*first));
-
- if (alx_tx_csum(skb, first))
- goto drop;
-
- if (alx_map_tx_skb(alx, skb) < 0)
- goto drop;
-
- netdev_sent_queue(alx->dev, skb->len);
-
- /* flush updates before updating hardware */
- wmb();
- alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);
-
- if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
- netif_stop_queue(alx->dev);
-
- return NETDEV_TX_OK;
-
-drop:
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-static void alx_tx_timeout(struct net_device *dev)
-{
- struct alx_priv *alx = netdev_priv(dev);
-
- alx_schedule_reset(alx);
-}
-
-static int alx_mdio_read(struct net_device *netdev,
- int prtad, int devad, u16 addr)
-{
- struct alx_priv *alx = netdev_priv(netdev);
- struct alx_hw *hw = &alx->hw;
- u16 val;
- int err;
-
- if (prtad != hw->mdio.prtad)
- return -EINVAL;
-
- if (devad == MDIO_DEVAD_NONE)
- err = alx_read_phy_reg(hw, addr, &val);
- else
- err = alx_read_phy_ext(hw, devad, addr, &val);
-
- if (err)
- return err;
- return val;
-}
-
-static int alx_mdio_write(struct net_device *netdev,
- int prtad, int devad, u16 addr, u16 val)
-{
- struct alx_priv *alx = netdev_priv(netdev);
- struct alx_hw *hw = &alx->hw;
-
- if (prtad != hw->mdio.prtad)
- return -EINVAL;
-
- if (devad == MDIO_DEVAD_NONE)
- return alx_write_phy_reg(hw, addr, val);
-
- return alx_write_phy_ext(hw, devad, addr, val);
-}
-
-static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct alx_priv *alx = netdev_priv(netdev);
-
- if (!netif_running(netdev))
- return -EAGAIN;
-
- return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void alx_poll_controller(struct net_device *netdev)
-{
- struct alx_priv *alx = netdev_priv(netdev);
-
- if (alx->msi)
- alx_intr_msi(0, alx);
- else
- alx_intr_legacy(0, alx);
-}
-#endif
-
-static const struct net_device_ops alx_netdev_ops = {
- .ndo_open = alx_open,
- .ndo_stop = alx_stop,
- .ndo_start_xmit = alx_start_xmit,
- .ndo_set_rx_mode = alx_set_rx_mode,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = alx_set_mac_address,
- .ndo_change_mtu = alx_change_mtu,
- .ndo_do_ioctl = alx_ioctl,
- .ndo_tx_timeout = alx_tx_timeout,
- .ndo_fix_features = alx_fix_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = alx_poll_controller,
-#endif
-};
-
-static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct net_device *netdev;
- struct alx_priv *alx;
- struct alx_hw *hw;
- bool phy_configured;
- int bars, pm_cap, err;
-
- err = pci_enable_device_mem(pdev);
- if (err)
- return err;
-
- /* The alx chip can DMA to 64-bit addresses, but it uses a single
- * shared register for the high 32 bits, so only a single, aligned,
- * 4 GB physical address range can be used for descriptors.
- */
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA config, aborting\n");
- goto out_pci_disable;
- }
- }
- }
-
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- err = pci_request_selected_regions(pdev, bars, alx_drv_name);
- if (err) {
- dev_err(&pdev->dev,
- "pci_request_selected_regions failed(bars:%d)\n", bars);
- goto out_pci_disable;
- }
-
- pci_enable_pcie_error_reporting(pdev);
- pci_set_master(pdev);
-
- pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (pm_cap == 0) {
- dev_err(&pdev->dev,
- "Can't find power management capability, aborting\n");
- err = -EIO;
- goto out_pci_release;
- }
-
- err = pci_set_power_state(pdev, PCI_D0);
- if (err)
- goto out_pci_release;
-
- netdev = alloc_etherdev(sizeof(*alx));
- if (!netdev) {
- err = -ENOMEM;
- goto out_pci_release;
- }
-
- SET_NETDEV_DEV(netdev, &pdev->dev);
- alx = netdev_priv(netdev);
- alx->dev = netdev;
- alx->hw.pdev = pdev;
- alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
- NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
- hw = &alx->hw;
- pci_set_drvdata(pdev, alx);
-
- hw->hw_addr = pci_ioremap_bar(pdev, 0);
- if (!hw->hw_addr) {
- dev_err(&pdev->dev, "cannot map device registers\n");
- err = -EIO;
- goto out_free_netdev;
- }
-
- netdev->netdev_ops = &alx_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
- netdev->irq = pdev->irq;
- netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
-
- if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
- pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
-
- err = alx_init_sw(alx);
- if (err) {
- dev_err(&pdev->dev, "net device private data init failed\n");
- goto out_unmap;
- }
-
- alx_reset_pcie(hw);
-
- phy_configured = alx_phy_configured(hw);
-
- if (!phy_configured)
- alx_reset_phy(hw);
-
- err = alx_reset_mac(hw);
- if (err) {
- dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
- goto out_unmap;
- }
-
- /* setup link to put it in a known good starting state */
- if (!phy_configured) {
- err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
- if (err) {
- dev_err(&pdev->dev,
- "failed to configure PHY speed/duplex (err=%d)\n",
- err);
- goto out_unmap;
- }
- }
-
- netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
-
- if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
- dev_warn(&pdev->dev,
- "Invalid permanent address programmed, using random one\n");
- eth_hw_addr_random(netdev);
- memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
- }
-
- memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
- memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
- memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);
-
- hw->mdio.prtad = 0;
- hw->mdio.mmds = 0;
- hw->mdio.dev = netdev;
- hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
- MDIO_SUPPORTS_C22 |
- MDIO_EMULATE_C22;
- hw->mdio.mdio_read = alx_mdio_read;
- hw->mdio.mdio_write = alx_mdio_write;
-
- if (!alx_get_phy_info(hw)) {
- dev_err(&pdev->dev, "failed to identify PHY\n");
- err = -EIO;
- goto out_unmap;
- }
-
- INIT_WORK(&alx->link_check_wk, alx_link_check);
- INIT_WORK(&alx->reset_wk, alx_reset);
- spin_lock_init(&alx->hw.mdio_lock);
- spin_lock_init(&alx->irq_lock);
-
- netif_carrier_off(netdev);
-
- err = register_netdev(netdev);
- if (err) {
- dev_err(&pdev->dev, "register netdevice failed\n");
- goto out_unmap;
- }
-
- device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl);
-
- netdev_info(netdev,
- "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
- netdev->dev_addr);
-
- return 0;
-
-out_unmap:
- iounmap(hw->hw_addr);
-out_free_netdev:
- free_netdev(netdev);
-out_pci_release:
- pci_release_selected_regions(pdev, bars);
-out_pci_disable:
- pci_disable_device(pdev);
- return err;
-}
-
-static void alx_remove(struct pci_dev *pdev)
-{
- struct alx_priv *alx = pci_get_drvdata(pdev);
- struct alx_hw *hw = &alx->hw;
-
- cancel_work_sync(&alx->link_check_wk);
- cancel_work_sync(&alx->reset_wk);
-
- /* restore permanent mac address */
- alx_set_macaddr(hw, hw->perm_addr);
-
- unregister_netdev(alx->dev);
- iounmap(hw->hw_addr);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
-
- pci_disable_pcie_error_reporting(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
-
- free_netdev(alx->dev);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int alx_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int err;
- bool wol_en;
-
- err = __alx_shutdown(pdev, &wol_en);
- if (err) {
- dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err);
- return err;
- }
-
- if (wol_en) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return 0;
-}
-
-static int alx_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct alx_priv *alx = pci_get_drvdata(pdev);
- struct net_device *netdev = alx->dev;
- struct alx_hw *hw = &alx->hw;
- int err;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_save_state(pdev);
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
- hw->link_speed = SPEED_UNKNOWN;
- alx->int_mask = ALX_ISR_MISC;
-
- alx_reset_pcie(hw);
- alx_reset_phy(hw);
-
- err = alx_reset_mac(hw);
- if (err) {
- netif_err(alx, hw, alx->dev,
- "resume:reset_mac fail %d\n", err);
- return -EIO;
- }
-
- err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
- if (err) {
- netif_err(alx, hw, alx->dev,
- "resume:setup_speed_duplex fail %d\n", err);
- return -EIO;
- }
-
- if (netif_running(netdev)) {
- err = __alx_open(alx, true);
- if (err)
- return err;
- }
-
- netif_device_attach(netdev);
-
- return err;
-}
-#endif
-
-static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct alx_priv *alx = pci_get_drvdata(pdev);
- struct net_device *netdev = alx->dev;
- pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;
-
- dev_info(&pdev->dev, "pci error detected\n");
-
- rtnl_lock();
-
- if (netif_running(netdev)) {
- netif_device_detach(netdev);
- alx_halt(alx);
- }
-
- if (state == pci_channel_io_perm_failure)
- rc = PCI_ERS_RESULT_DISCONNECT;
- else
- pci_disable_device(pdev);
-
- rtnl_unlock();
-
- return rc;
-}
-
-static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
-{
- struct alx_priv *alx = pci_get_drvdata(pdev);
- struct alx_hw *hw = &alx->hw;
- pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
-
- dev_info(&pdev->dev, "pci error slot reset\n");
-
- rtnl_lock();
-
- if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
- goto out;
- }
-
- pci_set_master(pdev);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
- alx_reset_pcie(hw);
- if (!alx_reset_mac(hw))
- rc = PCI_ERS_RESULT_RECOVERED;
-out:
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
- rtnl_unlock();
-
- return rc;
-}
-
-static void alx_pci_error_resume(struct pci_dev *pdev)
-{
- struct alx_priv *alx = pci_get_drvdata(pdev);
- struct net_device *netdev = alx->dev;
-
- dev_info(&pdev->dev, "pci error resume\n");
-
- rtnl_lock();
-
- if (netif_running(netdev)) {
- alx_activate(alx);
- netif_device_attach(netdev);
- }
-
- rtnl_unlock();
-}
-
-static const struct pci_error_handlers alx_err_handlers = {
- .error_detected = alx_pci_error_detected,
- .slot_reset = alx_pci_error_slot_reset,
- .resume = alx_pci_error_resume,
-};
-
-#ifdef CONFIG_PM_SLEEP
-static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
-#define ALX_PM_OPS (&alx_pm_ops)
-#else
-#define ALX_PM_OPS NULL
-#endif
-
-static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = {
- { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
- .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
- { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
- .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
- { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
- .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
- { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
- { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
- {}
-};
-
-static struct pci_driver alx_driver = {
- .name = alx_drv_name,
- .id_table = alx_pci_tbl,
- .probe = alx_probe,
- .remove = alx_remove,
- .shutdown = alx_shutdown,
- .err_handler = &alx_err_handlers,
- .driver.pm = ALX_PM_OPS,
-};
-
-module_pci_driver(alx_driver);
-MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
-MODULE_AUTHOR("Johannes Berg ");
-MODULE_AUTHOR("Qualcomm Corporation, ");
-MODULE_DESCRIPTION(
- "Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
-MODULE_LICENSE("GPL");
diff --git a/trunk/drivers/net/ethernet/atheros/alx/reg.h b/trunk/drivers/net/ethernet/atheros/alx/reg.h
deleted file mode 100644
index e4358c98bc4e..000000000000
--- a/trunk/drivers/net/ethernet/atheros/alx/reg.h
+++ /dev/null
@@ -1,810 +0,0 @@
-/*
- * Copyright (c) 2013 Johannes Berg
- *
- * This file is free software: you may copy, redistribute and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation, either version 2 of the License, or (at your
- * option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see .
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- *
- * Copyright (c) 2012 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef ALX_REG_H
-#define ALX_REG_H
-
-#define ALX_DEV_ID_AR8161 0x1091
-#define ALX_DEV_ID_E2200 0xe091
-#define ALX_DEV_ID_AR8162 0x1090
-#define ALX_DEV_ID_AR8171 0x10A1
-#define ALX_DEV_ID_AR8172 0x10A0
-
-/* rev definition,
- * bit(0): with xD support
- * bit(1): with Card Reader function
- * bit(7:2): real revision
- */
-#define ALX_PCI_REVID_SHIFT 3
-#define ALX_REV_A0 0
-#define ALX_REV_A1 1
-#define ALX_REV_B0 2
-#define ALX_REV_C0 3
-
-#define ALX_DEV_CTRL 0x0060
-#define ALX_DEV_CTRL_MAXRRS_MIN 2
-
-#define ALX_MSIX_MASK 0x0090
-
-#define ALX_UE_SVRT 0x010C
-#define ALX_UE_SVRT_FCPROTERR BIT(13)
-#define ALX_UE_SVRT_DLPROTERR BIT(4)
-
-/* eeprom & flash load register */
-#define ALX_EFLD 0x0204
-#define ALX_EFLD_F_EXIST BIT(10)
-#define ALX_EFLD_E_EXIST BIT(9)
-#define ALX_EFLD_STAT BIT(5)
-#define ALX_EFLD_START BIT(0)
-
-/* eFuse load register */
-#define ALX_SLD 0x0218
-#define ALX_SLD_STAT BIT(12)
-#define ALX_SLD_START BIT(11)
-#define ALX_SLD_MAX_TO 100
-
-#define ALX_PDLL_TRNS1 0x1104
-#define ALX_PDLL_TRNS1_D3PLLOFF_EN BIT(11)
-
-#define ALX_PMCTRL 0x12F8
-#define ALX_PMCTRL_HOTRST_WTEN BIT(31)
-/* bit30: L0s/L1 controlled by MAC based on throughput(setting in 15A0) */
-#define ALX_PMCTRL_ASPM_FCEN BIT(30)
-#define ALX_PMCTRL_SADLY_EN BIT(29)
-#define ALX_PMCTRL_LCKDET_TIMER_MASK 0xF
-#define ALX_PMCTRL_LCKDET_TIMER_SHIFT 24
-#define ALX_PMCTRL_LCKDET_TIMER_DEF 0xC
-/* bit[23:20] if pm_request_l1 time > @, then enter L0s not L1 */
-#define ALX_PMCTRL_L1REQ_TO_MASK 0xF
-#define ALX_PMCTRL_L1REQ_TO_SHIFT 20
-#define ALX_PMCTRL_L1REG_TO_DEF 0xF
-#define ALX_PMCTRL_TXL1_AFTER_L0S BIT(19)
-#define ALX_PMCTRL_L1_TIMER_MASK 0x7
-#define ALX_PMCTRL_L1_TIMER_SHIFT 16
-#define ALX_PMCTRL_L1_TIMER_16US 4
-#define ALX_PMCTRL_RCVR_WT_1US BIT(15)
-/* bit13: enable pcie clk switch in L1 state */
-#define ALX_PMCTRL_L1_CLKSW_EN BIT(13)
-#define ALX_PMCTRL_L0S_EN BIT(12)
-#define ALX_PMCTRL_RXL1_AFTER_L0S BIT(11)
-#define ALX_PMCTRL_L1_BUFSRX_EN BIT(7)
-/* bit6: power down serdes RX */
-#define ALX_PMCTRL_L1_SRDSRX_PWD BIT(6)
-#define ALX_PMCTRL_L1_SRDSPLL_EN BIT(5)
-#define ALX_PMCTRL_L1_SRDS_EN BIT(4)
-#define ALX_PMCTRL_L1_EN BIT(3)
-
-/*******************************************************/
-/* following registers are mapped only to memory space */
-/*******************************************************/
-
-#define ALX_MASTER 0x1400
-/* bit12: 1:alwys select pclk from serdes, not sw to 25M */
-#define ALX_MASTER_PCLKSEL_SRDS BIT(12)
-/* bit11: irq moduration for rx */
-#define ALX_MASTER_IRQMOD2_EN BIT(11)
-/* bit10: irq moduration for tx/rx */
-#define ALX_MASTER_IRQMOD1_EN BIT(10)
-#define ALX_MASTER_SYSALVTIMER_EN BIT(7)
-#define ALX_MASTER_OOB_DIS BIT(6)
-/* bit5: wakeup without pcie clk */
-#define ALX_MASTER_WAKEN_25M BIT(5)
-/* bit0: MAC & DMA reset */
-#define ALX_MASTER_DMA_MAC_RST BIT(0)
-#define ALX_DMA_MAC_RST_TO 50
-
-#define ALX_IRQ_MODU_TIMER 0x1408
-#define ALX_IRQ_MODU_TIMER1_MASK 0xFFFF
-#define ALX_IRQ_MODU_TIMER1_SHIFT 0
-
-#define ALX_PHY_CTRL 0x140C
-#define ALX_PHY_CTRL_100AB_EN BIT(17)
-/* bit14: affect MAC & PHY, go to low power sts */
-#define ALX_PHY_CTRL_POWER_DOWN BIT(14)
-/* bit13: 1:pll always ON, 0:can switch in lpw */
-#define ALX_PHY_CTRL_PLL_ON BIT(13)
-#define ALX_PHY_CTRL_RST_ANALOG BIT(12)
-#define ALX_PHY_CTRL_HIB_PULSE BIT(11)
-#define ALX_PHY_CTRL_HIB_EN BIT(10)
-#define ALX_PHY_CTRL_IDDQ BIT(7)
-#define ALX_PHY_CTRL_GATE_25M BIT(5)
-#define ALX_PHY_CTRL_LED_MODE BIT(2)
-/* bit0: out of dsp RST state */
-#define ALX_PHY_CTRL_DSPRST_OUT BIT(0)
-#define ALX_PHY_CTRL_DSPRST_TO 80
-#define ALX_PHY_CTRL_CLS (ALX_PHY_CTRL_LED_MODE | \
- ALX_PHY_CTRL_100AB_EN | \
- ALX_PHY_CTRL_PLL_ON)
-
-#define ALX_MAC_STS 0x1410
-#define ALX_MAC_STS_TXQ_BUSY BIT(3)
-#define ALX_MAC_STS_RXQ_BUSY BIT(2)
-#define ALX_MAC_STS_TXMAC_BUSY BIT(1)
-#define ALX_MAC_STS_RXMAC_BUSY BIT(0)
-#define ALX_MAC_STS_IDLE (ALX_MAC_STS_TXQ_BUSY | \
- ALX_MAC_STS_RXQ_BUSY | \
- ALX_MAC_STS_TXMAC_BUSY | \
- ALX_MAC_STS_RXMAC_BUSY)
-
-#define ALX_MDIO 0x1414
-#define ALX_MDIO_MODE_EXT BIT(30)
-#define ALX_MDIO_BUSY BIT(27)
-#define ALX_MDIO_CLK_SEL_MASK 0x7
-#define ALX_MDIO_CLK_SEL_SHIFT 24
-#define ALX_MDIO_CLK_SEL_25MD4 0
-#define ALX_MDIO_CLK_SEL_25MD128 7
-#define ALX_MDIO_START BIT(23)
-#define ALX_MDIO_SPRES_PRMBL BIT(22)
-/* bit21: 1:read,0:write */
-#define ALX_MDIO_OP_READ BIT(21)
-#define ALX_MDIO_REG_MASK 0x1F
-#define ALX_MDIO_REG_SHIFT 16
-#define ALX_MDIO_DATA_MASK 0xFFFF
-#define ALX_MDIO_DATA_SHIFT 0
-#define ALX_MDIO_MAX_AC_TO 120
-
-#define ALX_MDIO_EXTN 0x1448
-#define ALX_MDIO_EXTN_DEVAD_MASK 0x1F
-#define ALX_MDIO_EXTN_DEVAD_SHIFT 16
-#define ALX_MDIO_EXTN_REG_MASK 0xFFFF
-#define ALX_MDIO_EXTN_REG_SHIFT 0
-
-#define ALX_SERDES 0x1424
-#define ALX_SERDES_PHYCLK_SLWDWN BIT(18)
-#define ALX_SERDES_MACCLK_SLWDWN BIT(17)
-
-#define ALX_LPI_CTRL 0x1440
-#define ALX_LPI_CTRL_EN BIT(0)
-
-/* for B0+, bit[13..] for C0+ */
-#define ALX_HRTBT_EXT_CTRL 0x1AD0
-#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK 0x3F
-#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT 24
-#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN BIT(23)
-#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED BIT(22)
-#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED BIT(21)
-#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN BIT(20)
-#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN BIT(19)
-#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023 BIT(18)
-#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6 BIT(17)
-#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN BIT(16)
-#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN BIT(15)
-#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023 BIT(14)
-#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6 BIT(13)
-#define ALX_HRTBT_EXT_CTRL_NS_EN BIT(12)
-#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK 0xFF
-#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT 4
-#define ALX_HRTBT_EXT_CTRL_IS_8023 BIT(3)
-#define ALX_HRTBT_EXT_CTRL_IS_IPV6 BIT(2)
-#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN BIT(1)
-#define ALX_HRTBT_EXT_CTRL_ARP_EN BIT(0)
-
-#define ALX_HRTBT_REM_IPV4_ADDR 0x1AD4
-#define ALX_HRTBT_HOST_IPV4_ADDR 0x1478
-#define ALX_HRTBT_REM_IPV6_ADDR3 0x1AD8
-#define ALX_HRTBT_REM_IPV6_ADDR2 0x1ADC
-#define ALX_HRTBT_REM_IPV6_ADDR1 0x1AE0
-#define ALX_HRTBT_REM_IPV6_ADDR0 0x1AE4
-
-/* 1B8C ~ 1B94 for C0+ */
-#define ALX_SWOI_ACER_CTRL 0x1B8C
-#define ALX_SWOI_ORIG_ACK_NAK_EN BIT(20)
-#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK 0XFF
-#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT 12
-#define ALX_SWOI_ORIG_ACK_ADDR_MASK 0XFFF
-#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT 0
-
-#define ALX_SWOI_IOAC_CTRL_2 0x1B90
-#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK 0xFF
-#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT 24
-#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK 0xFFF
-#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT 12
-#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK 0xFFF
-#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT 0
-
-#define ALX_SWOI_IOAC_CTRL_3 0x1B94
-#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK 0xFF
-#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT 24
-#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK 0xFFF
-#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT 12
-#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK 0xFFF
-#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT 0
-
-/* for B0 */
-#define ALX_IDLE_DECISN_TIMER 0x1474
-/* 1ms */
-#define ALX_IDLE_DECISN_TIMER_DEF 0x400
-
-#define ALX_MAC_CTRL 0x1480
-#define ALX_MAC_CTRL_FAST_PAUSE BIT(31)
-#define ALX_MAC_CTRL_WOLSPED_SWEN BIT(30)
-/* bit29: 1:legacy(hi5b), 0:marvl(lo5b)*/
-#define ALX_MAC_CTRL_MHASH_ALG_HI5B BIT(29)
-#define ALX_MAC_CTRL_BRD_EN BIT(26)
-#define ALX_MAC_CTRL_MULTIALL_EN BIT(25)
-#define ALX_MAC_CTRL_SPEED_MASK 0x3
-#define ALX_MAC_CTRL_SPEED_SHIFT 20
-#define ALX_MAC_CTRL_SPEED_10_100 1
-#define ALX_MAC_CTRL_SPEED_1000 2
-#define ALX_MAC_CTRL_PROMISC_EN BIT(15)
-#define ALX_MAC_CTRL_VLANSTRIP BIT(14)
-#define ALX_MAC_CTRL_PRMBLEN_MASK 0xF
-#define ALX_MAC_CTRL_PRMBLEN_SHIFT 10
-#define ALX_MAC_CTRL_PCRCE BIT(7)
-#define ALX_MAC_CTRL_CRCE BIT(6)
-#define ALX_MAC_CTRL_FULLD BIT(5)
-#define ALX_MAC_CTRL_RXFC_EN BIT(3)
-#define ALX_MAC_CTRL_TXFC_EN BIT(2)
-#define ALX_MAC_CTRL_RX_EN BIT(1)
-#define ALX_MAC_CTRL_TX_EN BIT(0)
-
-#define ALX_STAD0 0x1488
-#define ALX_STAD1 0x148C
-
-#define ALX_HASH_TBL0 0x1490
-#define ALX_HASH_TBL1 0x1494
-
-#define ALX_MTU 0x149C
-#define ALX_MTU_JUMBO_TH 1514
-#define ALX_MTU_STD_ALGN 1536
-
-#define ALX_SRAM5 0x1524
-#define ALX_SRAM_RXF_LEN_MASK 0xFFF
-#define ALX_SRAM_RXF_LEN_SHIFT 0
-#define ALX_SRAM_RXF_LEN_8K (8*1024)
-
-#define ALX_SRAM9 0x1534
-#define ALX_SRAM_LOAD_PTR BIT(0)
-
-#define ALX_RX_BASE_ADDR_HI 0x1540
-
-#define ALX_TX_BASE_ADDR_HI 0x1544
-
-#define ALX_RFD_ADDR_LO 0x1550
-#define ALX_RFD_RING_SZ 0x1560
-#define ALX_RFD_BUF_SZ 0x1564
-
-#define ALX_RRD_ADDR_LO 0x1568
-#define ALX_RRD_RING_SZ 0x1578
-
-/* pri3: highest, pri0: lowest */
-#define ALX_TPD_PRI3_ADDR_LO 0x14E4
-#define ALX_TPD_PRI2_ADDR_LO 0x14E0
-#define ALX_TPD_PRI1_ADDR_LO 0x157C
-#define ALX_TPD_PRI0_ADDR_LO 0x1580
-
-/* producer index is 16bit */
-#define ALX_TPD_PRI3_PIDX 0x1618
-#define ALX_TPD_PRI2_PIDX 0x161A
-#define ALX_TPD_PRI1_PIDX 0x15F0
-#define ALX_TPD_PRI0_PIDX 0x15F2
-
-/* consumer index is 16bit */
-#define ALX_TPD_PRI3_CIDX 0x161C
-#define ALX_TPD_PRI2_CIDX 0x161E
-#define ALX_TPD_PRI1_CIDX 0x15F4
-#define ALX_TPD_PRI0_CIDX 0x15F6
-
-#define ALX_TPD_RING_SZ 0x1584
-
-#define ALX_TXQ0 0x1590
-#define ALX_TXQ0_TXF_BURST_PREF_MASK 0xFFFF
-#define ALX_TXQ0_TXF_BURST_PREF_SHIFT 16
-#define ALX_TXQ_TXF_BURST_PREF_DEF 0x200
-#define ALX_TXQ0_LSO_8023_EN BIT(7)
-#define ALX_TXQ0_MODE_ENHANCE BIT(6)
-#define ALX_TXQ0_EN BIT(5)
-#define ALX_TXQ0_SUPT_IPOPT BIT(4)
-#define ALX_TXQ0_TPD_BURSTPREF_MASK 0xF
-#define ALX_TXQ0_TPD_BURSTPREF_SHIFT 0
-#define ALX_TXQ_TPD_BURSTPREF_DEF 5
-
-#define ALX_TXQ1 0x1594
-/* bit11: drop large packet, len > (rfd buf) */
-#define ALX_TXQ1_ERRLGPKT_DROP_EN BIT(11)
-#define ALX_TXQ1_JUMBO_TSO_TH (7*1024)
-
-#define ALX_RXQ0 0x15A0
-#define ALX_RXQ0_EN BIT(31)
-#define ALX_RXQ0_RSS_HASH_EN BIT(29)
-#define ALX_RXQ0_RSS_MODE_MASK 0x3
-#define ALX_RXQ0_RSS_MODE_SHIFT 26
-#define ALX_RXQ0_RSS_MODE_DIS 0
-#define ALX_RXQ0_RSS_MODE_MQMI 3
-#define ALX_RXQ0_NUM_RFD_PREF_MASK 0x3F
-#define ALX_RXQ0_NUM_RFD_PREF_SHIFT 20
-#define ALX_RXQ0_NUM_RFD_PREF_DEF 8
-#define ALX_RXQ0_IDT_TBL_SIZE_MASK 0x1FF
-#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT 8
-#define ALX_RXQ0_IDT_TBL_SIZE_DEF 0x100
-#define ALX_RXQ0_IDT_TBL_SIZE_NORMAL 128
-#define ALX_RXQ0_IPV6_PARSE_EN BIT(7)
-#define ALX_RXQ0_RSS_HSTYP_MASK 0xF
-#define ALX_RXQ0_RSS_HSTYP_SHIFT 2
-#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN BIT(5)
-#define ALX_RXQ0_RSS_HSTYP_IPV6_EN BIT(4)
-#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN BIT(3)
-#define ALX_RXQ0_RSS_HSTYP_IPV4_EN BIT(2)
-#define ALX_RXQ0_RSS_HSTYP_ALL (ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN | \
- ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN | \
- ALX_RXQ0_RSS_HSTYP_IPV6_EN | \
- ALX_RXQ0_RSS_HSTYP_IPV4_EN)
-#define ALX_RXQ0_ASPM_THRESH_MASK 0x3
-#define ALX_RXQ0_ASPM_THRESH_SHIFT 0
-#define ALX_RXQ0_ASPM_THRESH_100M 3
-
-#define ALX_RXQ2 0x15A8
-#define ALX_RXQ2_RXF_XOFF_THRESH_MASK 0xFFF
-#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT 16
-#define ALX_RXQ2_RXF_XON_THRESH_MASK 0xFFF
-#define ALX_RXQ2_RXF_XON_THRESH_SHIFT 0
-/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) +
- * rx-packet(1522) + delay-of-link(64)
- * = 3212.
- */
-#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD 3212
-
-#define ALX_DMA 0x15C0
-#define ALX_DMA_RCHNL_SEL_MASK 0x3
-#define ALX_DMA_RCHNL_SEL_SHIFT 26
-#define ALX_DMA_WDLY_CNT_MASK 0xF
-#define ALX_DMA_WDLY_CNT_SHIFT 16
-#define ALX_DMA_WDLY_CNT_DEF 4
-#define ALX_DMA_RDLY_CNT_MASK 0x1F
-#define ALX_DMA_RDLY_CNT_SHIFT 11
-#define ALX_DMA_RDLY_CNT_DEF 15
-/* bit10: 0:tpd with pri, 1: data */
-#define ALX_DMA_RREQ_PRI_DATA BIT(10)
-#define ALX_DMA_RREQ_BLEN_MASK 0x7
-#define ALX_DMA_RREQ_BLEN_SHIFT 4
-#define ALX_DMA_RORDER_MODE_MASK 0x7
-#define ALX_DMA_RORDER_MODE_SHIFT 0
-#define ALX_DMA_RORDER_MODE_OUT 4
-
-#define ALX_WOL0 0x14A0
-#define ALX_WOL0_PME_LINK BIT(5)
-#define ALX_WOL0_LINK_EN BIT(4)
-#define ALX_WOL0_PME_MAGIC_EN BIT(3)
-#define ALX_WOL0_MAGIC_EN BIT(2)
-
-#define ALX_RFD_PIDX 0x15E0
-
-#define ALX_RFD_CIDX 0x15F8
-
-/* MIB */
-#define ALX_MIB_BASE 0x1700
-#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0)
-#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92)
-#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96)
-#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192)
-
-#define ALX_RX_STATS_BIN ALX_MIB_RX_OK
-#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR
-#define ALX_TX_STATS_BIN ALX_MIB_TX_OK
-#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT
-
-#define ALX_ISR 0x1600
-#define ALX_ISR_DIS BIT(31)
-#define ALX_ISR_RX_Q7 BIT(30)
-#define ALX_ISR_RX_Q6 BIT(29)
-#define ALX_ISR_RX_Q5 BIT(28)
-#define ALX_ISR_RX_Q4 BIT(27)
-#define ALX_ISR_PCIE_LNKDOWN BIT(26)
-#define ALX_ISR_RX_Q3 BIT(19)
-#define ALX_ISR_RX_Q2 BIT(18)
-#define ALX_ISR_RX_Q1 BIT(17)
-#define ALX_ISR_RX_Q0 BIT(16)
-#define ALX_ISR_TX_Q0 BIT(15)
-#define ALX_ISR_PHY BIT(12)
-#define ALX_ISR_DMAW BIT(10)
-#define ALX_ISR_DMAR BIT(9)
-#define ALX_ISR_TXF_UR BIT(8)
-#define ALX_ISR_TX_Q3 BIT(7)
-#define ALX_ISR_TX_Q2 BIT(6)
-#define ALX_ISR_TX_Q1 BIT(5)
-#define ALX_ISR_RFD_UR BIT(4)
-#define ALX_ISR_RXF_OV BIT(3)
-#define ALX_ISR_MANU BIT(2)
-#define ALX_ISR_TIMER BIT(1)
-#define ALX_ISR_SMB BIT(0)
-
-#define ALX_IMR 0x1604
-
-/* re-send assert msg if SW no response */
-#define ALX_INT_RETRIG 0x1608
-/* 40ms */
-#define ALX_INT_RETRIG_TO 20000
-
-#define ALX_SMB_TIMER 0x15C4
-
-#define ALX_TINT_TPD_THRSHLD 0x15C8
-
-#define ALX_TINT_TIMER 0x15CC
-
-#define ALX_CLK_GATE 0x1814
-#define ALX_CLK_GATE_RXMAC BIT(5)
-#define ALX_CLK_GATE_TXMAC BIT(4)
-#define ALX_CLK_GATE_RXQ BIT(3)
-#define ALX_CLK_GATE_TXQ BIT(2)
-#define ALX_CLK_GATE_DMAR BIT(1)
-#define ALX_CLK_GATE_DMAW BIT(0)
-#define ALX_CLK_GATE_ALL (ALX_CLK_GATE_RXMAC | \
- ALX_CLK_GATE_TXMAC | \
- ALX_CLK_GATE_RXQ | \
- ALX_CLK_GATE_TXQ | \
- ALX_CLK_GATE_DMAR | \
- ALX_CLK_GATE_DMAW)
-
-/* interop between drivers */
-#define ALX_DRV 0x1804
-#define ALX_DRV_PHY_AUTO BIT(28)
-#define ALX_DRV_PHY_1000 BIT(27)
-#define ALX_DRV_PHY_100 BIT(26)
-#define ALX_DRV_PHY_10 BIT(25)
-#define ALX_DRV_PHY_DUPLEX BIT(24)
-/* bit23: adv Pause */
-#define ALX_DRV_PHY_PAUSE BIT(23)
-/* bit22: adv Asym Pause */
-#define ALX_DRV_PHY_MASK 0xFF
-#define ALX_DRV_PHY_SHIFT 21
-#define ALX_DRV_PHY_UNKNOWN 0
-
-/* flag of phy inited */
-#define ALX_PHY_INITED 0x003F
-
-/* reg 1830 ~ 186C for C0+, 16 bit map patterns and wake packet detection */
-#define ALX_WOL_CTRL2 0x1830
-#define ALX_WOL_CTRL2_DATA_STORE BIT(3)
-#define ALX_WOL_CTRL2_PTRN_EVT BIT(2)
-#define ALX_WOL_CTRL2_PME_PTRN_EN BIT(1)
-#define ALX_WOL_CTRL2_PTRN_EN BIT(0)
-
-#define ALX_WOL_CTRL3 0x1834
-#define ALX_WOL_CTRL3_PTRN_ADDR_MASK 0xFFFFF
-#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT 0
-
-#define ALX_WOL_CTRL4 0x1838
-#define ALX_WOL_CTRL4_PT15_MATCH BIT(31)
-#define ALX_WOL_CTRL4_PT14_MATCH BIT(30)
-#define ALX_WOL_CTRL4_PT13_MATCH BIT(29)
-#define ALX_WOL_CTRL4_PT12_MATCH BIT(28)
-#define ALX_WOL_CTRL4_PT11_MATCH BIT(27)
-#define ALX_WOL_CTRL4_PT10_MATCH BIT(26)
-#define ALX_WOL_CTRL4_PT9_MATCH BIT(25)
-#define ALX_WOL_CTRL4_PT8_MATCH BIT(24)
-#define ALX_WOL_CTRL4_PT7_MATCH BIT(23)
-#define ALX_WOL_CTRL4_PT6_MATCH BIT(22)
-#define ALX_WOL_CTRL4_PT5_MATCH BIT(21)
-#define ALX_WOL_CTRL4_PT4_MATCH BIT(20)
-#define ALX_WOL_CTRL4_PT3_MATCH BIT(19)
-#define ALX_WOL_CTRL4_PT2_MATCH BIT(18)
-#define ALX_WOL_CTRL4_PT1_MATCH BIT(17)
-#define ALX_WOL_CTRL4_PT0_MATCH BIT(16)
-#define ALX_WOL_CTRL4_PT15_EN BIT(15)
-#define ALX_WOL_CTRL4_PT14_EN BIT(14)
-#define ALX_WOL_CTRL4_PT13_EN BIT(13)
-#define ALX_WOL_CTRL4_PT12_EN BIT(12)
-#define ALX_WOL_CTRL4_PT11_EN BIT(11)
-#define ALX_WOL_CTRL4_PT10_EN BIT(10)
-#define ALX_WOL_CTRL4_PT9_EN BIT(9)
-#define ALX_WOL_CTRL4_PT8_EN BIT(8)
-#define ALX_WOL_CTRL4_PT7_EN BIT(7)
-#define ALX_WOL_CTRL4_PT6_EN BIT(6)
-#define ALX_WOL_CTRL4_PT5_EN BIT(5)
-#define ALX_WOL_CTRL4_PT4_EN BIT(4)
-#define ALX_WOL_CTRL4_PT3_EN BIT(3)
-#define ALX_WOL_CTRL4_PT2_EN BIT(2)
-#define ALX_WOL_CTRL4_PT1_EN BIT(1)
-#define ALX_WOL_CTRL4_PT0_EN BIT(0)
-
-#define ALX_WOL_CTRL5 0x183C
-#define ALX_WOL_CTRL5_PT3_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT3_LEN_SHIFT 24
-#define ALX_WOL_CTRL5_PT2_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT2_LEN_SHIFT 16
-#define ALX_WOL_CTRL5_PT1_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT1_LEN_SHIFT 8
-#define ALX_WOL_CTRL5_PT0_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT0_LEN_SHIFT 0
-
-#define ALX_WOL_CTRL6 0x1840
-#define ALX_WOL_CTRL5_PT7_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT7_LEN_SHIFT 24
-#define ALX_WOL_CTRL5_PT6_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT6_LEN_SHIFT 16
-#define ALX_WOL_CTRL5_PT5_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT5_LEN_SHIFT 8
-#define ALX_WOL_CTRL5_PT4_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT4_LEN_SHIFT 0
-
-#define ALX_WOL_CTRL7 0x1844
-#define ALX_WOL_CTRL5_PT11_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT11_LEN_SHIFT 24
-#define ALX_WOL_CTRL5_PT10_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT10_LEN_SHIFT 16
-#define ALX_WOL_CTRL5_PT9_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT9_LEN_SHIFT 8
-#define ALX_WOL_CTRL5_PT8_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT8_LEN_SHIFT 0
-
-#define ALX_WOL_CTRL8 0x1848
-#define ALX_WOL_CTRL5_PT15_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT15_LEN_SHIFT 24
-#define ALX_WOL_CTRL5_PT14_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT14_LEN_SHIFT 16
-#define ALX_WOL_CTRL5_PT13_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT13_LEN_SHIFT 8
-#define ALX_WOL_CTRL5_PT12_LEN_MASK 0xFF
-#define ALX_WOL_CTRL5_PT12_LEN_SHIFT 0
-
-#define ALX_ACER_FIXED_PTN0 0x1850
-#define ALX_ACER_FIXED_PTN0_MASK 0xFFFFFFFF
-#define ALX_ACER_FIXED_PTN0_SHIFT 0
-
-#define ALX_ACER_FIXED_PTN1 0x1854
-#define ALX_ACER_FIXED_PTN1_MASK 0xFFFF
-#define ALX_ACER_FIXED_PTN1_SHIFT 0
-
-#define ALX_ACER_RANDOM_NUM0 0x1858
-#define ALX_ACER_RANDOM_NUM0_MASK 0xFFFFFFFF
-#define ALX_ACER_RANDOM_NUM0_SHIFT 0
-
-#define ALX_ACER_RANDOM_NUM1 0x185C
-#define ALX_ACER_RANDOM_NUM1_MASK 0xFFFFFFFF
-#define ALX_ACER_RANDOM_NUM1_SHIFT 0
-
-#define ALX_ACER_RANDOM_NUM2 0x1860
-#define ALX_ACER_RANDOM_NUM2_MASK 0xFFFFFFFF
-#define ALX_ACER_RANDOM_NUM2_SHIFT 0
-
-#define ALX_ACER_RANDOM_NUM3 0x1864
-#define ALX_ACER_RANDOM_NUM3_MASK 0xFFFFFFFF
-#define ALX_ACER_RANDOM_NUM3_SHIFT 0
-
-#define ALX_ACER_MAGIC 0x1868
-#define ALX_ACER_MAGIC_EN BIT(31)
-#define ALX_ACER_MAGIC_PME_EN BIT(30)
-#define ALX_ACER_MAGIC_MATCH BIT(29)
-#define ALX_ACER_MAGIC_FF_CHECK BIT(10)
-#define ALX_ACER_MAGIC_RAN_LEN_MASK 0x1F
-#define ALX_ACER_MAGIC_RAN_LEN_SHIFT 5
-#define ALX_ACER_MAGIC_FIX_LEN_MASK 0x1F
-#define ALX_ACER_MAGIC_FIX_LEN_SHIFT 0
-
-#define ALX_ACER_TIMER 0x186C
-#define ALX_ACER_TIMER_EN BIT(31)
-#define ALX_ACER_TIMER_PME_EN BIT(30)
-#define ALX_ACER_TIMER_MATCH BIT(29)
-#define ALX_ACER_TIMER_THRES_MASK 0x1FFFF
-#define ALX_ACER_TIMER_THRES_SHIFT 0
-#define ALX_ACER_TIMER_THRES_DEF 1
-
-/* RSS definitions */
-#define ALX_RSS_KEY0 0x14B0
-#define ALX_RSS_KEY1 0x14B4
-#define ALX_RSS_KEY2 0x14B8
-#define ALX_RSS_KEY3 0x14BC
-#define ALX_RSS_KEY4 0x14C0
-#define ALX_RSS_KEY5 0x14C4
-#define ALX_RSS_KEY6 0x14C8
-#define ALX_RSS_KEY7 0x14CC
-#define ALX_RSS_KEY8 0x14D0
-#define ALX_RSS_KEY9 0x14D4
-
-#define ALX_RSS_IDT_TBL0 0x1B00
-
-#define ALX_MSI_MAP_TBL1 0x15D0
-#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT 20
-#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT 16
-#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT 12
-#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT 8
-#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT 4
-#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT 0
-
-#define ALX_MSI_MAP_TBL2 0x15D8
-#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT 20
-#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT 16
-#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT 12
-#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT 8
-#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT 4
-#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT 0
-
-#define ALX_MSI_ID_MAP 0x15D4
-
-#define ALX_MSI_RETRANS_TIMER 0x1920
-/* bit16: 1:line,0:standard */
-#define ALX_MSI_MASK_SEL_LINE BIT(16)
-#define ALX_MSI_RETRANS_TM_MASK 0xFFFF
-#define ALX_MSI_RETRANS_TM_SHIFT 0
-
-/* CR DMA ctrl */
-
-/* TX QoS */
-#define ALX_WRR 0x1938
-#define ALX_WRR_PRI_MASK 0x3
-#define ALX_WRR_PRI_SHIFT 29
-#define ALX_WRR_PRI_RESTRICT_NONE 3
-#define ALX_WRR_PRI3_MASK 0x1F
-#define ALX_WRR_PRI3_SHIFT 24
-#define ALX_WRR_PRI2_MASK 0x1F
-#define ALX_WRR_PRI2_SHIFT 16
-#define ALX_WRR_PRI1_MASK 0x1F
-#define ALX_WRR_PRI1_SHIFT 8
-#define ALX_WRR_PRI0_MASK 0x1F
-#define ALX_WRR_PRI0_SHIFT 0
-
-#define ALX_HQTPD 0x193C
-#define ALX_HQTPD_BURST_EN BIT(31)
-#define ALX_HQTPD_Q3_NUMPREF_MASK 0xF
-#define ALX_HQTPD_Q3_NUMPREF_SHIFT 8
-#define ALX_HQTPD_Q2_NUMPREF_MASK 0xF
-#define ALX_HQTPD_Q2_NUMPREF_SHIFT 4
-#define ALX_HQTPD_Q1_NUMPREF_MASK 0xF
-#define ALX_HQTPD_Q1_NUMPREF_SHIFT 0
-
-#define ALX_MISC 0x19C0
-#define ALX_MISC_PSW_OCP_MASK 0x7
-#define ALX_MISC_PSW_OCP_SHIFT 21
-#define ALX_MISC_PSW_OCP_DEF 0x7
-#define ALX_MISC_ISO_EN BIT(12)
-#define ALX_MISC_INTNLOSC_OPEN BIT(3)
-
-#define ALX_MSIC2 0x19C8
-#define ALX_MSIC2_CALB_START BIT(0)
-
-#define ALX_MISC3 0x19CC
-/* bit1: 1:Software control 25M */
-#define ALX_MISC3_25M_BY_SW BIT(1)
-/* bit0: 25M switch to intnl OSC */
-#define ALX_MISC3_25M_NOTO_INTNL BIT(0)
-
-/* MSIX tbl in memory space */
-#define ALX_MSIX_ENTRY_BASE 0x2000
-
-/********************* PHY regs definition ***************************/
-
-/* PHY Specific Status Register */
-#define ALX_MII_GIGA_PSSR 0x11
-#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800
-#define ALX_GIGA_PSSR_DPLX 0x2000
-#define ALX_GIGA_PSSR_SPEED 0xC000
-#define ALX_GIGA_PSSR_10MBS 0x0000
-#define ALX_GIGA_PSSR_100MBS 0x4000
-#define ALX_GIGA_PSSR_1000MBS 0x8000
-
-/* PHY Interrupt Enable Register */
-#define ALX_MII_IER 0x12
-#define ALX_IER_LINK_UP 0x0400
-#define ALX_IER_LINK_DOWN 0x0800
-
-/* PHY Interrupt Status Register */
-#define ALX_MII_ISR 0x13
-
-#define ALX_MII_DBG_ADDR 0x1D
-#define ALX_MII_DBG_DATA 0x1E
-
-/***************************** debug port *************************************/
-
-#define ALX_MIIDBG_ANACTRL 0x00
-#define ALX_ANACTRL_DEF 0x02EF
-
-#define ALX_MIIDBG_SYSMODCTRL 0x04
-/* en half bias */
-#define ALX_SYSMODCTRL_IECHOADJ_DEF 0xBB8B
-
-#define ALX_MIIDBG_SRDSYSMOD 0x05
-#define ALX_SRDSYSMOD_DEEMP_EN 0x0040
-#define ALX_SRDSYSMOD_DEF 0x2C46
-
-#define ALX_MIIDBG_HIBNEG 0x0B
-#define ALX_HIBNEG_PSHIB_EN 0x8000
-#define ALX_HIBNEG_HIB_PSE 0x1000
-#define ALX_HIBNEG_DEF 0xBC40
-#define ALX_HIBNEG_NOHIB (ALX_HIBNEG_DEF & \
- ~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PSE))
-
-#define ALX_MIIDBG_TST10BTCFG 0x12
-#define ALX_TST10BTCFG_DEF 0x4C04
-
-#define ALX_MIIDBG_AZ_ANADECT 0x15
-#define ALX_AZ_ANADECT_DEF 0x3220
-#define ALX_AZ_ANADECT_LONG 0x3210
-
-#define ALX_MIIDBG_MSE16DB 0x18
-#define ALX_MSE16DB_UP 0x05EA
-#define ALX_MSE16DB_DOWN 0x02EA
-
-#define ALX_MIIDBG_MSE20DB 0x1C
-#define ALX_MSE20DB_TH_MASK 0x7F
-#define ALX_MSE20DB_TH_SHIFT 2
-#define ALX_MSE20DB_TH_DEF 0x2E
-#define ALX_MSE20DB_TH_HI 0x54
-
-#define ALX_MIIDBG_AGC 0x23
-#define ALX_AGC_2_VGA_MASK 0x3FU
-#define ALX_AGC_2_VGA_SHIFT 8
-#define ALX_AGC_LONG1G_LIMT 40
-#define ALX_AGC_LONG100M_LIMT 44
-
-#define ALX_MIIDBG_LEGCYPS 0x29
-#define ALX_LEGCYPS_EN 0x8000
-#define ALX_LEGCYPS_DEF 0x129D
-
-#define ALX_MIIDBG_TST100BTCFG 0x36
-#define ALX_TST100BTCFG_DEF 0xE12C
-
-#define ALX_MIIDBG_GREENCFG 0x3B
-#define ALX_GREENCFG_DEF 0x7078
-
-#define ALX_MIIDBG_GREENCFG2 0x3D
-#define ALX_GREENCFG2_BP_GREEN 0x8000
-#define ALX_GREENCFG2_GATE_DFSE_EN 0x0080
-
-/******* dev 3 *********/
-#define ALX_MIIEXT_PCS 3
-
-#define ALX_MIIEXT_CLDCTRL3 0x8003
-#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000
-
-#define ALX_MIIEXT_CLDCTRL5 0x8005
-#define ALX_CLDCTRL5_BP_VD_HLFBIAS 0x4000
-
-#define ALX_MIIEXT_CLDCTRL6 0x8006
-#define ALX_CLDCTRL6_CAB_LEN_MASK 0xFF
-#define ALX_CLDCTRL6_CAB_LEN_SHIFT 0
-#define ALX_CLDCTRL6_CAB_LEN_SHORT1G 116
-#define ALX_CLDCTRL6_CAB_LEN_SHORT100M 152
-
-#define ALX_MIIEXT_VDRVBIAS 0x8062
-#define ALX_VDRVBIAS_DEF 0x3
-
-/********* dev 7 **********/
-#define ALX_MIIEXT_ANEG 7
-
-#define ALX_MIIEXT_LOCAL_EEEADV 0x3C
-#define ALX_LOCAL_EEEADV_1000BT 0x0004
-#define ALX_LOCAL_EEEADV_100BT 0x0002
-
-#define ALX_MIIEXT_AFE 0x801A
-#define ALX_AFE_10BT_100M_TH 0x0040
-
-#define ALX_MIIEXT_S3DIG10 0x8023
-/* bit0: 1:bypass 10BT rx fifo, 0:original 10BT rx */
-#define ALX_MIIEXT_S3DIG10_SL 0x0001
-#define ALX_MIIEXT_S3DIG10_DEF 0
-
-#define ALX_MIIEXT_NLP78 0x8027
-#define ALX_MIIEXT_NLP78_120M_DEF 0x8A05
-
-#endif
diff --git a/trunk/drivers/net/ethernet/broadcom/tg3.c b/trunk/drivers/net/ethernet/broadcom/tg3.c
index a13463e8a2c3..0f493c8dc28b 100644
--- a/trunk/drivers/net/ethernet/broadcom/tg3.c
+++ b/trunk/drivers/net/ethernet/broadcom/tg3.c
@@ -744,9 +744,6 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
status = tg3_ape_read32(tp, gnt + off);
if (status == bit)
break;
- if (pci_channel_offline(tp->pdev))
- break;
-
udelay(10);
}
@@ -1638,9 +1635,6 @@ static void tg3_wait_for_event_ack(struct tg3 *tp)
for (i = 0; i < delay_cnt; i++) {
if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
break;
- if (pci_channel_offline(tp->pdev))
- break;
-
udelay(8);
}
}
@@ -1806,9 +1800,6 @@ static int tg3_poll_fw(struct tg3 *tp)
int i;
u32 val;
- if (tg3_flag(tp, NO_FWARE_REPORTED))
- return 0;
-
if (tg3_flag(tp, IS_SSB_CORE)) {
/* We don't use firmware. */
return 0;
@@ -1819,9 +1810,6 @@ static int tg3_poll_fw(struct tg3 *tp)
for (i = 0; i < 200; i++) {
if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
return 0;
- if (pci_channel_offline(tp->pdev))
- return -ENODEV;
-
udelay(100);
}
return -ENODEV;
@@ -1832,15 +1820,6 @@ static int tg3_poll_fw(struct tg3 *tp)
tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
break;
- if (pci_channel_offline(tp->pdev)) {
- if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
- tg3_flag_set(tp, NO_FWARE_REPORTED);
- netdev_info(tp->dev, "No firmware running\n");
- }
-
- break;
- }
-
udelay(10);
}
@@ -3538,8 +3517,6 @@ static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
break;
- if (pci_channel_offline(tp->pdev))
- return -EBUSY;
}
return (i == iters) ? -EBUSY : 0;
@@ -8609,14 +8586,6 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, boo
tw32_f(ofs, val);
for (i = 0; i < MAX_WAIT_CNT; i++) {
- if (pci_channel_offline(tp->pdev)) {
- dev_err(&tp->pdev->dev,
- "tg3_stop_block device offline, "
- "ofs=%lx enable_bit=%x\n",
- ofs, enable_bit);
- return -ENODEV;
- }
-
udelay(100);
val = tr32(ofs);
if ((val & enable_bit) == 0)
@@ -8640,13 +8609,6 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent)
tg3_disable_ints(tp);
- if (pci_channel_offline(tp->pdev)) {
- tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
- tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
- err = -ENODEV;
- goto err_no_dev;
- }
-
tp->rx_mode &= ~RX_MODE_ENABLE;
tw32_f(MAC_RX_MODE, tp->rx_mode);
udelay(10);
@@ -8695,7 +8657,6 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent)
err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
-err_no_dev:
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
if (tnapi->hw_status)
@@ -10443,13 +10404,6 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
*/
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
- /* Chip may have been just powered on. If so, the boot code may still
- * be running initialization. Wait for it to finish to avoid races in
- * accessing the hardware.
- */
- tg3_enable_register_access(tp);
- tg3_poll_fw(tp);
-
tg3_switch_clocks(tp);
tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
diff --git a/trunk/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/trunk/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 94d957d203a6..6e8bc9d88c41 100644
--- a/trunk/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/trunk/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -244,7 +244,7 @@ bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
file->f_pos += offset;
break;
case 2:
- file->f_pos = debug->buffer_len + offset;
+ file->f_pos = debug->buffer_len - offset;
break;
default:
return -EINVAL;
diff --git a/trunk/drivers/net/ethernet/dec/tulip/interrupt.c b/trunk/drivers/net/ethernet/dec/tulip/interrupt.c
index 92306b320840..28a5e425fecf 100644
--- a/trunk/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/trunk/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -76,12 +76,6 @@ int tulip_refill_rx(struct net_device *dev)
mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
- if (dma_mapping_error(&tp->pdev->dev, mapping)) {
- dev_kfree_skb(skb);
- tp->rx_buffers[entry].skb = NULL;
- break;
- }
-
tp->rx_buffers[entry].mapping = mapping;
tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_main.c b/trunk/drivers/net/ethernet/emulex/benet/be_main.c
index a0b4be51f0d1..8bc1b21b1c79 100644
--- a/trunk/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/trunk/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4262,9 +4262,6 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
netdev->features |= NETIF_F_HIGHDMA;
} else {
status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (!status)
- status = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
if (status) {
dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
goto free_netdev;
diff --git a/trunk/drivers/net/ethernet/freescale/fec_main.c b/trunk/drivers/net/ethernet/freescale/fec_main.c
index d48099f03b7f..a667015be22a 100644
--- a/trunk/drivers/net/ethernet/freescale/fec_main.c
+++ b/trunk/drivers/net/ethernet/freescale/fec_main.c
@@ -516,7 +516,6 @@ fec_restart(struct net_device *ndev, int duplex)
/* Set MII speed */
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
-#if !defined(CONFIG_M5272)
/* set RX checksum */
val = readl(fep->hwp + FEC_RACC);
if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
@@ -524,7 +523,6 @@ fec_restart(struct net_device *ndev, int duplex)
else
val &= ~FEC_RACC_OPTIONS;
writel(val, fep->hwp + FEC_RACC);
-#endif
/*
* The phy interface and speed need to get configured
@@ -577,7 +575,6 @@ fec_restart(struct net_device *ndev, int duplex)
#endif
}
-#if !defined(CONFIG_M5272)
/* enable pause frame*/
if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
@@ -595,7 +592,6 @@ fec_restart(struct net_device *ndev, int duplex)
} else {
rcntl &= ~FEC_ENET_FCE;
}
-#endif /* !defined(CONFIG_M5272) */
writel(rcntl, fep->hwp + FEC_R_CNTRL);
@@ -1209,9 +1205,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
/* mask with MAC supported features */
if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
phy_dev->supported &= PHY_GBIT_FEATURES;
-#if !defined(CONFIG_M5272)
phy_dev->supported |= SUPPORTED_Pause;
-#endif
}
else
phy_dev->supported &= PHY_BASIC_FEATURES;
@@ -1396,8 +1390,6 @@ static int fec_enet_get_ts_info(struct net_device *ndev,
}
}
-#if !defined(CONFIG_M5272)
-
static void fec_enet_get_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *pause)
{
@@ -1444,13 +1436,9 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
return 0;
}
-#endif /* !defined(CONFIG_M5272) */
-
static const struct ethtool_ops fec_enet_ethtool_ops = {
-#if !defined(CONFIG_M5272)
.get_pauseparam = fec_enet_get_pauseparam,
.set_pauseparam = fec_enet_set_pauseparam,
-#endif
.get_settings = fec_enet_get_settings,
.set_settings = fec_enet_set_settings,
.get_drvinfo = fec_enet_get_drvinfo,
@@ -1886,12 +1874,10 @@ fec_probe(struct platform_device *pdev)
/* setup board info structure */
fep = netdev_priv(ndev);
-#if !defined(CONFIG_M5272)
/* default enable pause frame auto negotiation */
if (pdev->id_entry &&
(pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
-#endif
fep->hwp = devm_request_and_ioremap(&pdev->dev, r);
fep->pdev = pdev;
diff --git a/trunk/drivers/net/ethernet/marvell/mv643xx_eth.c b/trunk/drivers/net/ethernet/marvell/mv643xx_eth.c
index d1cbfb12c1ca..2ad1494efbb3 100644
--- a/trunk/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/trunk/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1757,7 +1757,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
memset(rxq->rx_desc_area, 0, size);
rxq->rx_desc_area_size = size;
- rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
+ rxq->rx_skb = kmalloc_array(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
GFP_KERNEL);
if (rxq->rx_skb == NULL)
goto out_free;
diff --git a/trunk/drivers/net/ethernet/marvell/pxa168_eth.c b/trunk/drivers/net/ethernet/marvell/pxa168_eth.c
index 1c8af8ba08d9..339bb323cb0c 100644
--- a/trunk/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/trunk/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1015,7 +1015,7 @@ static int rxq_init(struct net_device *dev)
int rx_desc_num = pep->rx_ring_size;
/* Allocate RX skb rings */
- pep->rx_skb = kzalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
+ pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
GFP_KERNEL);
if (!pep->rx_skb)
return -ENOMEM;
@@ -1076,7 +1076,7 @@ static int txq_init(struct net_device *dev)
int size = 0, i = 0;
int tx_desc_num = pep->tx_ring_size;
- pep->tx_skb = kzalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
+ pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
GFP_KERNEL);
if (!pep->tx_skb)
return -ENOMEM;
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c
index 8a434997a0df..2f4a26039e80 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -632,9 +632,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.cqe_size = 32;
}
- dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
- mlx4_warn(dev, "Timestamping is not supported in slave mode.\n");
-
slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
return 0;
diff --git a/trunk/drivers/net/ethernet/octeon/octeon_mgmt.c b/trunk/drivers/net/ethernet/octeon/octeon_mgmt.c
index 91a8a5d28037..921729f9c85c 100644
--- a/trunk/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/trunk/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -46,25 +46,17 @@
union mgmt_port_ring_entry {
u64 d64;
struct {
-#define RING_ENTRY_CODE_DONE 0xf
-#define RING_ENTRY_CODE_MORE 0x10
-#ifdef __BIG_ENDIAN_BITFIELD
- u64 reserved_62_63:2;
+ u64 reserved_62_63:2;
/* Length of the buffer/packet in bytes */
- u64 len:14;
+ u64 len:14;
/* For TX, signals that the packet should be timestamped */
- u64 tstamp:1;
+ u64 tstamp:1;
/* The RX error code */
- u64 code:7;
+ u64 code:7;
+#define RING_ENTRY_CODE_DONE 0xf
+#define RING_ENTRY_CODE_MORE 0x10
/* Physical address of the buffer */
- u64 addr:40;
-#else
- u64 addr:40;
- u64 code:7;
- u64 tstamp:1;
- u64 len:14;
- u64 reserved_62_63:2;
-#endif
+ u64 addr:40;
} s;
};
@@ -1149,13 +1141,10 @@ static int octeon_mgmt_open(struct net_device *netdev)
/* For compensation state to lock. */
ndelay(1040 * NS_PER_PHY_CLK);
- /* Default Interframe Gaps are too small. Recommended
- * workaround is.
- *
- * AGL_GMX_TX_IFG[IFG1]=14
- * AGL_GMX_TX_IFG[IFG2]=10
+ /* Some Ethernet switches cannot handle standard
+ * Interframe Gap, increase to 16 bytes.
*/
- cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
+ cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88);
}
octeon_mgmt_rx_fill_ring(netdev);
diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 6acf82b9f018..43562c256379 100644
--- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -642,7 +642,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
qlcnic_83xx_config_intrpt(adapter, 0);
}
/* Allow dma queues to drain after context reset */
- mdelay(20);
+ msleep(20);
}
}
diff --git a/trunk/drivers/net/ethernet/renesas/sh_eth.c b/trunk/drivers/net/ethernet/renesas/sh_eth.c
index e29fe8dbd226..b4479b5aaee4 100644
--- a/trunk/drivers/net/ethernet/renesas/sh_eth.c
+++ b/trunk/drivers/net/ethernet/renesas/sh_eth.c
@@ -380,9 +380,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.eesipr_value = 0x01ff009f,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
- .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
- EESR_ECI,
+ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
+ EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
.apr = 1,
@@ -428,9 +427,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
- .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
- EESR_ECI,
+ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
+ EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
.apr = 1,
@@ -480,9 +478,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.rmcr_value = 0x00000001,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
- .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
- EESR_ECI,
+ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
+ EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
.apr = 1,
@@ -595,9 +592,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tx_check = EESR_TC1 | EESR_FTC,
- .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
- EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
- EESR_TDE | EESR_ECI,
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+ EESR_ECI,
.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
EESR_TFE,
.fdr_value = 0x0000072f,
@@ -677,9 +674,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tx_check = EESR_TC1 | EESR_FTC,
- .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
- EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
- EESR_TDE | EESR_ECI,
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+ EESR_ECI,
.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
EESR_TFE,
@@ -814,9 +811,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tx_check = EESR_TC1 | EESR_FTC,
- .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
- EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
- EESR_TDE | EESR_ECI,
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+ EESR_ECI,
.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
EESR_TFE,
@@ -1404,23 +1401,16 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
desc_status = edmac_to_cpu(mdp, rxdesc->status);
pkt_len = rxdesc->frame_length;
+#if defined(CONFIG_ARCH_R8A7740)
+ desc_status >>= 16;
+#endif
+
if (--boguscnt < 0)
break;
if (!(desc_status & RDFEND))
ndev->stats.rx_length_errors++;
-#if defined(CONFIG_ARCH_R8A7740)
- /*
- * In case of almost all GETHER/ETHERs, the Receive Frame State
- * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
- * bit 0. However, in case of the R8A7740's GETHER, the RFS
- * bits are from bit 25 to bit 16. So, the driver needs right
- * shifting by 16.
- */
- desc_status >>= 16;
-#endif
-
if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
RD_RFS5 | RD_RFS6 | RD_RFS10)) {
ndev->stats.rx_errors++;
@@ -1552,12 +1542,11 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
ignore_link:
if (intr_status & EESR_TWB) {
- /* Unused write back interrupt */
- if (intr_status & EESR_TABT) { /* Transmit Abort int */
+ /* Write buck end. unused write back interrupt */
+ if (intr_status & EESR_TABT) /* Transmit Abort int */
ndev->stats.tx_aborted_errors++;
if (netif_msg_tx_err(mdp))
dev_err(&ndev->dev, "Transmit Abort\n");
- }
}
if (intr_status & EESR_RABT) {
diff --git a/trunk/drivers/net/ethernet/renesas/sh_eth.h b/trunk/drivers/net/ethernet/renesas/sh_eth.h
index 62689a5823be..1ddc9f235bcb 100644
--- a/trunk/drivers/net/ethernet/renesas/sh_eth.h
+++ b/trunk/drivers/net/ethernet/renesas/sh_eth.h
@@ -253,7 +253,7 @@ enum EESR_BIT {
#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
EESR_RTO)
-#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
+#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \
EESR_RDE | EESR_RFRMER | EESR_ADE | \
EESR_TFE | EESR_TDE | EESR_ECI)
#define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \
diff --git a/trunk/drivers/net/ethernet/sfc/efx.c b/trunk/drivers/net/ethernet/sfc/efx.c
index 4a14a940c65e..39e4cb39de29 100644
--- a/trunk/drivers/net/ethernet/sfc/efx.c
+++ b/trunk/drivers/net/ethernet/sfc/efx.c
@@ -2139,7 +2139,7 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
return sprintf(buf, "%d\n", efx->phy_type);
}
-static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
+static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
static int efx_register_netdev(struct efx_nic *efx)
{
diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/common.h b/trunk/drivers/net/ethernet/stmicro/stmmac/common.h
index 95176979b2d2..7788fbe44f0a 100644
--- a/trunk/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/trunk/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -297,8 +297,8 @@ struct dma_features {
#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
/* Default LPI timers */
-#define STMMAC_DEFAULT_LIT_LS 0x3E8
-#define STMMAC_DEFAULT_TWT_LS 0x0
+#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8
+#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e9eab29db7be..618446ae1ec1 100644
--- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -130,7 +130,7 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
-#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
+#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
/* By default the driver will use the ring mode to manage tx and rx descriptors
* but passing this value so user can force to use the chain instead of the ring
@@ -288,7 +288,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
struct stmmac_priv *priv = (struct stmmac_priv *)arg;
stmmac_enable_eee_mode(priv);
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
}
/**
@@ -304,34 +304,22 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
{
bool ret = false;
- /* Using PCS we cannot dial with the phy registers at this stage
- * so we do not support extra feature like EEE.
- */
- if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
- (priv->pcs == STMMAC_PCS_RTBI))
- goto out;
-
/* MAC core supports the EEE feature. */
if (priv->dma_cap.eee) {
/* Check if the PHY supports EEE */
if (phy_init_eee(priv->phydev, 1))
goto out;
- if (!priv->eee_active) {
- priv->eee_active = 1;
- init_timer(&priv->eee_ctrl_timer);
- priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
- priv->eee_ctrl_timer.data = (unsigned long)priv;
- priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
- add_timer(&priv->eee_ctrl_timer);
-
- priv->hw->mac->set_eee_timer(priv->ioaddr,
- STMMAC_DEFAULT_LIT_LS,
- priv->tx_lpi_timer);
- } else
- /* Set HW EEE according to the speed */
- priv->hw->mac->set_eee_pls(priv->ioaddr,
- priv->phydev->link);
+ priv->eee_active = 1;
+ init_timer(&priv->eee_ctrl_timer);
+ priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
+ priv->eee_ctrl_timer.data = (unsigned long)priv;
+ priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer);
+ add_timer(&priv->eee_ctrl_timer);
+
+ priv->hw->mac->set_eee_timer(priv->ioaddr,
+ STMMAC_DEFAULT_LIT_LS_TIMER,
+ priv->tx_lpi_timer);
pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
@@ -341,6 +329,20 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
return ret;
}
+/**
+ * stmmac_eee_adjust: adjust HW EEE according to the speed
+ * @priv: driver private structure
+ * Description:
+ * When the EEE has been already initialised we have to
+ * modify the PLS bit in the LPI ctrl & status reg according
+ * to the PHY link status. For this reason.
+ */
+static void stmmac_eee_adjust(struct stmmac_priv *priv)
+{
+ if (priv->eee_enabled)
+ priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
+}
+
/* stmmac_get_tx_hwtstamp: get HW TX timestamps
* @priv: driver private structure
* @entry : descriptor index to be used.
@@ -767,10 +769,7 @@ static void stmmac_adjust_link(struct net_device *dev)
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
- /* At this stage, it could be needed to setup the EEE or adjust some
- * MAC related HW registers.
- */
- priv->eee_enabled = stmmac_eee_init(priv);
+ stmmac_eee_adjust(priv);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1278,7 +1277,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
stmmac_enable_eee_mode(priv);
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
}
spin_unlock(&priv->tx_lock);
}
@@ -1672,9 +1671,14 @@ static int stmmac_open(struct net_device *dev)
if (priv->phydev)
phy_start(priv->phydev);
- priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
+ priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
- priv->eee_enabled = stmmac_eee_init(priv);
+ /* Using PCS we cannot dial with the phy registers at this stage
+ * so we do not support extra feature like EEE.
+ */
+ if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
+ priv->pcs != STMMAC_PCS_RTBI)
+ priv->eee_enabled = stmmac_eee_init(priv);
stmmac_init_tx_coalesce(priv);
@@ -1895,7 +1899,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
#ifdef STMMAC_XMIT_DEBUG
if (netif_msg_pktdata(priv)) {
- pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
+ pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d"
__func__, (priv->cur_tx % txsize),
(priv->dirty_tx % txsize), entry, first, nfrags);
if (priv->extend_desc)
diff --git a/trunk/drivers/net/ethernet/ti/cpsw.c b/trunk/drivers/net/ethernet/ti/cpsw.c
index d1a769f35f9d..21a5b291b4b3 100644
--- a/trunk/drivers/net/ethernet/ti/cpsw.c
+++ b/trunk/drivers/net/ethernet/ti/cpsw.c
@@ -1679,7 +1679,7 @@ static int cpsw_probe(struct platform_device *pdev)
priv->rx_packet_max = max(rx_packet_max, 128);
priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
priv->irq_enabled = true;
- if (!priv->cpts) {
+ if (!ndev) {
pr_err("error allocating cpts\n");
goto clean_ndev_ret;
}
@@ -1973,12 +1973,9 @@ static int cpsw_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
- struct cpsw_priv *priv = netdev_priv(ndev);
if (netif_running(ndev))
cpsw_ndo_stop(ndev);
- soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset);
- soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset);
pm_runtime_put_sync(&pdev->dev);
return 0;
diff --git a/trunk/drivers/net/ethernet/ti/davinci_cpdma.c b/trunk/drivers/net/ethernet/ti/davinci_cpdma.c
index 053c84fd0853..49dfd592ac1e 100644
--- a/trunk/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/trunk/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -705,13 +705,6 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
}
buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
- ret = dma_mapping_error(ctlr->dev, buffer);
- if (ret) {
- cpdma_desc_free(ctlr->pool, desc, 1);
- ret = -EINVAL;
- goto unlock_ret;
- }
-
mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
cpdma_desc_to_port(chan, mode, directed);
diff --git a/trunk/drivers/net/ethernet/ti/davinci_mdio.c b/trunk/drivers/net/ethernet/ti/davinci_mdio.c
index c47f0dbcebb5..b2275d1b19b3 100644
--- a/trunk/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/trunk/drivers/net/ethernet/ti/davinci_mdio.c
@@ -459,12 +459,15 @@ static int davinci_mdio_suspend(struct device *dev)
static int davinci_mdio_resume(struct device *dev)
{
struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ u32 ctrl;
pm_runtime_get_sync(data->dev);
spin_lock(&data->lock);
/* restart the scan state machine */
- __davinci_mdio_reset(data);
+ ctrl = __raw_readl(&data->regs->control);
+ ctrl |= CONTROL_ENABLE;
+ __raw_writel(ctrl, &data->regs->control);
data->suspended = false;
spin_unlock(&data->lock);
@@ -473,8 +476,8 @@ static int davinci_mdio_resume(struct device *dev)
}
static const struct dev_pm_ops davinci_mdio_pm_ops = {
- .suspend_late = davinci_mdio_suspend,
- .resume_early = davinci_mdio_resume,
+ .suspend = davinci_mdio_suspend,
+ .resume = davinci_mdio_resume,
};
static const struct of_device_id davinci_mdio_of_mtable[] = {
diff --git a/trunk/drivers/net/hyperv/netvsc_drv.c b/trunk/drivers/net/hyperv/netvsc_drv.c
index 4dccead586be..ab2307b5d9a7 100644
--- a/trunk/drivers/net/hyperv/netvsc_drv.c
+++ b/trunk/drivers/net/hyperv/netvsc_drv.c
@@ -285,9 +285,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
skb->protocol = eth_type_trans(skb, net);
skb->ip_summed = CHECKSUM_NONE;
- if (packet->vlan_tci & VLAN_TAG_PRESENT)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- packet->vlan_tci);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), packet->vlan_tci);
net->stats.rx_packets++;
net->stats.rx_bytes += packet->total_data_buflen;
diff --git a/trunk/drivers/net/macvlan.c b/trunk/drivers/net/macvlan.c
index 6e91931a1c2c..1c502bb0c916 100644
--- a/trunk/drivers/net/macvlan.c
+++ b/trunk/drivers/net/macvlan.c
@@ -853,24 +853,18 @@ static int macvlan_changelink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct macvlan_dev *vlan = netdev_priv(dev);
-
+ if (data && data[IFLA_MACVLAN_MODE])
+ vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
if (data && data[IFLA_MACVLAN_FLAGS]) {
__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
- if (vlan->port->passthru && promisc) {
- int err;
-
- if (flags & MACVLAN_FLAG_NOPROMISC)
- err = dev_set_promiscuity(vlan->lowerdev, -1);
- else
- err = dev_set_promiscuity(vlan->lowerdev, 1);
- if (err < 0)
- return err;
- }
+
+ if (promisc && (flags & MACVLAN_FLAG_NOPROMISC))
+ dev_set_promiscuity(vlan->lowerdev, -1);
+ else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC))
+ dev_set_promiscuity(vlan->lowerdev, 1);
vlan->flags = flags;
}
- if (data && data[IFLA_MACVLAN_MODE])
- vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
return 0;
}
diff --git a/trunk/drivers/net/macvtap.c b/trunk/drivers/net/macvtap.c
index b6dd6a75919a..59e9605de316 100644
--- a/trunk/drivers/net/macvtap.c
+++ b/trunk/drivers/net/macvtap.c
@@ -524,10 +524,8 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
return -EMSGSIZE;
num_pages = get_user_pages_fast(base, size, 0, &page[i]);
if (num_pages != size) {
- int j;
-
- for (j = 0; j < num_pages; j++)
- put_page(page[i + j]);
+ for (i = 0; i < num_pages; i++)
+ put_page(page[i]);
return -EFAULT;
}
truesize = size * PAGE_SIZE;
diff --git a/trunk/drivers/net/tun.c b/trunk/drivers/net/tun.c
index 9c61f8734a40..bfa9bb48e42d 100644
--- a/trunk/drivers/net/tun.c
+++ b/trunk/drivers/net/tun.c
@@ -1010,10 +1010,8 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
return -EMSGSIZE;
num_pages = get_user_pages_fast(base, size, 0, &page[i]);
if (num_pages != size) {
- int j;
-
- for (j = 0; j < num_pages; j++)
- put_page(page[i + j]);
+ for (i = 0; i < num_pages; i++)
+ put_page(page[i]);
return -EFAULT;
}
truesize = size * PAGE_SIZE;
diff --git a/trunk/drivers/net/usb/qmi_wwan.c b/trunk/drivers/net/usb/qmi_wwan.c
index 56459215a22b..d095d0d3056b 100644
--- a/trunk/drivers/net/usb/qmi_wwan.c
+++ b/trunk/drivers/net/usb/qmi_wwan.c
@@ -590,13 +590,7 @@ static const struct usb_device_id products[] = {
{QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
{QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
- {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel/Verizon USB-1000 */
- {QMI_GOBI1K_DEVICE(0x1410, 0xa002)}, /* Novatel Gobi Modem device */
- {QMI_GOBI1K_DEVICE(0x1410, 0xa003)}, /* Novatel Gobi Modem device */
- {QMI_GOBI1K_DEVICE(0x1410, 0xa004)}, /* Novatel Gobi Modem device */
- {QMI_GOBI1K_DEVICE(0x1410, 0xa005)}, /* Novatel Gobi Modem device */
- {QMI_GOBI1K_DEVICE(0x1410, 0xa006)}, /* Novatel Gobi Modem device */
- {QMI_GOBI1K_DEVICE(0x1410, 0xa007)}, /* Novatel Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
diff --git a/trunk/drivers/net/vxlan.c b/trunk/drivers/net/vxlan.c
index 57325f356d4f..3b1d2ee7156b 100644
--- a/trunk/drivers/net/vxlan.c
+++ b/trunk/drivers/net/vxlan.c
@@ -565,22 +565,18 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
/* Watch incoming packets to learn mapping between Ethernet address
* and Tunnel endpoint.
- * Return true if packet is bogus and should be droppped.
*/
-static bool vxlan_snoop(struct net_device *dev,
+static void vxlan_snoop(struct net_device *dev,
__be32 src_ip, const u8 *src_mac)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
+ int err;
f = vxlan_find_mac(vxlan, src_mac);
if (likely(f)) {
if (likely(f->remote.remote_ip == src_ip))
- return false;
-
- /* Don't migrate static entries, drop packets */
- if (f->state & NUD_NOARP)
- return true;
+ return;
if (net_ratelimit())
netdev_info(dev,
@@ -592,19 +588,14 @@ static bool vxlan_snoop(struct net_device *dev,
} else {
/* learned new entry */
spin_lock(&vxlan->hash_lock);
-
- /* close off race between vxlan_flush and incoming packets */
- if (netif_running(dev))
- vxlan_fdb_create(vxlan, src_mac, src_ip,
- NUD_REACHABLE,
- NLM_F_EXCL|NLM_F_CREATE,
- vxlan->dst_port,
- vxlan->default_dst.remote_vni,
- 0, NTF_SELF);
+ err = vxlan_fdb_create(vxlan, src_mac, src_ip,
+ NUD_REACHABLE,
+ NLM_F_EXCL|NLM_F_CREATE,
+ vxlan->dst_port,
+ vxlan->default_dst.remote_vni,
+ 0, NTF_SELF);
spin_unlock(&vxlan->hash_lock);
}
-
- return false;
}
@@ -736,9 +727,8 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
vxlan->dev->dev_addr) == 0)
goto drop;
- if ((vxlan->flags & VXLAN_F_LEARN) &&
- vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
- goto drop;
+ if (vxlan->flags & VXLAN_F_LEARN)
+ vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);
__skb_tunnel_rx(skb, vxlan->dev);
skb_reset_network_header(skb);
@@ -1161,11 +1151,9 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
struct sk_buff *skb1;
skb1 = skb_clone(skb, GFP_ATOMIC);
- if (skb1) {
- rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
- if (rc == NETDEV_TX_OK)
- rc = rc1;
- }
+ rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
+ if (rc == NETDEV_TX_OK)
+ rc = rc1;
}
rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);
diff --git a/trunk/drivers/net/wan/dlci.c b/trunk/drivers/net/wan/dlci.c
index 6a8a382c5f4c..147614ed86aa 100644
--- a/trunk/drivers/net/wan/dlci.c
+++ b/trunk/drivers/net/wan/dlci.c
@@ -384,37 +384,21 @@ static int dlci_del(struct dlci_add *dlci)
struct frad_local *flp;
struct net_device *master, *slave;
int err;
- bool found = false;
-
- rtnl_lock();
/* validate slave device */
master = __dev_get_by_name(&init_net, dlci->devname);
- if (!master) {
- err = -ENODEV;
- goto out;
- }
-
- list_for_each_entry(dlp, &dlci_devs, list) {
- if (dlp->master == master) {
- found = true;
- break;
- }
- }
- if (!found) {
- err = -ENODEV;
- goto out;
- }
+ if (!master)
+ return -ENODEV;
if (netif_running(master)) {
- err = -EBUSY;
- goto out;
+ return -EBUSY;
}
dlp = netdev_priv(master);
slave = dlp->slave;
flp = netdev_priv(slave);
+ rtnl_lock();
err = (*flp->deassoc)(slave, master);
if (!err) {
list_del(&dlp->list);
@@ -423,8 +407,8 @@ static int dlci_del(struct dlci_add *dlci)
dev_put(slave);
}
-out:
rtnl_unlock();
+
return err;
}
diff --git a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 62f1b7636c92..0743a47cef8f 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1174,7 +1174,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&priv->htc_pm_lock);
priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
- if (!priv->ps_idle)
+ if (priv->ps_idle)
chip_reset = true;
mutex_unlock(&priv->htc_pm_lock);
diff --git a/trunk/drivers/net/wireless/ath/ath9k/xmit.c b/trunk/drivers/net/wireless/ath/ath9k/xmit.c
index 83ab6be3fe6d..1c9b1bac8b0d 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1570,8 +1570,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
return;
- rcu_read_lock();
-
ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
@@ -1610,10 +1608,8 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
if (ac == last_ac ||
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
- break;
+ return;
}
-
- rcu_read_unlock();
}
/***********/
diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 2c593570497c..b98f2235978e 100644
--- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -930,10 +930,6 @@ int brcmf_bus_start(struct device *dev)
brcmf_fws_del_interface(ifp);
brcmf_fws_deinit(drvr);
}
- if (drvr->iflist[0]) {
- free_netdev(ifp->ndev);
- drvr->iflist[0] = NULL;
- }
if (p2p_ifp) {
free_netdev(p2p_ifp->ndev);
drvr->iflist[1] = NULL;
diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/main.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 9fd6f2fef11b..28e7aeedd184 100644
--- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -3074,8 +3074,21 @@ static void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail)
*/
static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
{
- /* not supporting PS so always return false for now */
- return false;
+ /* disallow PS when one of the following global conditions meets */
+ if (!wlc->pub->associated)
+ return false;
+
+ /* disallow PS when one of these meets when not scanning */
+ if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
+ return false;
+
+ if (wlc->bsscfg->type == BRCMS_TYPE_AP)
+ return false;
+
+ if (wlc->bsscfg->type == BRCMS_TYPE_ADHOC)
+ return false;
+
+ return true;
}
static void brcms_c_statsupd(struct brcms_c_info *wlc)
diff --git a/trunk/drivers/net/wireless/iwlegacy/3945-rs.c b/trunk/drivers/net/wireless/iwlegacy/3945-rs.c
index fe31590a51b2..c9f197d9ca1e 100644
--- a/trunk/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/trunk/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -816,7 +816,6 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
rs_sta->last_txrate_idx = idx;
info->control.rates[0].idx = rs_sta->last_txrate_idx;
}
- info->control.rates[0].count = 1;
D_RATE("leave: %d\n", idx);
}
diff --git a/trunk/drivers/net/wireless/iwlegacy/4965-rs.c b/trunk/drivers/net/wireless/iwlegacy/4965-rs.c
index ed3c42a63a43..1fc0b227e120 100644
--- a/trunk/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/trunk/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2268,7 +2268,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
info->control.rates[0].flags = 0;
}
info->control.rates[0].idx = rate_idx;
- info->control.rates[0].count = 1;
+
}
static void *
diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/rs.c b/trunk/drivers/net/wireless/iwlwifi/dvm/rs.c
index 10fbb176cc8e..907bd6e50aad 100644
--- a/trunk/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/trunk/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -2799,7 +2799,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
info->control.rates[0].flags = 0;
}
info->control.rates[0].idx = rate_idx;
- info->control.rates[0].count = 1;
+
}
static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/rxon.c b/trunk/drivers/net/wireless/iwlwifi/dvm/rxon.c
index cd1ad0019185..707446fa00bd 100644
--- a/trunk/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/trunk/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1378,7 +1378,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
struct iwl_chain_noise_data *data = &priv->chain_noise_data;
int ret;
- if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
+ if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED))
return;
if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-drv.c b/trunk/drivers/net/wireless/iwlwifi/iwl-drv.c
index 40fed1f511e2..39aad9893e0b 100644
--- a/trunk/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/trunk/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1000,12 +1000,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
*/
if (load_module) {
err = request_module("%s", op->name);
-#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
if (err)
IWL_ERR(drv,
"failed to load module %s (error %d), is dynamic loading enabled?\n",
op->name, err);
-#endif
}
return;
diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/rs.c b/trunk/drivers/net/wireless/iwlwifi/mvm/rs.c
index b99fe3163866..55334d542e26 100644
--- a/trunk/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/trunk/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -2546,7 +2546,6 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
info->control.rates[0].flags = 0;
}
info->control.rates[0].idx = rate_idx;
- info->control.rates[0].count = 1;
}
static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/tx.c b/trunk/drivers/net/wireless/iwlwifi/mvm/tx.c
index 48c1891e3df6..f212f16502ff 100644
--- a/trunk/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/trunk/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -180,8 +180,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
return;
} else if (ieee80211_is_back_req(fc)) {
- tx_cmd->tx_flags |=
- cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
+ tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
}
/* HT rate doesn't make sense for a non data frame */
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800lib.c b/trunk/drivers/net/wireless/rt2x00/rt2800lib.c
index 72f32e5caa4d..b52d70c75e1a 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3027,26 +3027,19 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
* TODO: we do not use +6 dBm option to do not increase power beyond
* regulatory limit, however this could be utilized for devices with
* CAPABILITY_POWER_LIMIT.
- *
- * TODO: add different temperature compensation code for RT3290 & RT5390
- * to allow to use BBP_R1 for those chips.
*/
- if (!rt2x00_rt(rt2x00dev, RT3290) &&
- !rt2x00_rt(rt2x00dev, RT5390)) {
- rt2800_bbp_read(rt2x00dev, 1, &r1);
- if (delta <= -12) {
- power_ctrl = 2;
- delta += 12;
- } else if (delta <= -6) {
- power_ctrl = 1;
- delta += 6;
- } else {
- power_ctrl = 0;
- }
- rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
- rt2800_bbp_write(rt2x00dev, 1, r1);
+ rt2800_bbp_read(rt2x00dev, 1, &r1);
+ if (delta <= -12) {
+ power_ctrl = 2;
+ delta += 12;
+ } else if (delta <= -6) {
+ power_ctrl = 1;
+ delta += 6;
+ } else {
+ power_ctrl = 0;
}
-
+ rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
+ rt2800_bbp_write(rt2x00dev, 1, r1);
offset = TX_PWR_CFG_0;
for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
diff --git a/trunk/drivers/net/xen-netback/netback.c b/trunk/drivers/net/xen-netback/netback.c
index 8c20935d72c9..37984e6d4e99 100644
--- a/trunk/drivers/net/xen-netback/netback.c
+++ b/trunk/drivers/net/xen-netback/netback.c
@@ -662,7 +662,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
{
struct xenvif *vif = NULL, *tmp;
s8 status;
- u16 flags;
+ u16 irq, flags;
struct xen_netif_rx_response *resp;
struct sk_buff_head rxq;
struct sk_buff *skb;
@@ -771,13 +771,13 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
sco->meta_slots_used);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
+ irq = vif->irq;
+ if (ret && list_empty(&vif->notify_list))
+ list_add_tail(&vif->notify_list, ¬ify);
xenvif_notify_tx_completion(vif);
- if (ret && list_empty(&vif->notify_list))
- list_add_tail(&vif->notify_list, ¬ify);
- else
- xenvif_put(vif);
+ xenvif_put(vif);
npo.meta_cons += sco->meta_slots_used;
dev_kfree_skb(skb);
}
@@ -785,7 +785,6 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
list_for_each_entry_safe(vif, tmp, ¬ify, notify_list) {
notify_remote_via_irq(vif->irq);
list_del_init(&vif->notify_list);
- xenvif_put(vif);
}
/* More work to do? */
diff --git a/trunk/drivers/parisc/iosapic.c b/trunk/drivers/parisc/iosapic.c
index e79e006eb9ab..9544cdc0d1af 100644
--- a/trunk/drivers/parisc/iosapic.c
+++ b/trunk/drivers/parisc/iosapic.c
@@ -811,70 +811,6 @@ int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)
return pcidev->irq;
}
-static struct iosapic_info *first_isi = NULL;
-
-#ifdef CONFIG_64BIT
-int iosapic_serial_irq(int num)
-{
- struct iosapic_info *isi = first_isi;
- struct irt_entry *irte = NULL; /* only used if PAT PDC */
- struct vector_info *vi;
- int isi_line; /* line used by device */
-
- /* lookup IRT entry for isi/slot/pin set */
- irte = &irt_cell[num];
-
- DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n",
- irte,
- irte->entry_type,
- irte->entry_length,
- irte->polarity_trigger,
- irte->src_bus_irq_devno,
- irte->src_bus_id,
- irte->src_seg_id,
- irte->dest_iosapic_intin,
- (u32) irte->dest_iosapic_addr);
- isi_line = irte->dest_iosapic_intin;
-
- /* get vector info for this input line */
- vi = isi->isi_vector + isi_line;
- DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", isi_line, vi);
-
- /* If this IRQ line has already been setup, skip it */
- if (vi->irte)
- goto out;
-
- vi->irte = irte;
-
- /*
- * Allocate processor IRQ
- *
- * XXX/FIXME The txn_alloc_irq() code and related code should be
- * moved to enable_irq(). That way we only allocate processor IRQ
- * bits for devices that actually have drivers claiming them.
- * Right now we assign an IRQ to every PCI device present,
- * regardless of whether it's used or not.
- */
- vi->txn_irq = txn_alloc_irq(8);
-
- if (vi->txn_irq < 0)
- panic("I/O sapic: couldn't get TXN IRQ\n");
-
- /* enable_irq() will use txn_* to program IRdT */
- vi->txn_addr = txn_alloc_addr(vi->txn_irq);
- vi->txn_data = txn_alloc_data(vi->txn_irq);
-
- vi->eoi_addr = isi->addr + IOSAPIC_REG_EOI;
- vi->eoi_data = cpu_to_le32(vi->txn_data);
-
- cpu_claim_irq(vi->txn_irq, &iosapic_interrupt_type, vi);
-
- out:
-
- return vi->txn_irq;
-}
-#endif
-
/*
** squirrel away the I/O Sapic Version
@@ -941,8 +877,6 @@ void *iosapic_register(unsigned long hpa)
vip->irqline = (unsigned char) cnt;
vip->iosapic = isi;
}
- if (!first_isi)
- first_isi = isi;
return isi;
}
diff --git a/trunk/drivers/pci/hotplug/acpiphp_glue.c b/trunk/drivers/pci/hotplug/acpiphp_glue.c
index 59df8575a48c..716aa93fff76 100644
--- a/trunk/drivers/pci/hotplug/acpiphp_glue.c
+++ b/trunk/drivers/pci/hotplug/acpiphp_glue.c
@@ -61,7 +61,6 @@ static DEFINE_MUTEX(bridge_mutex);
static void handle_hotplug_event_bridge (acpi_handle, u32, void *);
static void acpiphp_sanitize_bus(struct pci_bus *bus);
static void acpiphp_set_hpp_values(struct pci_bus *bus);
-static void hotplug_event_func(acpi_handle handle, u32 type, void *context);
static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
static void free_bridge(struct kref *kref);
@@ -148,7 +147,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
static const struct acpi_dock_ops acpiphp_dock_ops = {
- .handler = hotplug_event_func,
+ .handler = handle_hotplug_event_func,
};
/* Check whether the PCI device is managed by native PCIe hotplug driver */
@@ -180,20 +179,6 @@ static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
return true;
}
-static void acpiphp_dock_init(void *data)
-{
- struct acpiphp_func *func = data;
-
- get_bridge(func->slot->bridge);
-}
-
-static void acpiphp_dock_release(void *data)
-{
- struct acpiphp_func *func = data;
-
- put_bridge(func->slot->bridge);
-}
-
/* callback routine to register each ACPI PCI slot object */
static acpi_status
register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
@@ -313,8 +298,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
*/
newfunc->flags &= ~FUNC_HAS_EJ0;
if (register_hotplug_dock_device(handle,
- &acpiphp_dock_ops, newfunc,
- acpiphp_dock_init, acpiphp_dock_release))
+ &acpiphp_dock_ops, newfunc))
dbg("failed to register dock device\n");
/* we need to be notified when dock events happen
@@ -686,7 +670,6 @@ static int __ref enable_device(struct acpiphp_slot *slot)
struct pci_bus *bus = slot->bridge->pci_bus;
struct acpiphp_func *func;
int num, max, pass;
- LIST_HEAD(add_list);
if (slot->flags & SLOT_ENABLED)
goto err_exit;
@@ -711,15 +694,13 @@ static int __ref enable_device(struct acpiphp_slot *slot)
max = pci_scan_bridge(bus, dev, max, pass);
if (pass && dev->subordinate) {
check_hotplug_bridge(slot, dev);
- pcibios_resource_survey_bus(dev->subordinate);
- __pci_bus_size_bridges(dev->subordinate,
- &add_list);
+ pci_bus_size_bridges(dev->subordinate);
}
}
}
}
- __pci_bus_assign_resources(bus, &add_list, NULL);
+ pci_bus_assign_resources(bus);
acpiphp_sanitize_bus(bus);
acpiphp_set_hpp_values(bus);
acpiphp_set_acpi_region(slot);
@@ -1084,12 +1065,22 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type,
alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge);
}
-static void hotplug_event_func(acpi_handle handle, u32 type, void *context)
+static void _handle_hotplug_event_func(struct work_struct *work)
{
- struct acpiphp_func *func = context;
+ struct acpiphp_func *func;
char objname[64];
struct acpi_buffer buffer = { .length = sizeof(objname),
.pointer = objname };
+ struct acpi_hp_work *hp_work;
+ acpi_handle handle;
+ u32 type;
+
+ hp_work = container_of(work, struct acpi_hp_work, work);
+ handle = hp_work->handle;
+ type = hp_work->type;
+ func = (struct acpiphp_func *)hp_work->context;
+
+ acpi_scan_lock_acquire();
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
@@ -1122,18 +1113,6 @@ static void hotplug_event_func(acpi_handle handle, u32 type, void *context)
warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
break;
}
-}
-
-static void _handle_hotplug_event_func(struct work_struct *work)
-{
- struct acpi_hp_work *hp_work;
- struct acpiphp_func *func;
-
- hp_work = container_of(work, struct acpi_hp_work, work);
- func = hp_work->context;
- acpi_scan_lock_acquire();
-
- hotplug_event_func(hp_work->handle, hp_work->type, func);
acpi_scan_lock_release();
kfree(hp_work); /* allocated in handle_hotplug_event_func */
diff --git a/trunk/drivers/pci/pci.h b/trunk/drivers/pci/pci.h
index d1182c4a754e..68678ed76b0d 100644
--- a/trunk/drivers/pci/pci.h
+++ b/trunk/drivers/pci/pci.h
@@ -202,11 +202,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int reg);
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
void pci_configure_ari(struct pci_dev *dev);
-void __ref __pci_bus_size_bridges(struct pci_bus *bus,
- struct list_head *realloc_head);
-void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
- struct list_head *realloc_head,
- struct list_head *fail_head);
/**
* pci_ari_enabled - query ARI forwarding status
diff --git a/trunk/drivers/pci/setup-bus.c b/trunk/drivers/pci/setup-bus.c
index d254e2379533..16abaaa1f83c 100644
--- a/trunk/drivers/pci/setup-bus.c
+++ b/trunk/drivers/pci/setup-bus.c
@@ -1044,7 +1044,7 @@ static void pci_bus_size_cardbus(struct pci_bus *bus,
;
}
-void __ref __pci_bus_size_bridges(struct pci_bus *bus,
+static void __ref __pci_bus_size_bridges(struct pci_bus *bus,
struct list_head *realloc_head)
{
struct pci_dev *dev;
@@ -1115,9 +1115,9 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_size_bridges);
-void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
- struct list_head *realloc_head,
- struct list_head *fail_head)
+static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
+ struct list_head *realloc_head,
+ struct list_head *fail_head)
{
struct pci_bus *b;
struct pci_dev *dev;
diff --git a/trunk/drivers/regulator/tps6586x-regulator.c b/trunk/drivers/regulator/tps6586x-regulator.c
index 2c9155b66f09..d8fa37d5c734 100644
--- a/trunk/drivers/regulator/tps6586x-regulator.c
+++ b/trunk/drivers/regulator/tps6586x-regulator.c
@@ -439,7 +439,7 @@ static int tps6586x_regulator_remove(struct platform_device *pdev)
static struct platform_driver tps6586x_regulator_driver = {
.driver = {
- .name = "tps6586x-regulator",
+ .name = "tps6586x-pmic",
.owner = THIS_MODULE,
},
.probe = tps6586x_regulator_probe,
diff --git a/trunk/drivers/s390/net/netiucv.c b/trunk/drivers/s390/net/netiucv.c
index 9ca3996f65b2..4ffa66c87ea5 100644
--- a/trunk/drivers/s390/net/netiucv.c
+++ b/trunk/drivers/s390/net/netiucv.c
@@ -2040,7 +2040,6 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
netiucv_setup_netdevice);
if (!dev)
return NULL;
- rtnl_lock();
if (dev_alloc_name(dev, dev->name) < 0)
goto out_netdev;
@@ -2062,7 +2061,6 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
out_fsm:
kfree_fsm(privptr->fsm);
out_netdev:
- rtnl_unlock();
free_netdev(dev);
return NULL;
}
@@ -2102,7 +2100,6 @@ static ssize_t conn_write(struct device_driver *drv,
rc = netiucv_register_device(dev);
if (rc) {
- rtnl_unlock();
IUCV_DBF_TEXT_(setup, 2,
"ret %d from netiucv_register_device\n", rc);
goto out_free_ndev;
@@ -2112,8 +2109,7 @@ static ssize_t conn_write(struct device_driver *drv,
priv = netdev_priv(dev);
SET_NETDEV_DEV(dev, priv->dev);
- rc = register_netdevice(dev);
- rtnl_unlock();
+ rc = register_netdev(dev);
if (rc)
goto out_unreg;
diff --git a/trunk/drivers/scsi/bfa/bfad_debugfs.c b/trunk/drivers/scsi/bfa/bfad_debugfs.c
index b63d534192e3..439c012be763 100644
--- a/trunk/drivers/scsi/bfa/bfad_debugfs.c
+++ b/trunk/drivers/scsi/bfa/bfad_debugfs.c
@@ -186,7 +186,7 @@ bfad_debugfs_lseek(struct file *file, loff_t offset, int orig)
file->f_pos += offset;
break;
case 2:
- file->f_pos = debug->buffer_len + offset;
+ file->f_pos = debug->buffer_len - offset;
break;
default:
return -EINVAL;
diff --git a/trunk/drivers/scsi/fcoe/fcoe.c b/trunk/drivers/scsi/fcoe/fcoe.c
index 32ae6c67ea3a..292b24f9bf93 100644
--- a/trunk/drivers/scsi/fcoe/fcoe.c
+++ b/trunk/drivers/scsi/fcoe/fcoe.c
@@ -1656,12 +1656,9 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) {
- /* must set skb->dev before calling vlan_put_tag */
+ skb->vlan_tci = VLAN_TAG_PRESENT |
+ vlan_dev_vlan_id(fcoe->netdev);
skb->dev = fcoe->realdev;
- skb = __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- vlan_dev_vlan_id(fcoe->netdev));
- if (!skb)
- return -ENOMEM;
} else
skb->dev = fcoe->netdev;
diff --git a/trunk/drivers/scsi/fcoe/fcoe_ctlr.c b/trunk/drivers/scsi/fcoe/fcoe_ctlr.c
index 795843dde8ec..cd743c545ce9 100644
--- a/trunk/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/trunk/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1548,6 +1548,9 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
struct fcoe_fcf *best = fip->sel_fcf;
+ struct fcoe_fcf *first;
+
+ first = list_first_entry(&fip->fcfs, struct fcoe_fcf, list);
list_for_each_entry(fcf, &fip->fcfs, list) {
LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx "
@@ -1565,15 +1568,17 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
"" : "un");
continue;
}
- if (!best || fcf->pri < best->pri || best->flogi_sent)
- best = fcf;
- if (fcf->fabric_name != best->fabric_name ||
- fcf->vfid != best->vfid ||
- fcf->fc_map != best->fc_map) {
+ if (fcf->fabric_name != first->fabric_name ||
+ fcf->vfid != first->vfid ||
+ fcf->fc_map != first->fc_map) {
LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
"or FC-MAP\n");
return NULL;
}
+ if (fcf->flogi_sent)
+ continue;
+ if (!best || fcf->pri < best->pri || best->flogi_sent)
+ best = fcf;
}
fip->sel_fcf = best;
if (best) {
diff --git a/trunk/drivers/scsi/fnic/fnic_debugfs.c b/trunk/drivers/scsi/fnic/fnic_debugfs.c
index 85e1ffd0e5c5..adc1f7f471f5 100644
--- a/trunk/drivers/scsi/fnic/fnic_debugfs.c
+++ b/trunk/drivers/scsi/fnic/fnic_debugfs.c
@@ -174,7 +174,7 @@ static loff_t fnic_trace_debugfs_lseek(struct file *file,
pos = file->f_pos + offset;
break;
case 2:
- pos = fnic_dbg_prt->buffer_len + offset;
+ pos = fnic_dbg_prt->buffer_len - offset;
}
return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
-EINVAL : (file->f_pos = pos);
diff --git a/trunk/drivers/scsi/ipr.c b/trunk/drivers/scsi/ipr.c
index 6c4cedb44c07..82a3c1ec8706 100644
--- a/trunk/drivers/scsi/ipr.c
+++ b/trunk/drivers/scsi/ipr.c
@@ -8980,6 +8980,19 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
if (!ioa_cfg->res_entries)
goto out;
+ if (ioa_cfg->sis64) {
+ ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
+ BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
+ ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
+ BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
+ ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
+ BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
+
+ if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
+ || !ioa_cfg->vset_ids)
+ goto out_free_res_entries;
+ }
+
for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
@@ -9076,6 +9089,9 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
kfree(ioa_cfg->res_entries);
+ kfree(ioa_cfg->target_ids);
+ kfree(ioa_cfg->array_ids);
+ kfree(ioa_cfg->vset_ids);
goto out;
}
diff --git a/trunk/drivers/scsi/ipr.h b/trunk/drivers/scsi/ipr.h
index 07a85ce41782..a1fb840596ef 100644
--- a/trunk/drivers/scsi/ipr.h
+++ b/trunk/drivers/scsi/ipr.h
@@ -1440,9 +1440,9 @@ struct ipr_ioa_cfg {
/*
* Bitmaps for SIS64 generated target values
*/
- unsigned long target_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
- unsigned long array_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
- unsigned long vset_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
+ unsigned long *target_ids;
+ unsigned long *array_ids;
+ unsigned long *vset_ids;
u16 type; /* CCIN of the card */
diff --git a/trunk/drivers/scsi/libfc/fc_exch.c b/trunk/drivers/scsi/libfc/fc_exch.c
index 8b928c67e4b9..c772d8d27159 100644
--- a/trunk/drivers/scsi/libfc/fc_exch.c
+++ b/trunk/drivers/scsi/libfc/fc_exch.c
@@ -463,7 +463,13 @@ static void fc_exch_delete(struct fc_exch *ep)
fc_exch_release(ep); /* drop hold for exch in mp */
}
-static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
+/**
+ * fc_seq_send() - Send a frame using existing sequence/exchange pair
+ * @lport: The local port that the exchange will be sent on
+ * @sp: The sequence to be sent
+ * @fp: The frame to be sent on the exchange
+ */
+static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
struct fc_frame *fp)
{
struct fc_exch *ep;
@@ -473,7 +479,7 @@ static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
u8 fh_type = fh->fh_type;
ep = fc_seq_exch(sp);
- WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));
+ WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
f_ctl = ntoh24(fh->fh_f_ctl);
fc_exch_setup_hdr(ep, fp, f_ctl);
@@ -496,34 +502,17 @@ static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
error = lport->tt.frame_send(lport, fp);
if (fh_type == FC_TYPE_BLS)
- goto out;
+ return error;
/*
* Update the exchange and sequence flags,
* assuming all frames for the sequence have been sent.
* We can only be called to send once for each sequence.
*/
+ spin_lock_bh(&ep->ex_lock);
ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
if (f_ctl & FC_FC_SEQ_INIT)
ep->esb_stat &= ~ESB_ST_SEQ_INIT;
-out:
- return error;
-}
-
-/**
- * fc_seq_send() - Send a frame using existing sequence/exchange pair
- * @lport: The local port that the exchange will be sent on
- * @sp: The sequence to be sent
- * @fp: The frame to be sent on the exchange
- */
-static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
- struct fc_frame *fp)
-{
- struct fc_exch *ep;
- int error;
- ep = fc_seq_exch(sp);
- spin_lock_bh(&ep->ex_lock);
- error = fc_seq_send_locked(lport, sp, fp);
spin_unlock_bh(&ep->ex_lock);
return error;
}
@@ -640,7 +629,7 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
if (fp) {
fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
- error = fc_seq_send_locked(ep->lp, sp, fp);
+ error = fc_seq_send(ep->lp, sp, fp);
} else
error = -ENOBUFS;
return error;
@@ -1143,7 +1132,7 @@ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
f_ctl |= ep->f_ctl;
fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
- fc_seq_send_locked(ep->lp, sp, fp);
+ fc_seq_send(ep->lp, sp, fp);
}
/**
@@ -1318,8 +1307,8 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
ap->ba_low_seq_cnt = htons(sp->cnt);
}
sp = fc_seq_start_next_locked(sp);
- fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
spin_unlock_bh(&ep->ex_lock);
+ fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
fc_frame_free(rx_fp);
return;
diff --git a/trunk/drivers/scsi/libfc/fc_rport.c b/trunk/drivers/scsi/libfc/fc_rport.c
index 6bbb9447b75d..d518d17e940f 100644
--- a/trunk/drivers/scsi/libfc/fc_rport.c
+++ b/trunk/drivers/scsi/libfc/fc_rport.c
@@ -1962,7 +1962,7 @@ static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len,
rdata->flags |= FC_RP_FLAGS_RETRY;
rdata->supported_classes = FC_COS_CLASS3;
- if (!(lport->service_params & FCP_SPPF_INIT_FCN))
+ if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR))
return 0;
spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
diff --git a/trunk/drivers/scsi/lpfc/lpfc_debugfs.c b/trunk/drivers/scsi/lpfc/lpfc_debugfs.c
index f525ecb7a9c6..f63f5ff7f274 100644
--- a/trunk/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/trunk/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1178,7 +1178,7 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
pos = file->f_pos + off;
break;
case 2:
- pos = debug->len + off;
+ pos = debug->len - off;
}
return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
}
diff --git a/trunk/drivers/scsi/qla2xxx/qla_inline.h b/trunk/drivers/scsi/qla2xxx/qla_inline.h
index 0a5c8951cebb..98ab921070d2 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_inline.h
+++ b/trunk/drivers/scsi/qla2xxx/qla_inline.h
@@ -278,14 +278,3 @@ qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
}
-
-static inline void
-qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
-{
- if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
- (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
- set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- complete(&ha->mbx_intr_comp);
- }
-}
diff --git a/trunk/drivers/scsi/qla2xxx/qla_isr.c b/trunk/drivers/scsi/qla2xxx/qla_isr.c
index d2a4c75e5b8f..259d9205d876 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_isr.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_isr.c
@@ -104,9 +104,14 @@ qla2100_intr_handler(int irq, void *dev_id)
RD_REG_WORD(®->hccr);
}
}
- qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+ (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
+
return (IRQ_HANDLED);
}
@@ -216,9 +221,14 @@ qla2300_intr_handler(int irq, void *dev_id)
WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT);
RD_REG_WORD_RELAXED(®->hccr);
}
- qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+ (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
+
return (IRQ_HANDLED);
}
@@ -2603,9 +2613,14 @@ qla24xx_intr_handler(int irq, void *dev_id)
if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
ndelay(3500);
}
- qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+ (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
+
return IRQ_HANDLED;
}
@@ -2748,9 +2763,13 @@ qla24xx_msix_default(int irq, void *dev_id)
}
WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT);
} while (0);
- qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+ (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
return IRQ_HANDLED;
}
diff --git a/trunk/drivers/scsi/qla2xxx/qla_mbx.c b/trunk/drivers/scsi/qla2xxx/qla_mbx.c
index 3587ec267fa6..9e5d89db7272 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_mbx.c
@@ -179,6 +179,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
+ clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+
} else {
ql_dbg(ql_dbg_mbx, vha, 0x1011,
"Cmd=%x Polling Mode.\n", command);
diff --git a/trunk/drivers/scsi/qla2xxx/qla_mr.c b/trunk/drivers/scsi/qla2xxx/qla_mr.c
index a6df55838365..937fed8cb038 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_mr.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_mr.c
@@ -148,6 +148,9 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
+
+ clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+
} else {
ql_dbg(ql_dbg_mbx, vha, 0x112c,
"Cmd=%x Polling Mode.\n", command);
@@ -2931,10 +2934,13 @@ qlafx00_intr_handler(int irq, void *dev_id)
QLAFX00_CLR_INTR_REG(ha, clr_intr);
QLAFX00_RD_INTR_REG(ha);
}
-
- qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+ (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
return IRQ_HANDLED;
}
diff --git a/trunk/drivers/scsi/qla2xxx/qla_nx.c b/trunk/drivers/scsi/qla2xxx/qla_nx.c
index cce0cd0d7ec4..10754f518303 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_nx.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_nx.c
@@ -2074,6 +2074,9 @@ qla82xx_intr_handler(int irq, void *dev_id)
}
WRT_REG_DWORD(®->host_int, 0);
}
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (!ha->flags.msi_enabled)
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
#ifdef QL_DEBUG_LEVEL_17
if (!irq && ha->flags.eeh_busy)
@@ -2082,12 +2085,11 @@ qla82xx_intr_handler(int irq, void *dev_id)
status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
#endif
- qla2x00_handle_mbx_completion(ha, status);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- if (!ha->flags.msi_enabled)
- qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
-
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+ (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
return IRQ_HANDLED;
}
@@ -2147,6 +2149,8 @@ qla82xx_msix_default(int irq, void *dev_id)
WRT_REG_DWORD(®->host_int, 0);
} while (0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
#ifdef QL_DEBUG_LEVEL_17
if (!irq && ha->flags.eeh_busy)
ql_log(ql_log_warn, vha, 0x5044,
@@ -2154,9 +2158,11 @@ qla82xx_msix_default(int irq, void *dev_id)
status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
#endif
- qla2x00_handle_mbx_completion(ha, status);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+ (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
return IRQ_HANDLED;
}
@@ -3339,7 +3345,7 @@ void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
ha->flags.mbox_busy = 0;
ql_log(ql_log_warn, vha, 0x6010,
"Doing premature completion of mbx command.\n");
- if (test_and_clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
complete(&ha->mbx_intr_comp);
}
}
diff --git a/trunk/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/trunk/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 66b0b26a1381..7a3870f385f6 100644
--- a/trunk/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/trunk/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -688,12 +688,8 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
* For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
* for qla_tgt_xmit_response LLD code
*/
- if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
- se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT;
- se_cmd->residual_count = 0;
- }
se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
- se_cmd->residual_count += se_cmd->data_length;
+ se_cmd->residual_count = se_cmd->data_length;
cmd->bufflen = 0;
}
diff --git a/trunk/drivers/spi/spi-pxa2xx-dma.c b/trunk/drivers/spi/spi-pxa2xx-dma.c
index 6427600b5bbe..c735c5a008a2 100644
--- a/trunk/drivers/spi/spi-pxa2xx-dma.c
+++ b/trunk/drivers/spi/spi-pxa2xx-dma.c
@@ -59,7 +59,7 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
int ret;
sg_free_table(sgt);
- ret = sg_alloc_table(sgt, nents, GFP_ATOMIC);
+ ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
if (ret)
return ret;
}
diff --git a/trunk/drivers/spi/spi-pxa2xx.c b/trunk/drivers/spi/spi-pxa2xx.c
index 48b396fced0a..f5d84d6f8222 100644
--- a/trunk/drivers/spi/spi-pxa2xx.c
+++ b/trunk/drivers/spi/spi-pxa2xx.c
@@ -1075,7 +1075,7 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
return NULL;
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev,
"failed to allocate memory for platform data\n");
diff --git a/trunk/drivers/spi/spi-s3c64xx.c b/trunk/drivers/spi/spi-s3c64xx.c
index 71cc3e6ef47c..5000586cb98d 100644
--- a/trunk/drivers/spi/spi-s3c64xx.c
+++ b/trunk/drivers/spi/spi-s3c64xx.c
@@ -444,7 +444,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
}
ret = pm_runtime_get_sync(&sdd->pdev->dev);
- if (ret < 0) {
+ if (ret != 0) {
dev_err(dev, "Failed to enable device: %d\n", ret);
goto out_tx;
}
diff --git a/trunk/drivers/staging/media/davinci_vpfe/Kconfig b/trunk/drivers/staging/media/davinci_vpfe/Kconfig
index 12f321dd2399..2e4a28b018e8 100644
--- a/trunk/drivers/staging/media/davinci_vpfe/Kconfig
+++ b/trunk/drivers/staging/media/davinci_vpfe/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_DM365_VPFE
tristate "DM365 VPFE Media Controller Capture Driver"
- depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF
+ depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_VPFE_CAPTURE
select VIDEOBUF2_DMA_CONTIG
help
Support for DM365 VPFE based Media Controller Capture driver.
diff --git a/trunk/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/trunk/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
index d8ce20d2fbda..b88e1ddce229 100644
--- a/trunk/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
+++ b/trunk/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
@@ -639,8 +639,7 @@ static int vpfe_probe(struct platform_device *pdev)
if (ret)
goto probe_free_dev_mem;
- ret = vpfe_initialize_modules(vpfe_dev, pdev);
- if (ret)
+ if (vpfe_initialize_modules(vpfe_dev, pdev))
goto probe_disable_clock;
vpfe_dev->media_dev.dev = vpfe_dev->pdev;
@@ -664,8 +663,7 @@ static int vpfe_probe(struct platform_device *pdev)
/* set the driver data in platform device */
platform_set_drvdata(pdev, vpfe_dev);
/* register subdevs/entities */
- ret = vpfe_register_entities(vpfe_dev);
- if (ret)
+ if (vpfe_register_entities(vpfe_dev))
goto probe_out_v4l2_unregister;
ret = vpfe_attach_irq(vpfe_dev);
diff --git a/trunk/drivers/staging/media/solo6x10/Kconfig b/trunk/drivers/staging/media/solo6x10/Kconfig
index 34f3b6d02d2a..df6569b997b8 100644
--- a/trunk/drivers/staging/media/solo6x10/Kconfig
+++ b/trunk/drivers/staging/media/solo6x10/Kconfig
@@ -5,7 +5,6 @@ config SOLO6X10
select VIDEOBUF2_DMA_SG
select VIDEOBUF2_DMA_CONTIG
select SND_PCM
- select FONT_8x16
---help---
This driver supports the Softlogic based MPEG-4 and h.264 codec
cards.
diff --git a/trunk/drivers/target/iscsi/iscsi_target_configfs.c b/trunk/drivers/target/iscsi/iscsi_target_configfs.c
index 8d8b3ff68490..13e9e715ad2e 100644
--- a/trunk/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/trunk/drivers/target/iscsi/iscsi_target_configfs.c
@@ -155,7 +155,7 @@ static ssize_t lio_target_np_store_iser(
struct iscsi_tpg_np *tpg_np_iser = NULL;
char *endptr;
u32 op;
- int rc = 0;
+ int rc;
op = simple_strtoul(page, &endptr, 0);
if ((op != 1) && (op != 0)) {
@@ -174,32 +174,31 @@ static ssize_t lio_target_np_store_iser(
return -EINVAL;
if (op) {
- rc = request_module("ib_isert");
- if (rc != 0) {
+ int rc = request_module("ib_isert");
+ if (rc != 0)
pr_warn("Unable to request_module for ib_isert\n");
- rc = 0;
- }
tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
np->np_ip, tpg_np, ISCSI_INFINIBAND);
- if (IS_ERR(tpg_np_iser)) {
- rc = PTR_ERR(tpg_np_iser);
+ if (!tpg_np_iser || IS_ERR(tpg_np_iser))
goto out;
- }
} else {
tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
- if (tpg_np_iser) {
- rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
- if (rc < 0)
- goto out;
- }
+ if (!tpg_np_iser)
+ goto out;
+
+ rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
+ if (rc < 0)
+ goto out;
}
+ printk("lio_target_np_store_iser() done, op: %d\n", op);
+
iscsit_put_tpg(tpg);
return count;
out:
iscsit_put_tpg(tpg);
- return rc;
+ return -EINVAL;
}
TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR);
diff --git a/trunk/drivers/target/iscsi/iscsi_target_erl0.c b/trunk/drivers/target/iscsi/iscsi_target_erl0.c
index dcb199da06b9..8e6298cc8839 100644
--- a/trunk/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/trunk/drivers/target/iscsi/iscsi_target_erl0.c
@@ -842,11 +842,11 @@ int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
return 0;
sess->time2retain_timer_flags |= ISCSI_TF_STOP;
- spin_unlock(&se_tpg->session_lock);
+ spin_unlock_bh(&se_tpg->session_lock);
del_timer_sync(&sess->time2retain_timer);
- spin_lock(&se_tpg->session_lock);
+ spin_lock_bh(&se_tpg->session_lock);
sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
pr_debug("Stopped Time2Retain Timer for SID: %u\n",
sess->sid);
diff --git a/trunk/drivers/target/iscsi/iscsi_target_login.c b/trunk/drivers/target/iscsi/iscsi_target_login.c
index 3402241be87c..bb5d5c5bce65 100644
--- a/trunk/drivers/target/iscsi/iscsi_target_login.c
+++ b/trunk/drivers/target/iscsi/iscsi_target_login.c
@@ -984,6 +984,8 @@ int iscsi_target_setup_login_socket(
}
np->np_transport = t;
+ printk("Set np->np_transport to %p -> %s\n", np->np_transport,
+ np->np_transport->name);
return 0;
}
@@ -1000,6 +1002,7 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
conn->sock = new_sock;
conn->login_family = np->np_sockaddr.ss_family;
+ printk("iSCSI/TCP: Setup conn->sock from new_sock: %p\n", new_sock);
if (np->np_sockaddr.ss_family == AF_INET6) {
memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
diff --git a/trunk/drivers/target/iscsi/iscsi_target_nego.c b/trunk/drivers/target/iscsi/iscsi_target_nego.c
index cd5018ff9cd7..7ad912060e21 100644
--- a/trunk/drivers/target/iscsi/iscsi_target_nego.c
+++ b/trunk/drivers/target/iscsi/iscsi_target_nego.c
@@ -721,6 +721,9 @@ int iscsi_target_locate_portal(
start += strlen(key) + strlen(value) + 2;
}
+
+ printk("i_buf: %s, s_buf: %s, t_buf: %s\n", i_buf, s_buf, t_buf);
+
/*
* See 5.3. Login Phase.
*/
diff --git a/trunk/drivers/tty/pty.c b/trunk/drivers/tty/pty.c
index abfd99089781..59bfaecc4e14 100644
--- a/trunk/drivers/tty/pty.c
+++ b/trunk/drivers/tty/pty.c
@@ -244,9 +244,14 @@ static void pty_flush_buffer(struct tty_struct *tty)
static int pty_open(struct tty_struct *tty, struct file *filp)
{
+ int retval = -ENODEV;
+
if (!tty || !tty->link)
- return -ENODEV;
+ goto out;
+
+ set_bit(TTY_IO_ERROR, &tty->flags);
+ retval = -EIO;
if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
goto out;
if (test_bit(TTY_PTY_LOCK, &tty->link->flags))
@@ -257,11 +262,9 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
clear_bit(TTY_IO_ERROR, &tty->flags);
clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
set_bit(TTY_THROTTLED, &tty->flags);
- return 0;
-
+ retval = 0;
out:
- set_bit(TTY_IO_ERROR, &tty->flags);
- return -EIO;
+ return retval;
}
static void pty_set_termios(struct tty_struct *tty,
diff --git a/trunk/drivers/tty/serial/8250/8250_gsc.c b/trunk/drivers/tty/serial/8250/8250_gsc.c
index bb91b4713ebd..097dff9c08ad 100644
--- a/trunk/drivers/tty/serial/8250/8250_gsc.c
+++ b/trunk/drivers/tty/serial/8250/8250_gsc.c
@@ -30,12 +30,6 @@ static int __init serial_init_chip(struct parisc_device *dev)
unsigned long address;
int err;
-#ifdef CONFIG_64BIT
- extern int iosapic_serial_irq(int cellnum);
- if (!dev->irq && (dev->id.sversion == 0xad))
- dev->irq = iosapic_serial_irq(dev->mod_index-1);
-#endif
-
if (!dev->irq) {
/* We find some unattached serial ports by walking native
* busses. These should be silently ignored. Otherwise,
@@ -57,8 +51,7 @@ static int __init serial_init_chip(struct parisc_device *dev)
memset(&uart, 0, sizeof(uart));
uart.port.iotype = UPIO_MEM;
/* 7.272727MHz on Lasi. Assumed the same for Dino, Wax and Timi. */
- uart.port.uartclk = (dev->id.sversion != 0xad) ?
- 7272727 : 1843200;
+ uart.port.uartclk = 7272727;
uart.port.mapbase = address;
uart.port.membase = ioremap_nocache(address, 16);
uart.port.irq = dev->irq;
@@ -80,7 +73,6 @@ static struct parisc_device_id serial_tbl[] = {
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 },
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c },
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d },
- { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x000ad },
{ 0 }
};
diff --git a/trunk/drivers/tty/vt/vt_ioctl.c b/trunk/drivers/tty/vt/vt_ioctl.c
index 2bd78e2ac8ec..fc2c06c66e89 100644
--- a/trunk/drivers/tty/vt/vt_ioctl.c
+++ b/trunk/drivers/tty/vt/vt_ioctl.c
@@ -289,10 +289,13 @@ static int vt_disallocate(unsigned int vc_num)
struct vc_data *vc = NULL;
int ret = 0;
+ if (!vc_num)
+ return 0;
+
console_lock();
if (VT_BUSY(vc_num))
ret = -EBUSY;
- else if (vc_num)
+ else
vc = vc_deallocate(vc_num);
console_unlock();
diff --git a/trunk/drivers/usb/chipidea/core.c b/trunk/drivers/usb/chipidea/core.c
index 475c9c114689..49b098bedf9b 100644
--- a/trunk/drivers/usb/chipidea/core.c
+++ b/trunk/drivers/usb/chipidea/core.c
@@ -276,9 +276,8 @@ static void ci_role_work(struct work_struct *work)
ci_role_stop(ci);
ci_role_start(ci, role);
+ enable_irq(ci->irq);
}
-
- enable_irq(ci->irq);
}
static irqreturn_t ci_irq(int irq, void *data)
diff --git a/trunk/drivers/usb/chipidea/udc.c b/trunk/drivers/usb/chipidea/udc.c
index b501346484ae..519ead2443c5 100644
--- a/trunk/drivers/usb/chipidea/udc.c
+++ b/trunk/drivers/usb/chipidea/udc.c
@@ -1678,11 +1678,8 @@ static int udc_start(struct ci13xxx *ci)
ci->gadget.ep0 = &ci->ep0in->ep;
- if (ci->global_phy) {
+ if (ci->global_phy)
ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
- if (IS_ERR(ci->transceiver))
- ci->transceiver = NULL;
- }
if (ci->platdata->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
if (ci->transceiver == NULL) {
@@ -1697,7 +1694,7 @@ static int udc_start(struct ci13xxx *ci)
goto put_transceiver;
}
- if (ci->transceiver) {
+ if (!IS_ERR_OR_NULL(ci->transceiver)) {
retval = otg_set_peripheral(ci->transceiver->otg,
&ci->gadget);
if (retval)
@@ -1714,7 +1711,7 @@ static int udc_start(struct ci13xxx *ci)
return retval;
remove_trans:
- if (ci->transceiver) {
+ if (!IS_ERR_OR_NULL(ci->transceiver)) {
otg_set_peripheral(ci->transceiver->otg, NULL);
if (ci->global_phy)
usb_put_phy(ci->transceiver);
@@ -1722,7 +1719,7 @@ static int udc_start(struct ci13xxx *ci)
dev_err(dev, "error = %i\n", retval);
put_transceiver:
- if (ci->transceiver && ci->global_phy)
+ if (!IS_ERR_OR_NULL(ci->transceiver) && ci->global_phy)
usb_put_phy(ci->transceiver);
destroy_eps:
destroy_eps(ci);
@@ -1750,7 +1747,7 @@ static void udc_stop(struct ci13xxx *ci)
dma_pool_destroy(ci->td_pool);
dma_pool_destroy(ci->qh_pool);
- if (ci->transceiver) {
+ if (!IS_ERR_OR_NULL(ci->transceiver)) {
otg_set_peripheral(ci->transceiver->otg, NULL);
if (ci->global_phy)
usb_put_phy(ci->transceiver);
diff --git a/trunk/drivers/usb/phy/Kconfig b/trunk/drivers/usb/phy/Kconfig
index 2311b1e4e43c..7ef3eb8617a6 100644
--- a/trunk/drivers/usb/phy/Kconfig
+++ b/trunk/drivers/usb/phy/Kconfig
@@ -4,17 +4,11 @@
menuconfig USB_PHY
bool "USB Physical Layer drivers"
help
- Most USB controllers have the physical layer signalling part
- (commonly called a PHY) built in. However, dual-role devices
- (a.k.a. USB on-the-go) which support being USB master or slave
- with the same connector often use an external PHY.
+ USB controllers (those which are host, device or DRD) need a
+ device to handle the physical layer signalling, commonly called
+ a PHY.
- The drivers in this submenu add support for such PHY devices.
- They are not needed for standard master-only (or the vast
- majority of slave-only) USB interfaces.
-
- If you're not sure if this applies to you, it probably doesn't;
- say N here.
+ The following drivers add support for such PHY devices.
if USB_PHY
diff --git a/trunk/drivers/usb/serial/f81232.c b/trunk/drivers/usb/serial/f81232.c
index 7d8dd5aad236..090b411d893f 100644
--- a/trunk/drivers/usb/serial/f81232.c
+++ b/trunk/drivers/usb/serial/f81232.c
@@ -165,12 +165,11 @@ static void f81232_set_termios(struct tty_struct *tty,
/* FIXME - Stubbed out for now */
/* Don't change anything if nothing has changed */
- if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
+ if (!tty_termios_hw_change(&tty->termios, old_termios))
return;
/* Do the real work here... */
- if (old_termios)
- tty_termios_copy_hw(&tty->termios, old_termios);
+ tty_termios_copy_hw(&tty->termios, old_termios);
}
static int f81232_tiocmget(struct tty_struct *tty)
@@ -188,11 +187,12 @@ static int f81232_tiocmset(struct tty_struct *tty,
static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
{
+ struct ktermios tmp_termios;
int result;
/* Setup termios */
if (tty)
- f81232_set_termios(tty, port, NULL);
+ f81232_set_termios(tty, port, &tmp_termios);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
diff --git a/trunk/drivers/usb/serial/pl2303.c b/trunk/drivers/usb/serial/pl2303.c
index 048cd44d51b1..7151659367a0 100644
--- a/trunk/drivers/usb/serial/pl2303.c
+++ b/trunk/drivers/usb/serial/pl2303.c
@@ -284,7 +284,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
serial settings even to the same values as before. Thus
we actually need to filter in this specific case */
- if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
+ if (!tty_termios_hw_change(&tty->termios, old_termios))
return;
cflag = tty->termios.c_cflag;
@@ -293,8 +293,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
if (!buf) {
dev_err(&port->dev, "%s - out of memory.\n", __func__);
/* Report back no change occurred */
- if (old_termios)
- tty->termios = *old_termios;
+ tty->termios = *old_termios;
return;
}
@@ -434,7 +433,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
control = priv->line_control;
if ((cflag & CBAUD) == B0)
priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
- else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
+ else if ((old_termios->c_cflag & CBAUD) == B0)
priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
if (control != priv->line_control) {
control = priv->line_control;
@@ -493,6 +492,7 @@ static void pl2303_close(struct usb_serial_port *port)
static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
{
+ struct ktermios tmp_termios;
struct usb_serial *serial = port->serial;
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
int result;
@@ -508,7 +508,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
/* Setup termios */
if (tty)
- pl2303_set_termios(tty, port, NULL);
+ pl2303_set_termios(tty, port, &tmp_termios);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
diff --git a/trunk/drivers/usb/serial/spcp8x5.c b/trunk/drivers/usb/serial/spcp8x5.c
index ddf6c47137dc..cf3df793c2b7 100644
--- a/trunk/drivers/usb/serial/spcp8x5.c
+++ b/trunk/drivers/usb/serial/spcp8x5.c
@@ -291,6 +291,7 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int cflag = tty->termios.c_cflag;
+ unsigned int old_cflag = old_termios->c_cflag;
unsigned short uartdata;
unsigned char buf[2] = {0, 0};
int baud;
@@ -298,15 +299,15 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
u8 control;
/* check that they really want us to change something */
- if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
+ if (!tty_termios_hw_change(&tty->termios, old_termios))
return;
/* set DTR/RTS active */
spin_lock_irqsave(&priv->lock, flags);
control = priv->line_control;
- if (old_termios && (old_termios->c_cflag & CBAUD) == B0) {
+ if ((old_cflag & CBAUD) == B0) {
priv->line_control |= MCR_DTR;
- if (!(old_termios->c_cflag & CRTSCTS))
+ if (!(old_cflag & CRTSCTS))
priv->line_control |= MCR_RTS;
}
if (control != priv->line_control) {
@@ -393,6 +394,7 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
{
+ struct ktermios tmp_termios;
struct usb_serial *serial = port->serial;
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
int ret;
@@ -409,7 +411,7 @@ static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
spcp8x5_set_ctrl_line(port, priv->line_control);
if (tty)
- spcp8x5_set_termios(tty, port, NULL);
+ spcp8x5_set_termios(tty, port, &tmp_termios);
port->port.drain_delay = 256;
diff --git a/trunk/drivers/usb/serial/ti_usb_3410_5052.c b/trunk/drivers/usb/serial/ti_usb_3410_5052.c
index e581c2549a57..c92c5ed4e580 100644
--- a/trunk/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/trunk/drivers/usb/serial/ti_usb_3410_5052.c
@@ -172,8 +172,7 @@ static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
- { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
- { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
+ { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
};
diff --git a/trunk/drivers/usb/serial/ti_usb_3410_5052.h b/trunk/drivers/usb/serial/ti_usb_3410_5052.h
index 4a2423e84d55..b353e7e3d480 100644
--- a/trunk/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/trunk/drivers/usb/serial/ti_usb_3410_5052.h
@@ -52,9 +52,7 @@
/* Abbott Diabetics vendor and product ids */
#define ABBOTT_VENDOR_ID 0x1a61
-#define ABBOTT_STEREO_PLUG_ID 0x3410
-#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID
-#define ABBOTT_STRIP_PORT_ID 0x3420
+#define ABBOTT_PRODUCT_ID 0x3410
/* Commands */
#define TI_GET_VERSION 0x01
diff --git a/trunk/drivers/vfio/pci/vfio_pci.c b/trunk/drivers/vfio/pci/vfio_pci.c
index c5179e269df6..ac3725440d64 100644
--- a/trunk/drivers/vfio/pci/vfio_pci.c
+++ b/trunk/drivers/vfio/pci/vfio_pci.c
@@ -499,6 +499,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
}
vma->vm_private_data = vdev;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
diff --git a/trunk/drivers/video/au1100fb.c b/trunk/drivers/video/au1100fb.c
index ebeb9715f061..700cac067b46 100644
--- a/trunk/drivers/video/au1100fb.c
+++ b/trunk/drivers/video/au1100fb.c
@@ -385,6 +385,8 @@ int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
+ vma->vm_flags |= VM_IO;
+
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot)) {
diff --git a/trunk/drivers/video/au1200fb.c b/trunk/drivers/video/au1200fb.c
index 301224ecc950..1b59054fc6a4 100644
--- a/trunk/drivers/video/au1200fb.c
+++ b/trunk/drivers/video/au1200fb.c
@@ -1258,9 +1258,13 @@ static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
+ vma->vm_flags |= VM_IO;
+
return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
+
+ return 0;
}
static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
diff --git a/trunk/drivers/video/pxa3xx-gcu.c b/trunk/drivers/video/pxa3xx-gcu.c
index 7cf0b13d061b..97563c55af63 100644
--- a/trunk/drivers/video/pxa3xx-gcu.c
+++ b/trunk/drivers/video/pxa3xx-gcu.c
@@ -494,6 +494,7 @@ pxa3xx_gcu_misc_mmap(struct file *file, struct vm_area_struct *vma)
if (size != resource_size(priv->resource_mem))
return -EINVAL;
+ vma->vm_flags |= VM_IO;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return io_remap_pfn_range(vma, vma->vm_start,
diff --git a/trunk/fs/9p/vfs_addr.c b/trunk/fs/9p/vfs_addr.c
index 9ff073f4090a..055562c580b4 100644
--- a/trunk/fs/9p/vfs_addr.c
+++ b/trunk/fs/9p/vfs_addr.c
@@ -148,14 +148,13 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
* @offset: offset in the page
*/
-static void v9fs_invalidate_page(struct page *page, unsigned int offset,
- unsigned int length)
+static void v9fs_invalidate_page(struct page *page, unsigned long offset)
{
/*
* If called with zero offset, we should release
* the private state assocated with the page
*/
- if (offset == 0 && length == PAGE_CACHE_SIZE)
+ if (offset == 0)
v9fs_fscache_invalidate_page(page);
}
diff --git a/trunk/fs/9p/vfs_dir.c b/trunk/fs/9p/vfs_dir.c
index 4d0c2e0be7e5..be1e34adc3c6 100644
--- a/trunk/fs/9p/vfs_dir.c
+++ b/trunk/fs/9p/vfs_dir.c
@@ -101,15 +101,16 @@ static struct p9_rdir *v9fs_alloc_rdir_buf(struct file *filp, int buflen)
}
/**
- * v9fs_dir_readdir - iterate through a directory
- * @file: opened file structure
- * @ctx: actor we feed the entries to
+ * v9fs_dir_readdir - read a directory
+ * @filp: opened file structure
+ * @dirent: directory structure ???
+ * @filldir: function to populate directory structure ???
*
*/
-static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
+static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- bool over;
+ int over;
struct p9_wstat st;
int err = 0;
struct p9_fid *fid;
@@ -117,19 +118,19 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
int reclen = 0;
struct p9_rdir *rdir;
- p9_debug(P9_DEBUG_VFS, "name %s\n", file->f_path.dentry->d_name.name);
- fid = file->private_data;
+ p9_debug(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
+ fid = filp->private_data;
buflen = fid->clnt->msize - P9_IOHDRSZ;
- rdir = v9fs_alloc_rdir_buf(file, buflen);
+ rdir = v9fs_alloc_rdir_buf(filp, buflen);
if (!rdir)
return -ENOMEM;
while (1) {
if (rdir->tail == rdir->head) {
- err = v9fs_file_readn(file, rdir->buf, NULL,
- buflen, ctx->pos);
+ err = v9fs_file_readn(filp, rdir->buf, NULL,
+ buflen, filp->f_pos);
if (err <= 0)
return err;
@@ -147,45 +148,51 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
}
reclen = st.size+2;
- over = !dir_emit(ctx, st.name, strlen(st.name),
- v9fs_qid2ino(&st.qid), dt_type(&st));
+ over = filldir(dirent, st.name, strlen(st.name),
+ filp->f_pos, v9fs_qid2ino(&st.qid), dt_type(&st));
+
p9stat_free(&st);
+
if (over)
return 0;
rdir->head += reclen;
- ctx->pos += reclen;
+ filp->f_pos += reclen;
}
}
}
/**
- * v9fs_dir_readdir_dotl - iterate through a directory
- * @file: opened file structure
- * @ctx: actor we feed the entries to
+ * v9fs_dir_readdir_dotl - read a directory
+ * @filp: opened file structure
+ * @dirent: buffer to fill dirent structures
+ * @filldir: function to populate dirent structures
*
*/
-static int v9fs_dir_readdir_dotl(struct file *file, struct dir_context *ctx)
+static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent,
+ filldir_t filldir)
{
+ int over;
int err = 0;
struct p9_fid *fid;
int buflen;
struct p9_rdir *rdir;
struct p9_dirent curdirent;
+ u64 oldoffset = 0;
- p9_debug(P9_DEBUG_VFS, "name %s\n", file->f_path.dentry->d_name.name);
- fid = file->private_data;
+ p9_debug(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
+ fid = filp->private_data;
buflen = fid->clnt->msize - P9_READDIRHDRSZ;
- rdir = v9fs_alloc_rdir_buf(file, buflen);
+ rdir = v9fs_alloc_rdir_buf(filp, buflen);
if (!rdir)
return -ENOMEM;
while (1) {
if (rdir->tail == rdir->head) {
err = p9_client_readdir(fid, rdir->buf, buflen,
- ctx->pos);
+ filp->f_pos);
if (err <= 0)
return err;
@@ -203,13 +210,22 @@ static int v9fs_dir_readdir_dotl(struct file *file, struct dir_context *ctx)
return -EIO;
}
- if (!dir_emit(ctx, curdirent.d_name,
- strlen(curdirent.d_name),
- v9fs_qid2ino(&curdirent.qid),
- curdirent.d_type))
+ /* d_off in dirent structure tracks the offset into
+ * the next dirent in the dir. However, filldir()
+ * expects offset into the current dirent. Hence
+ * while calling filldir send the offset from the
+ * previous dirent structure.
+ */
+ over = filldir(dirent, curdirent.d_name,
+ strlen(curdirent.d_name),
+ oldoffset, v9fs_qid2ino(&curdirent.qid),
+ curdirent.d_type);
+ oldoffset = curdirent.d_off;
+
+ if (over)
return 0;
- ctx->pos = curdirent.d_off;
+ filp->f_pos = curdirent.d_off;
rdir->head += err;
}
}
@@ -238,7 +254,7 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
const struct file_operations v9fs_dir_operations = {
.read = generic_read_dir,
.llseek = generic_file_llseek,
- .iterate = v9fs_dir_readdir,
+ .readdir = v9fs_dir_readdir,
.open = v9fs_file_open,
.release = v9fs_dir_release,
};
@@ -246,7 +262,7 @@ const struct file_operations v9fs_dir_operations = {
const struct file_operations v9fs_dir_operations_dotl = {
.read = generic_read_dir,
.llseek = generic_file_llseek,
- .iterate = v9fs_dir_readdir_dotl,
+ .readdir = v9fs_dir_readdir_dotl,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.fsync = v9fs_file_fsync_dotl,
diff --git a/trunk/fs/adfs/dir.c b/trunk/fs/adfs/dir.c
index ade28bb058e3..9cf874ce8336 100644
--- a/trunk/fs/adfs/dir.c
+++ b/trunk/fs/adfs/dir.c
@@ -17,43 +17,47 @@
static DEFINE_RWLOCK(adfs_dir_lock);
static int
-adfs_readdir(struct file *file, struct dir_context *ctx)
+adfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
struct object_info obj;
struct adfs_dir dir;
int ret = 0;
- if (ctx->pos >> 32)
- return 0;
+ if (filp->f_pos >> 32)
+ goto out;
ret = ops->read(sb, inode->i_ino, inode->i_size, &dir);
if (ret)
- return ret;
+ goto out;
- if (ctx->pos == 0) {
- if (!dir_emit_dot(file, ctx))
+ switch ((unsigned long)filp->f_pos) {
+ case 0:
+ if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
goto free_out;
- ctx->pos = 1;
- }
- if (ctx->pos == 1) {
- if (!dir_emit(ctx, "..", 2, dir.parent_id, DT_DIR))
+ filp->f_pos += 1;
+
+ case 1:
+ if (filldir(dirent, "..", 2, 1, dir.parent_id, DT_DIR) < 0)
goto free_out;
- ctx->pos = 2;
+ filp->f_pos += 1;
+
+ default:
+ break;
}
read_lock(&adfs_dir_lock);
- ret = ops->setpos(&dir, ctx->pos - 2);
+ ret = ops->setpos(&dir, filp->f_pos - 2);
if (ret)
goto unlock_out;
while (ops->getnext(&dir, &obj) == 0) {
- if (!dir_emit(ctx, obj.name, obj.name_len,
- obj.file_id, DT_UNKNOWN))
- break;
- ctx->pos++;
+ if (filldir(dirent, obj.name, obj.name_len,
+ filp->f_pos, obj.file_id, DT_UNKNOWN) < 0)
+ goto unlock_out;
+ filp->f_pos += 1;
}
unlock_out:
@@ -61,6 +65,8 @@ adfs_readdir(struct file *file, struct dir_context *ctx)
free_out:
ops->free(&dir);
+
+out:
return ret;
}
@@ -186,7 +192,7 @@ adfs_dir_lookup_byname(struct inode *inode, struct qstr *name, struct object_inf
const struct file_operations adfs_dir_operations = {
.read = generic_read_dir,
.llseek = generic_file_llseek,
- .iterate = adfs_readdir,
+ .readdir = adfs_readdir,
.fsync = generic_file_fsync,
};
diff --git a/trunk/fs/affs/dir.c b/trunk/fs/affs/dir.c
index f1eba8c3644e..fd11a6d608ee 100644
--- a/trunk/fs/affs/dir.c
+++ b/trunk/fs/affs/dir.c
@@ -15,12 +15,12 @@
#include "affs.h"
-static int affs_readdir(struct file *, struct dir_context *);
+static int affs_readdir(struct file *, void *, filldir_t);
const struct file_operations affs_dir_operations = {
.read = generic_read_dir,
.llseek = generic_file_llseek,
- .iterate = affs_readdir,
+ .readdir = affs_readdir,
.fsync = affs_file_fsync,
};
@@ -40,35 +40,52 @@ const struct inode_operations affs_dir_inode_operations = {
};
static int
-affs_readdir(struct file *file, struct dir_context *ctx)
+affs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
- struct buffer_head *dir_bh = NULL;
- struct buffer_head *fh_bh = NULL;
+ struct buffer_head *dir_bh;
+ struct buffer_head *fh_bh;
unsigned char *name;
int namelen;
u32 i;
int hash_pos;
int chain_pos;
+ u32 f_pos;
u32 ino;
+ int stored;
+ int res;
- pr_debug("AFFS: readdir(ino=%lu,f_pos=%lx)\n",inode->i_ino,(unsigned long)ctx->pos);
+ pr_debug("AFFS: readdir(ino=%lu,f_pos=%lx)\n",inode->i_ino,(unsigned long)filp->f_pos);
- if (ctx->pos < 2) {
- file->private_data = (void *)0;
- if (!dir_emit_dots(file, ctx))
+ stored = 0;
+ res = -EIO;
+ dir_bh = NULL;
+ fh_bh = NULL;
+ f_pos = filp->f_pos;
+
+ if (f_pos == 0) {
+ filp->private_data = (void *)0;
+ if (filldir(dirent, ".", 1, f_pos, inode->i_ino, DT_DIR) < 0)
return 0;
+ filp->f_pos = f_pos = 1;
+ stored++;
+ }
+ if (f_pos == 1) {
+ if (filldir(dirent, "..", 2, f_pos, parent_ino(filp->f_path.dentry), DT_DIR) < 0)
+ return stored;
+ filp->f_pos = f_pos = 2;
+ stored++;
}
affs_lock_dir(inode);
- chain_pos = (ctx->pos - 2) & 0xffff;
- hash_pos = (ctx->pos - 2) >> 16;
+ chain_pos = (f_pos - 2) & 0xffff;
+ hash_pos = (f_pos - 2) >> 16;
if (chain_pos == 0xffff) {
affs_warning(sb, "readdir", "More than 65535 entries in chain");
chain_pos = 0;
hash_pos++;
- ctx->pos = ((hash_pos << 16) | chain_pos) + 2;
+ filp->f_pos = ((hash_pos << 16) | chain_pos) + 2;
}
dir_bh = affs_bread(sb, inode->i_ino);
if (!dir_bh)
@@ -77,8 +94,8 @@ affs_readdir(struct file *file, struct dir_context *ctx)
/* If the directory hasn't changed since the last call to readdir(),
* we can jump directly to where we left off.
*/
- ino = (u32)(long)file->private_data;
- if (ino && file->f_version == inode->i_version) {
+ ino = (u32)(long)filp->private_data;
+ if (ino && filp->f_version == inode->i_version) {
pr_debug("AFFS: readdir() left off=%d\n", ino);
goto inside;
}
@@ -88,7 +105,7 @@ affs_readdir(struct file *file, struct dir_context *ctx)
fh_bh = affs_bread(sb, ino);
if (!fh_bh) {
affs_error(sb, "readdir","Cannot read block %d", i);
- return -EIO;
+ goto readdir_out;
}
ino = be32_to_cpu(AFFS_TAIL(sb, fh_bh)->hash_chain);
affs_brelse(fh_bh);
@@ -102,34 +119,38 @@ affs_readdir(struct file *file, struct dir_context *ctx)
ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[hash_pos]);
if (!ino)
continue;
- ctx->pos = (hash_pos << 16) + 2;
+ f_pos = (hash_pos << 16) + 2;
inside:
do {
fh_bh = affs_bread(sb, ino);
if (!fh_bh) {
affs_error(sb, "readdir","Cannot read block %d", ino);
- break;
+ goto readdir_done;
}
namelen = min(AFFS_TAIL(sb, fh_bh)->name[0], (u8)30);
name = AFFS_TAIL(sb, fh_bh)->name + 1;
pr_debug("AFFS: readdir(): filldir(\"%.*s\", ino=%u), hash=%d, f_pos=%x\n",
- namelen, name, ino, hash_pos, (u32)ctx->pos);
- if (!dir_emit(ctx, name, namelen, ino, DT_UNKNOWN))
+ namelen, name, ino, hash_pos, f_pos);
+ if (filldir(dirent, name, namelen, f_pos, ino, DT_UNKNOWN) < 0)
goto readdir_done;
- ctx->pos++;
+ stored++;
+ f_pos++;
ino = be32_to_cpu(AFFS_TAIL(sb, fh_bh)->hash_chain);
affs_brelse(fh_bh);
fh_bh = NULL;
} while (ino);
}
readdir_done:
- file->f_version = inode->i_version;
- file->private_data = (void *)(long)ino;
+ filp->f_pos = f_pos;
+ filp->f_version = inode->i_version;
+ filp->private_data = (void *)(long)ino;
+ res = stored;
readdir_out:
affs_brelse(dir_bh);
affs_brelse(fh_bh);
affs_unlock_dir(inode);
- return 0;
+ pr_debug("AFFS: readdir()=%d\n", stored);
+ return res;
}
diff --git a/trunk/fs/afs/dir.c b/trunk/fs/afs/dir.c
index 34494fbead0a..7a465ed04444 100644
--- a/trunk/fs/afs/dir.c
+++ b/trunk/fs/afs/dir.c
@@ -22,7 +22,7 @@
static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags);
static int afs_dir_open(struct inode *inode, struct file *file);
-static int afs_readdir(struct file *file, struct dir_context *ctx);
+static int afs_readdir(struct file *file, void *dirent, filldir_t filldir);
static int afs_d_revalidate(struct dentry *dentry, unsigned int flags);
static int afs_d_delete(const struct dentry *dentry);
static void afs_d_release(struct dentry *dentry);
@@ -43,7 +43,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
const struct file_operations afs_dir_file_operations = {
.open = afs_dir_open,
.release = afs_release,
- .iterate = afs_readdir,
+ .readdir = afs_readdir,
.lock = afs_lock,
.llseek = generic_file_llseek,
};
@@ -119,9 +119,9 @@ struct afs_dir_page {
};
struct afs_lookup_cookie {
- struct dir_context ctx;
struct afs_fid fid;
- struct qstr name;
+ const char *name;
+ size_t nlen;
int found;
};
@@ -228,18 +228,20 @@ static int afs_dir_open(struct inode *inode, struct file *file)
/*
* deal with one block in an AFS directory
*/
-static int afs_dir_iterate_block(struct dir_context *ctx,
+static int afs_dir_iterate_block(unsigned *fpos,
union afs_dir_block *block,
- unsigned blkoff)
+ unsigned blkoff,
+ void *cookie,
+ filldir_t filldir)
{
union afs_dirent *dire;
unsigned offset, next, curr;
size_t nlen;
- int tmp;
+ int tmp, ret;
- _enter("%u,%x,%p,,",(unsigned)ctx->pos,blkoff,block);
+ _enter("%u,%x,%p,,",*fpos,blkoff,block);
- curr = (ctx->pos - blkoff) / sizeof(union afs_dirent);
+ curr = (*fpos - blkoff) / sizeof(union afs_dirent);
/* walk through the block, an entry at a time */
for (offset = AFS_DIRENT_PER_BLOCK - block->pagehdr.nentries;
@@ -254,7 +256,7 @@ static int afs_dir_iterate_block(struct dir_context *ctx,
_debug("ENT[%Zu.%u]: unused",
blkoff / sizeof(union afs_dir_block), offset);
if (offset >= curr)
- ctx->pos = blkoff +
+ *fpos = blkoff +
next * sizeof(union afs_dirent);
continue;
}
@@ -300,15 +302,19 @@ static int afs_dir_iterate_block(struct dir_context *ctx,
continue;
/* found the next entry */
- if (!dir_emit(ctx, dire->u.name, nlen,
+ ret = filldir(cookie,
+ dire->u.name,
+ nlen,
+ blkoff + offset * sizeof(union afs_dirent),
ntohl(dire->u.vnode),
- ctx->actor == afs_lookup_filldir ?
- ntohl(dire->u.unique) : DT_UNKNOWN)) {
+ filldir == afs_lookup_filldir ?
+ ntohl(dire->u.unique) : DT_UNKNOWN);
+ if (ret < 0) {
_leave(" = 0 [full]");
return 0;
}
- ctx->pos = blkoff + next * sizeof(union afs_dirent);
+ *fpos = blkoff + next * sizeof(union afs_dirent);
}
_leave(" = 1 [more]");
@@ -318,8 +324,8 @@ static int afs_dir_iterate_block(struct dir_context *ctx,
/*
* iterate through the data blob that lists the contents of an AFS directory
*/
-static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
- struct key *key)
+static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie,
+ filldir_t filldir, struct key *key)
{
union afs_dir_block *dblock;
struct afs_dir_page *dbuf;
@@ -327,7 +333,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
unsigned blkoff, limit;
int ret;
- _enter("{%lu},%u,,", dir->i_ino, (unsigned)ctx->pos);
+ _enter("{%lu},%u,,", dir->i_ino, *fpos);
if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) {
_leave(" = -ESTALE");
@@ -335,13 +341,13 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
}
/* round the file position up to the next entry boundary */
- ctx->pos += sizeof(union afs_dirent) - 1;
- ctx->pos &= ~(sizeof(union afs_dirent) - 1);
+ *fpos += sizeof(union afs_dirent) - 1;
+ *fpos &= ~(sizeof(union afs_dirent) - 1);
/* walk through the blocks in sequence */
ret = 0;
- while (ctx->pos < dir->i_size) {
- blkoff = ctx->pos & ~(sizeof(union afs_dir_block) - 1);
+ while (*fpos < dir->i_size) {
+ blkoff = *fpos & ~(sizeof(union afs_dir_block) - 1);
/* fetch the appropriate page from the directory */
page = afs_dir_get_page(dir, blkoff / PAGE_SIZE, key);
@@ -358,7 +364,8 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
do {
dblock = &dbuf->blocks[(blkoff % PAGE_SIZE) /
sizeof(union afs_dir_block)];
- ret = afs_dir_iterate_block(ctx, dblock, blkoff);
+ ret = afs_dir_iterate_block(fpos, dblock, blkoff,
+ cookie, filldir);
if (ret != 1) {
afs_dir_put_page(page);
goto out;
@@ -366,7 +373,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
blkoff += sizeof(union afs_dir_block);
- } while (ctx->pos < dir->i_size && blkoff < limit);
+ } while (*fpos < dir->i_size && blkoff < limit);
afs_dir_put_page(page);
ret = 0;
@@ -380,10 +387,23 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
/*
* read an AFS directory
*/
-static int afs_readdir(struct file *file, struct dir_context *ctx)
+static int afs_readdir(struct file *file, void *cookie, filldir_t filldir)
{
- return afs_dir_iterate(file_inode(file),
- ctx, file->private_data);
+ unsigned fpos;
+ int ret;
+
+ _enter("{%Ld,{%lu}}",
+ file->f_pos, file_inode(file)->i_ino);
+
+ ASSERT(file->private_data != NULL);
+
+ fpos = file->f_pos;
+ ret = afs_dir_iterate(file_inode(file), &fpos,
+ cookie, filldir, file->private_data);
+ file->f_pos = fpos;
+
+ _leave(" = %d", ret);
+ return ret;
}
/*
@@ -396,16 +416,15 @@ static int afs_lookup_filldir(void *_cookie, const char *name, int nlen,
{
struct afs_lookup_cookie *cookie = _cookie;
- _enter("{%s,%u},%s,%u,,%llu,%u",
- cookie->name.name, cookie->name.len, name, nlen,
+ _enter("{%s,%Zu},%s,%u,,%llu,%u",
+ cookie->name, cookie->nlen, name, nlen,
(unsigned long long) ino, dtype);
/* insanity checks first */
BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048);
BUILD_BUG_ON(sizeof(union afs_dirent) != 32);
- if (cookie->name.len != nlen ||
- memcmp(cookie->name.name, name, nlen) != 0) {
+ if (cookie->nlen != nlen || memcmp(cookie->name, name, nlen) != 0) {
_leave(" = 0 [no]");
return 0;
}
@@ -425,18 +444,24 @@ static int afs_lookup_filldir(void *_cookie, const char *name, int nlen,
static int afs_do_lookup(struct inode *dir, struct dentry *dentry,
struct afs_fid *fid, struct key *key)
{
- struct afs_super_info *as = dir->i_sb->s_fs_info;
- struct afs_lookup_cookie cookie = {
- .ctx.actor = afs_lookup_filldir,
- .name = dentry->d_name,
- .fid.vid = as->volume->vid
- };
+ struct afs_lookup_cookie cookie;
+ struct afs_super_info *as;
+ unsigned fpos;
int ret;
_enter("{%lu},%p{%s},", dir->i_ino, dentry, dentry->d_name.name);
+ as = dir->i_sb->s_fs_info;
+
/* search the directory */
- ret = afs_dir_iterate(dir, &cookie.ctx, key);
+ cookie.name = dentry->d_name.name;
+ cookie.nlen = dentry->d_name.len;
+ cookie.fid.vid = as->volume->vid;
+ cookie.found = 0;
+
+ fpos = 0;
+ ret = afs_dir_iterate(dir, &fpos, &cookie, afs_lookup_filldir,
+ key);
if (ret < 0) {
_leave(" = %d [iter]", ret);
return ret;
diff --git a/trunk/fs/afs/file.c b/trunk/fs/afs/file.c
index 66d50fe2ee45..8f6e9234d565 100644
--- a/trunk/fs/afs/file.c
+++ b/trunk/fs/afs/file.c
@@ -19,8 +19,7 @@
#include "internal.h"
static int afs_readpage(struct file *file, struct page *page);
-static void afs_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
+static void afs_invalidatepage(struct page *page, unsigned long offset);
static int afs_releasepage(struct page *page, gfp_t gfp_flags);
static int afs_launder_page(struct page *page);
@@ -311,17 +310,16 @@ static int afs_launder_page(struct page *page)
* - release a page and clean up its private data if offset is 0 (indicating
* the entire page)
*/
-static void afs_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void afs_invalidatepage(struct page *page, unsigned long offset)
{
struct afs_writeback *wb = (struct afs_writeback *) page_private(page);
- _enter("{%lu},%u,%u", page->index, offset, length);
+ _enter("{%lu},%lu", page->index, offset);
BUG_ON(!PageLocked(page));
/* we clean up only if the entire page is being invalidated */
- if (offset == 0 && length == PAGE_CACHE_SIZE) {
+ if (offset == 0) {
#ifdef CONFIG_AFS_FSCACHE
if (PageFsCache(page)) {
struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
diff --git a/trunk/fs/autofs4/root.c b/trunk/fs/autofs4/root.c
index ca8e55548d98..085da86e07c2 100644
--- a/trunk/fs/autofs4/root.c
+++ b/trunk/fs/autofs4/root.c
@@ -41,7 +41,7 @@ const struct file_operations autofs4_root_operations = {
.open = dcache_dir_open,
.release = dcache_dir_close,
.read = generic_read_dir,
- .iterate = dcache_readdir,
+ .readdir = dcache_readdir,
.llseek = dcache_dir_lseek,
.unlocked_ioctl = autofs4_root_ioctl,
#ifdef CONFIG_COMPAT
@@ -53,7 +53,7 @@ const struct file_operations autofs4_dir_operations = {
.open = autofs4_dir_open,
.release = dcache_dir_close,
.read = generic_read_dir,
- .iterate = dcache_readdir,
+ .readdir = dcache_readdir,
.llseek = dcache_dir_lseek,
};
diff --git a/trunk/fs/bad_inode.c b/trunk/fs/bad_inode.c
index 7c93953030fb..922ad460bff9 100644
--- a/trunk/fs/bad_inode.c
+++ b/trunk/fs/bad_inode.c
@@ -45,7 +45,7 @@ static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
return -EIO;
}
-static int bad_file_readdir(struct file *file, struct dir_context *ctx)
+static int bad_file_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
return -EIO;
}
@@ -152,7 +152,7 @@ static const struct file_operations bad_file_ops =
.write = bad_file_write,
.aio_read = bad_file_aio_read,
.aio_write = bad_file_aio_write,
- .iterate = bad_file_readdir,
+ .readdir = bad_file_readdir,
.poll = bad_file_poll,
.unlocked_ioctl = bad_file_unlocked_ioctl,
.compat_ioctl = bad_file_compat_ioctl,
diff --git a/trunk/fs/befs/linuxvfs.c b/trunk/fs/befs/linuxvfs.c
index e9c75e20db32..f95dddced968 100644
--- a/trunk/fs/befs/linuxvfs.c
+++ b/trunk/fs/befs/linuxvfs.c
@@ -31,7 +31,7 @@ MODULE_LICENSE("GPL");
/* The units the vfs expects inode->i_blocks to be in */
#define VFS_BLOCK_SIZE 512
-static int befs_readdir(struct file *, struct dir_context *);
+static int befs_readdir(struct file *, void *, filldir_t);
static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int);
static int befs_readpage(struct file *file, struct page *page);
static sector_t befs_bmap(struct address_space *mapping, sector_t block);
@@ -66,7 +66,7 @@ static struct kmem_cache *befs_inode_cachep;
static const struct file_operations befs_dir_operations = {
.read = generic_read_dir,
- .iterate = befs_readdir,
+ .readdir = befs_readdir,
.llseek = generic_file_llseek,
};
@@ -211,9 +211,9 @@ befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
}
static int
-befs_readdir(struct file *file, struct dir_context *ctx)
+befs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
befs_data_stream *ds = &BEFS_I(inode)->i_data.ds;
befs_off_t value;
@@ -221,14 +221,15 @@ befs_readdir(struct file *file, struct dir_context *ctx)
size_t keysize;
unsigned char d_type;
char keybuf[BEFS_NAME_LEN + 1];
- const char *dirname = file->f_path.dentry->d_name.name;
+ char *nlsname;
+ int nlsnamelen;
+ const char *dirname = filp->f_path.dentry->d_name.name;
befs_debug(sb, "---> befs_readdir() "
- "name %s, inode %ld, ctx->pos %Ld",
- dirname, inode->i_ino, ctx->pos);
+ "name %s, inode %ld, filp->f_pos %Ld",
+ dirname, inode->i_ino, filp->f_pos);
-more:
- result = befs_btree_read(sb, ds, ctx->pos, BEFS_NAME_LEN + 1,
+ result = befs_btree_read(sb, ds, filp->f_pos, BEFS_NAME_LEN + 1,
keybuf, &keysize, &value);
if (result == BEFS_ERR) {
@@ -250,29 +251,24 @@ befs_readdir(struct file *file, struct dir_context *ctx)
/* Convert to NLS */
if (BEFS_SB(sb)->nls) {
- char *nlsname;
- int nlsnamelen;
result =
befs_utf2nls(sb, keybuf, keysize, &nlsname, &nlsnamelen);
if (result < 0) {
befs_debug(sb, "<--- befs_readdir() ERROR");
return result;
}
- if (!dir_emit(ctx, nlsname, nlsnamelen,
- (ino_t) value, d_type)) {
- kfree(nlsname);
- return 0;
- }
+ result = filldir(dirent, nlsname, nlsnamelen, filp->f_pos,
+ (ino_t) value, d_type);
kfree(nlsname);
+
} else {
- if (!dir_emit(ctx, keybuf, keysize,
- (ino_t) value, d_type))
- return 0;
+ result = filldir(dirent, keybuf, keysize, filp->f_pos,
+ (ino_t) value, d_type);
}
- ctx->pos++;
- goto more;
+ if (!result)
+ filp->f_pos++;
- befs_debug(sb, "<--- befs_readdir() pos %Ld", ctx->pos);
+ befs_debug(sb, "<--- befs_readdir() filp->f_pos %Ld", filp->f_pos);
return 0;
}
diff --git a/trunk/fs/bfs/dir.c b/trunk/fs/bfs/dir.c
index a399e6d9dc74..3f422f6bb5ca 100644
--- a/trunk/fs/bfs/dir.c
+++ b/trunk/fs/bfs/dir.c
@@ -26,51 +26,58 @@ static struct buffer_head *bfs_find_entry(struct inode *dir,
const unsigned char *name, int namelen,
struct bfs_dirent **res_dir);
-static int bfs_readdir(struct file *f, struct dir_context *ctx)
+static int bfs_readdir(struct file *f, void *dirent, filldir_t filldir)
{
struct inode *dir = file_inode(f);
struct buffer_head *bh;
struct bfs_dirent *de;
+ struct bfs_sb_info *info = BFS_SB(dir->i_sb);
unsigned int offset;
int block;
- if (ctx->pos & (BFS_DIRENT_SIZE - 1)) {
+ mutex_lock(&info->bfs_lock);
+
+ if (f->f_pos & (BFS_DIRENT_SIZE - 1)) {
printf("Bad f_pos=%08lx for %s:%08lx\n",
- (unsigned long)ctx->pos,
+ (unsigned long)f->f_pos,
dir->i_sb->s_id, dir->i_ino);
- return -EINVAL;
+ mutex_unlock(&info->bfs_lock);
+ return -EBADF;
}
- while (ctx->pos < dir->i_size) {
- offset = ctx->pos & (BFS_BSIZE - 1);
- block = BFS_I(dir)->i_sblock + (ctx->pos >> BFS_BSIZE_BITS);
+ while (f->f_pos < dir->i_size) {
+ offset = f->f_pos & (BFS_BSIZE - 1);
+ block = BFS_I(dir)->i_sblock + (f->f_pos >> BFS_BSIZE_BITS);
bh = sb_bread(dir->i_sb, block);
if (!bh) {
- ctx->pos += BFS_BSIZE - offset;
+ f->f_pos += BFS_BSIZE - offset;
continue;
}
do {
de = (struct bfs_dirent *)(bh->b_data + offset);
if (de->ino) {
int size = strnlen(de->name, BFS_NAMELEN);
- if (!dir_emit(ctx, de->name, size,
+ if (filldir(dirent, de->name, size, f->f_pos,
le16_to_cpu(de->ino),
- DT_UNKNOWN)) {
+ DT_UNKNOWN) < 0) {
brelse(bh);
+ mutex_unlock(&info->bfs_lock);
return 0;
}
}
offset += BFS_DIRENT_SIZE;
- ctx->pos += BFS_DIRENT_SIZE;
- } while ((offset < BFS_BSIZE) && (ctx->pos < dir->i_size));
+ f->f_pos += BFS_DIRENT_SIZE;
+ } while ((offset < BFS_BSIZE) && (f->f_pos < dir->i_size));
brelse(bh);
}
- return 0;
+
+ mutex_unlock(&info->bfs_lock);
+ return 0;
}
const struct file_operations bfs_dir_operations = {
.read = generic_read_dir,
- .iterate = bfs_readdir,
+ .readdir = bfs_readdir,
.fsync = generic_file_fsync,
.llseek = generic_file_llseek,
};
diff --git a/trunk/fs/btrfs/delayed-inode.c b/trunk/fs/btrfs/delayed-inode.c
index eb34438ddedb..f26f38ccd194 100644
--- a/trunk/fs/btrfs/delayed-inode.c
+++ b/trunk/fs/btrfs/delayed-inode.c
@@ -1681,7 +1681,8 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
* btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
*
*/
-int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
+int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
+ filldir_t filldir,
struct list_head *ins_list)
{
struct btrfs_dir_item *di;
@@ -1703,13 +1704,13 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
list_del(&curr->readdir_list);
- if (curr->key.offset < ctx->pos) {
+ if (curr->key.offset < filp->f_pos) {
if (atomic_dec_and_test(&curr->refs))
kfree(curr);
continue;
}
- ctx->pos = curr->key.offset;
+ filp->f_pos = curr->key.offset;
di = (struct btrfs_dir_item *)curr->data;
name = (char *)(di + 1);
@@ -1718,7 +1719,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
d_type = btrfs_filetype_table[di->type];
btrfs_disk_key_to_cpu(&location, &di->location);
- over = !dir_emit(ctx, name, name_len,
+ over = filldir(dirent, name, name_len, curr->key.offset,
location.objectid, d_type);
if (atomic_dec_and_test(&curr->refs))
diff --git a/trunk/fs/btrfs/delayed-inode.h b/trunk/fs/btrfs/delayed-inode.h
index a4b38f934d14..1d5c5f7abe3e 100644
--- a/trunk/fs/btrfs/delayed-inode.h
+++ b/trunk/fs/btrfs/delayed-inode.h
@@ -139,7 +139,8 @@ void btrfs_put_delayed_items(struct list_head *ins_list,
struct list_head *del_list);
int btrfs_should_delete_dir_index(struct list_head *del_list,
u64 index);
-int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
+int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
+ filldir_t filldir,
struct list_head *ins_list);
/* for init */
diff --git a/trunk/fs/btrfs/disk-io.c b/trunk/fs/btrfs/disk-io.c
index b0292b3ead54..b8b60b660c8f 100644
--- a/trunk/fs/btrfs/disk-io.c
+++ b/trunk/fs/btrfs/disk-io.c
@@ -1013,8 +1013,7 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags)
return try_release_extent_buffer(page);
}
-static void btree_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void btree_invalidatepage(struct page *page, unsigned long offset)
{
struct extent_io_tree *tree;
tree = &BTRFS_I(page->mapping->host)->io_tree;
diff --git a/trunk/fs/btrfs/extent_io.c b/trunk/fs/btrfs/extent_io.c
index 6bca9472f313..e7e7afb4a872 100644
--- a/trunk/fs/btrfs/extent_io.c
+++ b/trunk/fs/btrfs/extent_io.c
@@ -2957,7 +2957,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
if (page->index > end_index ||
(page->index == end_index && !pg_offset)) {
- page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ page->mapping->a_ops->invalidatepage(page, 0);
unlock_page(page);
return 0;
}
diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c
index 4f9d16b70d3d..17f3064b4a3e 100644
--- a/trunk/fs/btrfs/inode.c
+++ b/trunk/fs/btrfs/inode.c
@@ -5137,9 +5137,10 @@ unsigned char btrfs_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
-static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
+static int btrfs_real_readdir(struct file *filp, void *dirent,
+ filldir_t filldir)
{
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_item *item;
struct btrfs_dir_item *di;
@@ -5160,15 +5161,29 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
char tmp_name[32];
char *name_ptr;
int name_len;
- int is_curr = 0; /* ctx->pos points to the current index? */
+ int is_curr = 0; /* filp->f_pos points to the current index? */
/* FIXME, use a real flag for deciding about the key type */
if (root->fs_info->tree_root == root)
key_type = BTRFS_DIR_ITEM_KEY;
- if (!dir_emit_dots(file, ctx))
- return 0;
-
+ /* special case for "." */
+ if (filp->f_pos == 0) {
+ over = filldir(dirent, ".", 1,
+ filp->f_pos, btrfs_ino(inode), DT_DIR);
+ if (over)
+ return 0;
+ filp->f_pos = 1;
+ }
+ /* special case for .., just use the back ref */
+ if (filp->f_pos == 1) {
+ u64 pino = parent_ino(filp->f_path.dentry);
+ over = filldir(dirent, "..", 2,
+ filp->f_pos, pino, DT_DIR);
+ if (over)
+ return 0;
+ filp->f_pos = 2;
+ }
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -5182,7 +5197,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
}
btrfs_set_key_type(&key, key_type);
- key.offset = ctx->pos;
+ key.offset = filp->f_pos;
key.objectid = btrfs_ino(inode);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -5208,14 +5223,14 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
break;
if (btrfs_key_type(&found_key) != key_type)
break;
- if (found_key.offset < ctx->pos)
+ if (found_key.offset < filp->f_pos)
goto next;
if (key_type == BTRFS_DIR_INDEX_KEY &&
btrfs_should_delete_dir_index(&del_list,
found_key.offset))
goto next;
- ctx->pos = found_key.offset;
+ filp->f_pos = found_key.offset;
is_curr = 1;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
@@ -5259,8 +5274,9 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
over = 0;
goto skip;
}
- over = !dir_emit(ctx, name_ptr, name_len,
- location.objectid, d_type);
+ over = filldir(dirent, name_ptr, name_len,
+ found_key.offset, location.objectid,
+ d_type);
skip:
if (name_ptr != tmp_name)
@@ -5279,8 +5295,9 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
if (key_type == BTRFS_DIR_INDEX_KEY) {
if (is_curr)
- ctx->pos++;
- ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
+ filp->f_pos++;
+ ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
+ &ins_list);
if (ret)
goto nopos;
}
@@ -5291,9 +5308,9 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
* 32-bit glibc will use getdents64, but then strtol -
* so the last number we can serve is this.
*/
- ctx->pos = 0x7fffffff;
+ filp->f_pos = 0x7fffffff;
else
- ctx->pos++;
+ filp->f_pos++;
nopos:
ret = 0;
err:
@@ -7493,8 +7510,7 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
}
-static void btrfs_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void btrfs_invalidatepage(struct page *page, unsigned long offset)
{
struct inode *inode = page->mapping->host;
struct extent_io_tree *tree;
@@ -8715,7 +8731,7 @@ static const struct inode_operations btrfs_dir_ro_inode_operations = {
static const struct file_operations btrfs_dir_file_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = btrfs_real_readdir,
+ .readdir = btrfs_real_readdir,
.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = btrfs_ioctl,
diff --git a/trunk/fs/buffer.c b/trunk/fs/buffer.c
index f93392e2df12..d2a4d1bb2d57 100644
--- a/trunk/fs/buffer.c
+++ b/trunk/fs/buffer.c
@@ -1454,8 +1454,7 @@ static void discard_buffer(struct buffer_head * bh)
* block_invalidatepage - invalidate part or all of a buffer-backed page
*
* @page: the page which is affected
- * @offset: start of the range to invalidate
- * @length: length of the range to invalidate
+ * @offset: the index of the truncation point
*
* block_invalidatepage() is called when all or part of the page has become
* invalidated by a truncate operation.
@@ -1466,34 +1465,21 @@ static void discard_buffer(struct buffer_head * bh)
* point. Because the caller is about to free (and possibly reuse) those
* blocks on-disk.
*/
-void block_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+void block_invalidatepage(struct page *page, unsigned long offset)
{
struct buffer_head *head, *bh, *next;
unsigned int curr_off = 0;
- unsigned int stop = length + offset;
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
goto out;
- /*
- * Check for overflow
- */
- BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
-
head = page_buffers(page);
bh = head;
do {
unsigned int next_off = curr_off + bh->b_size;
next = bh->b_this_page;
- /*
- * Are we still fully in range ?
- */
- if (next_off > stop)
- goto out;
-
/*
* is this block fully invalidated?
*/
@@ -1515,7 +1501,6 @@ void block_invalidatepage(struct page *page, unsigned int offset,
}
EXPORT_SYMBOL(block_invalidatepage);
-
/*
* We attach and possibly dirty the buffers atomically wrt
* __set_page_dirty_buffers() via private_lock. try_to_free_buffers
@@ -2856,7 +2841,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block,
* they may have been added in ext3_writepage(). Make them
* freeable here, so the page does not leak.
*/
- do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ do_invalidatepage(page, 0);
unlock_page(page);
return 0; /* don't care */
}
diff --git a/trunk/fs/ceph/addr.c b/trunk/fs/ceph/addr.c
index 38b5c1bc6776..3e68ac101040 100644
--- a/trunk/fs/ceph/addr.c
+++ b/trunk/fs/ceph/addr.c
@@ -143,8 +143,7 @@ static int ceph_set_page_dirty(struct page *page)
* dirty page counters appropriately. Only called if there is private
* data on the page.
*/
-static void ceph_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void ceph_invalidatepage(struct page *page, unsigned long offset)
{
struct inode *inode;
struct ceph_inode_info *ci;
@@ -164,20 +163,20 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset,
if (!PageDirty(page))
pr_err("%p invalidatepage %p page not dirty\n", inode, page);
- if (offset == 0 && length == PAGE_CACHE_SIZE)
+ if (offset == 0)
ClearPageChecked(page);
ci = ceph_inode(inode);
- if (offset == 0 && length == PAGE_CACHE_SIZE) {
- dout("%p invalidatepage %p idx %lu full dirty page\n",
- inode, page, page->index);
+ if (offset == 0) {
+ dout("%p invalidatepage %p idx %lu full dirty page %lu\n",
+ inode, page, page->index, offset);
ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
ceph_put_snap_context(snapc);
page->private = 0;
ClearPagePrivate(page);
} else {
- dout("%p invalidatepage %p idx %lu partial dirty page %u(%u)\n",
- inode, page, page->index, offset, length);
+ dout("%p invalidatepage %p idx %lu partial dirty page\n",
+ inode, page, page->index);
}
}
diff --git a/trunk/fs/ceph/dir.c b/trunk/fs/ceph/dir.c
index a40ceda47a32..f02d82b7933e 100644
--- a/trunk/fs/ceph/dir.c
+++ b/trunk/fs/ceph/dir.c
@@ -111,10 +111,11 @@ static unsigned fpos_off(loff_t p)
* defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
* the MDS if/when the directory is modified).
*/
-static int __dcache_readdir(struct file *file, struct dir_context *ctx)
+static int __dcache_readdir(struct file *filp,
+ void *dirent, filldir_t filldir)
{
- struct ceph_file_info *fi = file->private_data;
- struct dentry *parent = file->f_dentry;
+ struct ceph_file_info *fi = filp->private_data;
+ struct dentry *parent = filp->f_dentry;
struct inode *dir = parent->d_inode;
struct list_head *p;
struct dentry *dentry, *last;
@@ -125,14 +126,14 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx)
last = fi->dentry;
fi->dentry = NULL;
- dout("__dcache_readdir %p at %llu (last %p)\n", dir, ctx->pos,
+ dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
last);
spin_lock(&parent->d_lock);
/* start at beginning? */
- if (ctx->pos == 2 || last == NULL ||
- ctx->pos < ceph_dentry(last)->offset) {
+ if (filp->f_pos == 2 || last == NULL ||
+ filp->f_pos < ceph_dentry(last)->offset) {
if (list_empty(&parent->d_subdirs))
goto out_unlock;
p = parent->d_subdirs.prev;
@@ -156,11 +157,11 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx)
if (!d_unhashed(dentry) && dentry->d_inode &&
ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
- ctx->pos <= di->offset)
+ filp->f_pos <= di->offset)
break;
dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
dentry->d_name.len, dentry->d_name.name, di->offset,
- ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
+ filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
!dentry->d_inode ? " null" : "");
spin_unlock(&dentry->d_lock);
p = p->prev;
@@ -172,27 +173,29 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx)
spin_unlock(&dentry->d_lock);
spin_unlock(&parent->d_lock);
- dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
+ dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
- ctx->pos = di->offset;
- if (!dir_emit(ctx, dentry->d_name.name,
- dentry->d_name.len,
+ filp->f_pos = di->offset;
+ err = filldir(dirent, dentry->d_name.name,
+ dentry->d_name.len, di->offset,
ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
- dentry->d_inode->i_mode >> 12)) {
- if (last) {
+ dentry->d_inode->i_mode >> 12);
+
+ if (last) {
+ if (err < 0) {
/* remember our position */
fi->dentry = last;
fi->next_offset = di->offset;
+ } else {
+ dput(last);
}
- dput(dentry);
- return 0;
}
-
- if (last)
- dput(last);
last = dentry;
- ctx->pos++;
+ if (err < 0)
+ goto out;
+
+ filp->f_pos++;
/* make sure a dentry wasn't dropped while we didn't have parent lock */
if (!ceph_dir_is_complete(dir)) {
@@ -232,59 +235,59 @@ static int note_last_dentry(struct ceph_file_info *fi, const char *name,
return 0;
}
-static int ceph_readdir(struct file *file, struct dir_context *ctx)
+static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct ceph_file_info *fi = file->private_data;
- struct inode *inode = file_inode(file);
+ struct ceph_file_info *fi = filp->private_data;
+ struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
- unsigned frag = fpos_frag(ctx->pos);
- int off = fpos_off(ctx->pos);
+ unsigned frag = fpos_frag(filp->f_pos);
+ int off = fpos_off(filp->f_pos);
int err;
u32 ftype;
struct ceph_mds_reply_info_parsed *rinfo;
const int max_entries = fsc->mount_options->max_readdir;
const int max_bytes = fsc->mount_options->max_readdir_bytes;
- dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
+ dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
if (fi->flags & CEPH_F_ATEND)
return 0;
/* always start with . and .. */
- if (ctx->pos == 0) {
+ if (filp->f_pos == 0) {
/* note dir version at start of readdir so we can tell
* if any dentries get dropped */
fi->dir_release_count = atomic_read(&ci->i_release_count);
dout("readdir off 0 -> '.'\n");
- if (!dir_emit(ctx, ".", 1,
+ if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
ceph_translate_ino(inode->i_sb, inode->i_ino),
- inode->i_mode >> 12))
+ inode->i_mode >> 12) < 0)
return 0;
- ctx->pos = 1;
+ filp->f_pos = 1;
off = 1;
}
- if (ctx->pos == 1) {
- ino_t ino = parent_ino(file->f_dentry);
+ if (filp->f_pos == 1) {
+ ino_t ino = parent_ino(filp->f_dentry);
dout("readdir off 1 -> '..'\n");
- if (!dir_emit(ctx, "..", 2,
+ if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
ceph_translate_ino(inode->i_sb, ino),
- inode->i_mode >> 12))
+ inode->i_mode >> 12) < 0)
return 0;
- ctx->pos = 2;
+ filp->f_pos = 2;
off = 2;
}
/* can we use the dcache? */
spin_lock(&ci->i_ceph_lock);
- if ((ctx->pos == 2 || fi->dentry) &&
+ if ((filp->f_pos == 2 || fi->dentry) &&
!ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
ceph_snap(inode) != CEPH_SNAPDIR &&
__ceph_dir_is_complete(ci) &&
__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
spin_unlock(&ci->i_ceph_lock);
- err = __dcache_readdir(file, ctx);
+ err = __dcache_readdir(filp, dirent, filldir);
if (err != -EAGAIN)
return err;
} else {
@@ -324,7 +327,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
return PTR_ERR(req);
req->r_inode = inode;
ihold(inode);
- req->r_dentry = dget(file->f_dentry);
+ req->r_dentry = dget(filp->f_dentry);
/* hints to request -> mds selection code */
req->r_direct_mode = USE_AUTH_MDS;
req->r_direct_hash = ceph_frag_value(frag);
@@ -376,16 +379,15 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
rinfo = &fi->last_readdir->r_reply_info;
dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
rinfo->dir_nr, off, fi->offset);
-
- ctx->pos = ceph_make_fpos(frag, off);
while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
+ u64 pos = ceph_make_fpos(frag, off);
struct ceph_mds_reply_inode *in =
rinfo->dir_in[off - fi->offset].in;
struct ceph_vino vino;
ino_t ino;
dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
- off, off - fi->offset, rinfo->dir_nr, ctx->pos,
+ off, off - fi->offset, rinfo->dir_nr, pos,
rinfo->dir_dname_len[off - fi->offset],
rinfo->dir_dname[off - fi->offset], in);
BUG_ON(!in);
@@ -393,15 +395,16 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
vino.ino = le64_to_cpu(in->ino);
vino.snap = le64_to_cpu(in->snapid);
ino = ceph_vino_to_ino(vino);
- if (!dir_emit(ctx,
+ if (filldir(dirent,
rinfo->dir_dname[off - fi->offset],
rinfo->dir_dname_len[off - fi->offset],
- ceph_translate_ino(inode->i_sb, ino), ftype)) {
+ pos,
+ ceph_translate_ino(inode->i_sb, ino), ftype) < 0) {
dout("filldir stopping us...\n");
return 0;
}
off++;
- ctx->pos++;
+ filp->f_pos = pos + 1;
}
if (fi->last_name) {
@@ -414,7 +417,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
if (!ceph_frag_is_rightmost(frag)) {
frag = ceph_frag_next(frag);
off = 0;
- ctx->pos = ceph_make_fpos(frag, off);
+ filp->f_pos = ceph_make_fpos(frag, off);
dout("readdir next frag is %x\n", frag);
goto more;
}
@@ -429,11 +432,11 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
dout(" marking %p complete\n", inode);
__ceph_dir_set_complete(ci, fi->dir_release_count);
- ci->i_max_offset = ctx->pos;
+ ci->i_max_offset = filp->f_pos;
}
spin_unlock(&ci->i_ceph_lock);
- dout("readdir %p file %p done.\n", inode, file);
+ dout("readdir %p filp %p done.\n", inode, filp);
return 0;
}
@@ -1265,7 +1268,7 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
const struct file_operations ceph_dir_fops = {
.read = ceph_read_dir,
- .iterate = ceph_readdir,
+ .readdir = ceph_readdir,
.llseek = ceph_dir_llseek,
.open = ceph_open,
.release = ceph_release,
diff --git a/trunk/fs/cifs/cifsfs.c b/trunk/fs/cifs/cifsfs.c
index 540c1ccfcdb2..3752b9f6d9e4 100644
--- a/trunk/fs/cifs/cifsfs.c
+++ b/trunk/fs/cifs/cifsfs.c
@@ -968,7 +968,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
};
const struct file_operations cifs_dir_ops = {
- .iterate = cifs_readdir,
+ .readdir = cifs_readdir,
.release = cifs_closedir,
.read = generic_read_dir,
.unlocked_ioctl = cifs_ioctl,
diff --git a/trunk/fs/cifs/cifsfs.h b/trunk/fs/cifs/cifsfs.h
index d05b3028e3b9..0e32c3446ce9 100644
--- a/trunk/fs/cifs/cifsfs.h
+++ b/trunk/fs/cifs/cifsfs.h
@@ -101,7 +101,7 @@ extern int cifs_file_mmap(struct file * , struct vm_area_struct *);
extern int cifs_file_strict_mmap(struct file * , struct vm_area_struct *);
extern const struct file_operations cifs_dir_ops;
extern int cifs_dir_open(struct inode *inode, struct file *file);
-extern int cifs_readdir(struct file *file, struct dir_context *ctx);
+extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir);
/* Functions related to dir entries */
extern const struct dentry_operations cifs_dentry_ops;
diff --git a/trunk/fs/cifs/file.c b/trunk/fs/cifs/file.c
index 4d8ba8d491e5..48b29d24c9f4 100644
--- a/trunk/fs/cifs/file.c
+++ b/trunk/fs/cifs/file.c
@@ -3546,12 +3546,11 @@ static int cifs_release_page(struct page *page, gfp_t gfp)
return cifs_fscache_release_page(page, gfp);
}
-static void cifs_invalidate_page(struct page *page, unsigned int offset,
- unsigned int length)
+static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
- if (offset == 0 && length == PAGE_CACHE_SIZE)
+ if (offset == 0)
cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}
diff --git a/trunk/fs/cifs/readdir.c b/trunk/fs/cifs/readdir.c
index f1213799de1a..770d5a9781c1 100644
--- a/trunk/fs/cifs/readdir.c
+++ b/trunk/fs/cifs/readdir.c
@@ -537,14 +537,14 @@ static int cifs_save_resume_key(const char *current_entry,
* every entry (do not increment for . or .. entry).
*/
static int
-find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
+find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon,
struct file *file, char **current_entry, int *num_to_ret)
{
__u16 search_flags;
int rc = 0;
int pos_in_buf = 0;
loff_t first_entry_in_buffer;
- loff_t index_to_find = pos;
+ loff_t index_to_find = file->f_pos;
struct cifsFileInfo *cfile = file->private_data;
struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
struct TCP_Server_Info *server = tcon->ses->server;
@@ -659,9 +659,8 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
return rc;
}
-static int cifs_filldir(char *find_entry, struct file *file,
- struct dir_context *ctx,
- char *scratch_buf, unsigned int max_len)
+static int cifs_filldir(char *find_entry, struct file *file, filldir_t filldir,
+ void *dirent, char *scratch_buf, unsigned int max_len)
{
struct cifsFileInfo *file_info = file->private_data;
struct super_block *sb = file->f_path.dentry->d_sb;
@@ -741,11 +740,13 @@ static int cifs_filldir(char *find_entry, struct file *file,
cifs_prime_dcache(file->f_dentry, &name, &fattr);
ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
- return !dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype);
+ rc = filldir(dirent, name.name, name.len, file->f_pos, ino,
+ fattr.cf_dtype);
+ return rc;
}
-int cifs_readdir(struct file *file, struct dir_context *ctx)
+int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
{
int rc = 0;
unsigned int xid;
@@ -771,86 +772,103 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
goto rddir2_exit;
}
- if (!dir_emit_dots(file, ctx))
- goto rddir2_exit;
-
- /* 1) If search is active,
- is in current search buffer?
- if it before then restart search
- if after then keep searching till find it */
-
- if (file->private_data == NULL) {
- rc = -EINVAL;
- goto rddir2_exit;
- }
- cifsFile = file->private_data;
- if (cifsFile->srch_inf.endOfSearch) {
- if (cifsFile->srch_inf.emptyDir) {
- cifs_dbg(FYI, "End of search, empty dir\n");
- rc = 0;
- goto rddir2_exit;
+ switch ((int) file->f_pos) {
+ case 0:
+ if (filldir(direntry, ".", 1, file->f_pos,
+ file_inode(file)->i_ino, DT_DIR) < 0) {
+ cifs_dbg(VFS, "Filldir for current dir failed\n");
+ rc = -ENOMEM;
+ break;
}
- } /* else {
- cifsFile->invalidHandle = true;
- tcon->ses->server->close(xid, tcon, &cifsFile->fid);
- } */
-
- tcon = tlink_tcon(cifsFile->tlink);
- rc = find_cifs_entry(xid, tcon, ctx->pos, file, ¤t_entry,
- &num_to_fill);
- if (rc) {
- cifs_dbg(FYI, "fce error %d\n", rc);
- goto rddir2_exit;
- } else if (current_entry != NULL) {
- cifs_dbg(FYI, "entry %lld found\n", ctx->pos);
- } else {
- cifs_dbg(FYI, "could not find entry\n");
- goto rddir2_exit;
- }
- cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n",
- num_to_fill, cifsFile->srch_inf.ntwrk_buf_start);
- max_len = tcon->ses->server->ops->calc_smb_size(
- cifsFile->srch_inf.ntwrk_buf_start);
- end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
-
- tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL);
- if (tmp_buf == NULL) {
- rc = -ENOMEM;
- goto rddir2_exit;
- }
-
- for (i = 0; i < num_to_fill; i++) {
- if (current_entry == NULL) {
- /* evaluate whether this case is an error */
- cifs_dbg(VFS, "past SMB end, num to fill %d i %d\n",
- num_to_fill, i);
+ file->f_pos++;
+ case 1:
+ if (filldir(direntry, "..", 2, file->f_pos,
+ parent_ino(file->f_path.dentry), DT_DIR) < 0) {
+ cifs_dbg(VFS, "Filldir for parent dir failed\n");
+ rc = -ENOMEM;
break;
}
- /*
- * if buggy server returns . and .. late do we want to
- * check for that here?
- */
- rc = cifs_filldir(current_entry, file, ctx,
- tmp_buf, max_len);
- if (rc) {
- if (rc > 0)
+ file->f_pos++;
+ default:
+ /* 1) If search is active,
+ is in current search buffer?
+ if it before then restart search
+ if after then keep searching till find it */
+
+ if (file->private_data == NULL) {
+ rc = -EINVAL;
+ free_xid(xid);
+ return rc;
+ }
+ cifsFile = file->private_data;
+ if (cifsFile->srch_inf.endOfSearch) {
+ if (cifsFile->srch_inf.emptyDir) {
+ cifs_dbg(FYI, "End of search, empty dir\n");
rc = 0;
+ break;
+ }
+ } /* else {
+ cifsFile->invalidHandle = true;
+ tcon->ses->server->close(xid, tcon, &cifsFile->fid);
+ } */
+
+ tcon = tlink_tcon(cifsFile->tlink);
+ rc = find_cifs_entry(xid, tcon, file, &current_entry,
+ &num_to_fill);
+ if (rc) {
+ cifs_dbg(FYI, "fce error %d\n", rc);
+ goto rddir2_exit;
+ } else if (current_entry != NULL) {
+ cifs_dbg(FYI, "entry %lld found\n", file->f_pos);
+ } else {
+ cifs_dbg(FYI, "could not find entry\n");
+ goto rddir2_exit;
+ }
+ cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n",
+ num_to_fill, cifsFile->srch_inf.ntwrk_buf_start);
+ max_len = tcon->ses->server->ops->calc_smb_size(
+ cifsFile->srch_inf.ntwrk_buf_start);
+ end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
+
+ tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL);
+ if (tmp_buf == NULL) {
+ rc = -ENOMEM;
break;
}
- ctx->pos++;
- if (ctx->pos ==
- cifsFile->srch_inf.index_of_last_entry) {
- cifs_dbg(FYI, "last entry in buf at pos %lld %s\n",
- ctx->pos, tmp_buf);
- cifs_save_resume_key(current_entry, cifsFile);
- break;
- } else
- current_entry =
- nxt_dir_entry(current_entry, end_of_smb,
- cifsFile->srch_inf.info_level);
- }
- kfree(tmp_buf);
+ for (i = 0; (i < num_to_fill) && (rc == 0); i++) {
+ if (current_entry == NULL) {
+ /* evaluate whether this case is an error */
+ cifs_dbg(VFS, "past SMB end, num to fill %d i %d\n",
+ num_to_fill, i);
+ break;
+ }
+ /*
+ * if buggy server returns . and .. late do we want to
+ * check for that here?
+ */
+ rc = cifs_filldir(current_entry, file, filldir,
+ direntry, tmp_buf, max_len);
+ if (rc == -EOVERFLOW) {
+ rc = 0;
+ break;
+ }
+
+ file->f_pos++;
+ if (file->f_pos ==
+ cifsFile->srch_inf.index_of_last_entry) {
+ cifs_dbg(FYI, "last entry in buf at pos %lld %s\n",
+ file->f_pos, tmp_buf);
+ cifs_save_resume_key(current_entry, cifsFile);
+ break;
+ } else
+ current_entry =
+ nxt_dir_entry(current_entry, end_of_smb,
+ cifsFile->srch_inf.info_level);
+ }
+ kfree(tmp_buf);
+ break;
+ } /* end switch */
rddir2_exit:
free_xid(xid);
diff --git a/trunk/fs/coda/dir.c b/trunk/fs/coda/dir.c
index 87e0ee9f4465..b7d3a05c062c 100644
--- a/trunk/fs/coda/dir.c
+++ b/trunk/fs/coda/dir.c
@@ -43,14 +43,15 @@ static int coda_rename(struct inode *old_inode, struct dentry *old_dentry,
struct inode *new_inode, struct dentry *new_dentry);
/* dir file-ops */
-static int coda_readdir(struct file *file, struct dir_context *ctx);
+static int coda_readdir(struct file *file, void *buf, filldir_t filldir);
/* dentry ops */
static int coda_dentry_revalidate(struct dentry *de, unsigned int flags);
static int coda_dentry_delete(const struct dentry *);
/* support routines */
-static int coda_venus_readdir(struct file *, struct dir_context *);
+static int coda_venus_readdir(struct file *coda_file, void *buf,
+ filldir_t filldir);
/* same as fs/bad_inode.c */
static int coda_return_EIO(void)
@@ -84,7 +85,7 @@ const struct inode_operations coda_dir_inode_operations =
const struct file_operations coda_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = coda_readdir,
+ .readdir = coda_readdir,
.open = coda_open,
.release = coda_release,
.fsync = coda_fsync,
@@ -377,7 +378,7 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
/* file operations for directories */
-static int coda_readdir(struct file *coda_file, struct dir_context *ctx)
+static int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir)
{
struct coda_file_info *cfi;
struct file *host_file;
@@ -390,19 +391,30 @@ static int coda_readdir(struct file *coda_file, struct dir_context *ctx)
if (!host_file->f_op)
return -ENOTDIR;
- if (host_file->f_op->iterate) {
+ if (host_file->f_op->readdir)
+ {
+ /* potemkin case: we were handed a directory inode.
+ * We can't use vfs_readdir because we have to keep the file
+ * position in sync between the coda_file and the host_file.
+ * and as such we need grab the inode mutex. */
struct inode *host_inode = file_inode(host_file);
+
mutex_lock(&host_inode->i_mutex);
+ host_file->f_pos = coda_file->f_pos;
+
ret = -ENOENT;
if (!IS_DEADDIR(host_inode)) {
- ret = host_file->f_op->iterate(host_file, ctx);
+ ret = host_file->f_op->readdir(host_file, buf, filldir);
file_accessed(host_file);
}
+
+ coda_file->f_pos = host_file->f_pos;
mutex_unlock(&host_inode->i_mutex);
- return ret;
}
- /* Venus: we must read Venus dirents from a file */
- return coda_venus_readdir(coda_file, ctx);
+ else /* Venus: we must read Venus dirents from a file */
+ ret = coda_venus_readdir(coda_file, buf, filldir);
+
+ return ret;
}
static inline unsigned int CDT2DT(unsigned char cdt)
@@ -425,8 +437,10 @@ static inline unsigned int CDT2DT(unsigned char cdt)
}
/* support routines */
-static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx)
+static int coda_venus_readdir(struct file *coda_file, void *buf,
+ filldir_t filldir)
{
+ int result = 0; /* # of entries returned */
struct coda_file_info *cfi;
struct coda_inode_info *cii;
struct file *host_file;
@@ -448,12 +462,23 @@ static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx)
vdir = kmalloc(sizeof(*vdir), GFP_KERNEL);
if (!vdir) return -ENOMEM;
- if (!dir_emit_dots(coda_file, ctx))
- goto out;
-
+ if (coda_file->f_pos == 0) {
+ ret = filldir(buf, ".", 1, 0, de->d_inode->i_ino, DT_DIR);
+ if (ret < 0)
+ goto out;
+ result++;
+ coda_file->f_pos++;
+ }
+ if (coda_file->f_pos == 1) {
+ ret = filldir(buf, "..", 2, 1, parent_ino(de), DT_DIR);
+ if (ret < 0)
+ goto out;
+ result++;
+ coda_file->f_pos++;
+ }
while (1) {
/* read entries from the directory file */
- ret = kernel_read(host_file, ctx->pos - 2, (char *)vdir,
+ ret = kernel_read(host_file, coda_file->f_pos - 2, (char *)vdir,
sizeof(*vdir));
if (ret < 0) {
printk(KERN_ERR "coda readdir: read dir %s failed %d\n",
@@ -482,7 +507,7 @@ static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx)
/* Make sure we skip '.' and '..', we already got those */
if (name.name[0] == '.' && (name.len == 1 ||
- (name.name[1] == '.' && name.len == 2)))
+ (vdir->d_name[1] == '.' && name.len == 2)))
vdir->d_fileno = name.len = 0;
/* skip null entries */
@@ -495,16 +520,19 @@ static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx)
if (!ino) ino = vdir->d_fileno;
type = CDT2DT(vdir->d_type);
- if (!dir_emit(ctx, name.name, name.len, ino, type))
- break;
+ ret = filldir(buf, name.name, name.len,
+ coda_file->f_pos, ino, type);
+ /* failure means no space for filling in this round */
+ if (ret < 0) break;
+ result++;
}
/* we'll always have progress because d_reclen is unsigned and
* we've already established it is non-zero. */
- ctx->pos += vdir->d_reclen;
+ coda_file->f_pos += vdir->d_reclen;
}
out:
kfree(vdir);
- return 0;
+ return result ? result : ret;
}
/* called when a cache lookup succeeds */
diff --git a/trunk/fs/compat.c b/trunk/fs/compat.c
index 6af20de2c1a3..fc3b55dce184 100644
--- a/trunk/fs/compat.c
+++ b/trunk/fs/compat.c
@@ -832,7 +832,6 @@ struct compat_old_linux_dirent {
};
struct compat_readdir_callback {
- struct dir_context ctx;
struct compat_old_linux_dirent __user *dirent;
int result;
};
@@ -874,15 +873,15 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
{
int error;
struct fd f = fdget(fd);
- struct compat_readdir_callback buf = {
- .ctx.actor = compat_fillonedir,
- .dirent = dirent
- };
+ struct compat_readdir_callback buf;
if (!f.file)
return -EBADF;
- error = iterate_dir(f.file, &buf.ctx);
+ buf.result = 0;
+ buf.dirent = dirent;
+
+ error = vfs_readdir(f.file, compat_fillonedir, &buf);
if (buf.result)
error = buf.result;
@@ -898,7 +897,6 @@ struct compat_linux_dirent {
};
struct compat_getdents_callback {
- struct dir_context ctx;
struct compat_linux_dirent __user *current_dir;
struct compat_linux_dirent __user *previous;
int count;
@@ -953,11 +951,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
{
struct fd f;
struct compat_linux_dirent __user * lastdirent;
- struct compat_getdents_callback buf = {
- .ctx.actor = compat_filldir,
- .current_dir = dirent,
- .count = count
- };
+ struct compat_getdents_callback buf;
int error;
if (!access_ok(VERIFY_WRITE, dirent, count))
@@ -967,12 +961,17 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
if (!f.file)
return -EBADF;
- error = iterate_dir(f.file, &buf.ctx);
+ buf.current_dir = dirent;
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
+
+ error = vfs_readdir(f.file, compat_filldir, &buf);
if (error >= 0)
error = buf.error;
lastdirent = buf.previous;
if (lastdirent) {
- if (put_user(buf.ctx.pos, &lastdirent->d_off))
+ if (put_user(f.file->f_pos, &lastdirent->d_off))
error = -EFAULT;
else
error = count - buf.count;
@@ -984,7 +983,6 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
#ifndef __ARCH_OMIT_COMPAT_SYS_GETDENTS64
struct compat_getdents_callback64 {
- struct dir_context ctx;
struct linux_dirent64 __user *current_dir;
struct linux_dirent64 __user *previous;
int count;
@@ -1038,11 +1036,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
{
struct fd f;
struct linux_dirent64 __user * lastdirent;
- struct compat_getdents_callback64 buf = {
- .ctx.actor = compat_filldir64,
- .current_dir = dirent,
- .count = count
- };
+ struct compat_getdents_callback64 buf;
int error;
if (!access_ok(VERIFY_WRITE, dirent, count))
@@ -1052,12 +1046,17 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
if (!f.file)
return -EBADF;
- error = iterate_dir(f.file, &buf.ctx);
+ buf.current_dir = dirent;
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
+
+ error = vfs_readdir(f.file, compat_filldir64, &buf);
if (error >= 0)
error = buf.error;
lastdirent = buf.previous;
if (lastdirent) {
- typeof(lastdirent->d_off) d_off = buf.ctx.pos;
+ typeof(lastdirent->d_off) d_off = f.file->f_pos;
if (__put_user_unaligned(d_off, &lastdirent->d_off))
error = -EFAULT;
else
diff --git a/trunk/fs/compat_ioctl.c b/trunk/fs/compat_ioctl.c
index 5d19acfa7c6c..996cdc5abb85 100644
--- a/trunk/fs/compat_ioctl.c
+++ b/trunk/fs/compat_ioctl.c
@@ -66,6 +66,7 @@
#include <linux/gfp.h>
#ifdef CONFIG_BLOCK
+#include <linux/loop.h>
#include <linux/cdrom.h>
#include <linux/fd.h>
#include <scsi/scsi.h>
@@ -953,6 +954,8 @@ COMPATIBLE_IOCTL(MTIOCTOP)
/* Socket level stuff */
COMPATIBLE_IOCTL(FIOQSIZE)
#ifdef CONFIG_BLOCK
+/* loop */
+IGNORE_IOCTL(LOOP_CLR_FD)
/* md calls this on random blockdevs */
IGNORE_IOCTL(RAID_VERSION)
/* qemu/qemu-img might call these two on plain files for probing */
diff --git a/trunk/fs/configfs/dir.c b/trunk/fs/configfs/dir.c
index 64e5323cbbb0..7aabc6ad4e9b 100644
--- a/trunk/fs/configfs/dir.c
+++ b/trunk/fs/configfs/dir.c
@@ -1532,66 +1532,84 @@ static inline unsigned char dt_type(struct configfs_dirent *sd)
return (sd->s_mode >> 12) & 15;
}
-static int configfs_readdir(struct file *file, struct dir_context *ctx)
+static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
- struct dentry *dentry = file->f_path.dentry;
+ struct dentry *dentry = filp->f_path.dentry;
struct super_block *sb = dentry->d_sb;
struct configfs_dirent * parent_sd = dentry->d_fsdata;
- struct configfs_dirent *cursor = file->private_data;
+ struct configfs_dirent *cursor = filp->private_data;
struct list_head *p, *q = &cursor->s_sibling;
ino_t ino = 0;
+ int i = filp->f_pos;
- if (!dir_emit_dots(file, ctx))
- return 0;
- if (ctx->pos == 2) {
- spin_lock(&configfs_dirent_lock);
- list_move(q, &parent_sd->s_children);
- spin_unlock(&configfs_dirent_lock);
- }
- for (p = q->next; p != &parent_sd->s_children; p = p->next) {
- struct configfs_dirent *next;
- const char *name;
- int len;
- struct inode *inode = NULL;
-
- next = list_entry(p, struct configfs_dirent, s_sibling);
- if (!next->s_element)
- continue;
-
- name = configfs_get_name(next);
- len = strlen(name);
+ switch (i) {
+ case 0:
+ ino = dentry->d_inode->i_ino;
+ if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
+ break;
+ filp->f_pos++;
+ i++;
+ /* fallthrough */
+ case 1:
+ ino = parent_ino(dentry);
+ if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
+ break;
+ filp->f_pos++;
+ i++;
+ /* fallthrough */
+ default:
+ if (filp->f_pos == 2) {
+ spin_lock(&configfs_dirent_lock);
+ list_move(q, &parent_sd->s_children);
+ spin_unlock(&configfs_dirent_lock);
+ }
+ for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
+ struct configfs_dirent *next;
+ const char * name;
+ int len;
+ struct inode *inode = NULL;
- /*
- * We'll have a dentry and an inode for
- * PINNED items and for open attribute
- * files. We lock here to prevent a race
- * with configfs_d_iput() clearing
- * s_dentry before calling iput().
- *
- * Why do we go to the trouble? If
- * someone has an attribute file open,
- * the inode number should match until
- * they close it. Beyond that, we don't
- * care.
- */
- spin_lock(&configfs_dirent_lock);
- dentry = next->s_dentry;
- if (dentry)
- inode = dentry->d_inode;
- if (inode)
- ino = inode->i_ino;
- spin_unlock(&configfs_dirent_lock);
- if (!inode)
- ino = iunique(sb, 2);
+ next = list_entry(p, struct configfs_dirent,
+ s_sibling);
+ if (!next->s_element)
+ continue;
+
+ name = configfs_get_name(next);
+ len = strlen(name);
+
+ /*
+ * We'll have a dentry and an inode for
+ * PINNED items and for open attribute
+ * files. We lock here to prevent a race
+ * with configfs_d_iput() clearing
+ * s_dentry before calling iput().
+ *
+ * Why do we go to the trouble? If
+ * someone has an attribute file open,
+ * the inode number should match until
+ * they close it. Beyond that, we don't
+ * care.
+ */
+ spin_lock(&configfs_dirent_lock);
+ dentry = next->s_dentry;
+ if (dentry)
+ inode = dentry->d_inode;
+ if (inode)
+ ino = inode->i_ino;
+ spin_unlock(&configfs_dirent_lock);
+ if (!inode)
+ ino = iunique(sb, 2);
- if (!dir_emit(ctx, name, len, ino, dt_type(next)))
- return 0;
+ if (filldir(dirent, name, len, filp->f_pos, ino,
+ dt_type(next)) < 0)
+ return 0;
- spin_lock(&configfs_dirent_lock);
- list_move(q, p);
- spin_unlock(&configfs_dirent_lock);
- p = q;
- ctx->pos++;
+ spin_lock(&configfs_dirent_lock);
+ list_move(q, p);
+ spin_unlock(&configfs_dirent_lock);
+ p = q;
+ filp->f_pos++;
+ }
}
return 0;
}
@@ -1643,7 +1661,7 @@ const struct file_operations configfs_dir_operations = {
.release = configfs_dir_close,
.llseek = configfs_dir_lseek,
.read = generic_read_dir,
- .iterate = configfs_readdir,
+ .readdir = configfs_readdir,
};
int configfs_register_subsystem(struct configfs_subsystem *subsys)
diff --git a/trunk/fs/cramfs/inode.c b/trunk/fs/cramfs/inode.c
index e501ac3a49ff..35b1c7bd18b7 100644
--- a/trunk/fs/cramfs/inode.c
+++ b/trunk/fs/cramfs/inode.c
@@ -349,17 +349,18 @@ static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
/*
* Read a cramfs directory entry.
*/
-static int cramfs_readdir(struct file *file, struct dir_context *ctx)
+static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
char *buf;
unsigned int offset;
+ int copied;
/* Offset within the thing. */
- if (ctx->pos >= inode->i_size)
+ offset = filp->f_pos;
+ if (offset >= inode->i_size)
return 0;
- offset = ctx->pos;
/* Directory entries are always 4-byte aligned */
if (offset & 3)
return -EINVAL;
@@ -368,13 +369,14 @@ static int cramfs_readdir(struct file *file, struct dir_context *ctx)
if (!buf)
return -ENOMEM;
+ copied = 0;
while (offset < inode->i_size) {
struct cramfs_inode *de;
unsigned long nextoffset;
char *name;
ino_t ino;
umode_t mode;
- int namelen;
+ int namelen, error;
mutex_lock(&read_mutex);
de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
@@ -400,10 +402,13 @@ static int cramfs_readdir(struct file *file, struct dir_context *ctx)
break;
namelen--;
}
- if (!dir_emit(ctx, buf, namelen, ino, mode >> 12))
+ error = filldir(dirent, buf, namelen, offset, ino, mode >> 12);
+ if (error)
break;
- ctx->pos = offset = nextoffset;
+ offset = nextoffset;
+ filp->f_pos = offset;
+ copied++;
}
kfree(buf);
return 0;
@@ -542,7 +547,7 @@ static const struct address_space_operations cramfs_aops = {
static const struct file_operations cramfs_directory_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = cramfs_readdir,
+ .readdir = cramfs_readdir,
};
static const struct inode_operations cramfs_dir_inode_operations = {
diff --git a/trunk/fs/dcache.c b/trunk/fs/dcache.c
index 5a23073138df..f09b9085f7d8 100644
--- a/trunk/fs/dcache.c
+++ b/trunk/fs/dcache.c
@@ -1612,10 +1612,6 @@ EXPORT_SYMBOL(d_obtain_alias);
* If a dentry was found and moved, then it is returned. Otherwise NULL
* is returned. This matches the expected return value of ->lookup.
*
- * Cluster filesystems may call this function with a negative, hashed dentry.
- * In that case, we know that the inode will be a regular file, and also this
- * will only occur during atomic_open. So we need to check for the dentry
- * being already hashed only in the final case.
*/
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
@@ -1640,11 +1636,8 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
security_d_instantiate(dentry, inode);
d_rehash(dentry);
}
- } else {
- d_instantiate(dentry, inode);
- if (d_unhashed(dentry))
- d_rehash(dentry);
- }
+ } else
+ d_add(dentry, inode);
return new;
}
EXPORT_SYMBOL(d_splice_alias);
diff --git a/trunk/fs/dlm/lowcomms.c b/trunk/fs/dlm/lowcomms.c
index d0ccd2fd79eb..efbe7af42002 100644
--- a/trunk/fs/dlm/lowcomms.c
+++ b/trunk/fs/dlm/lowcomms.c
@@ -664,7 +664,7 @@ static void process_sctp_notification(struct connection *con,
/* Send any pending writes */
clear_bit(CF_CONNECT_PENDING, &new_con->flags);
- clear_bit(CF_INIT_PENDING, &con->flags);
+ clear_bit(CF_INIT_PENDING, &new_con->flags);
if (!test_and_set_bit(CF_WRITE_PENDING, &new_con->flags)) {
queue_work(send_workqueue, &new_con->swork);
}
diff --git a/trunk/fs/ecryptfs/file.c b/trunk/fs/ecryptfs/file.c
index 9aa05e08060b..a7abbea2c096 100644
--- a/trunk/fs/ecryptfs/file.c
+++ b/trunk/fs/ecryptfs/file.c
@@ -68,9 +68,9 @@ static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb,
}
struct ecryptfs_getdents_callback {
- struct dir_context ctx;
- struct dir_context *caller;
+ void *dirent;
struct dentry *dentry;
+ filldir_t filldir;
int filldir_called;
int entries_written;
};
@@ -96,10 +96,9 @@ ecryptfs_filldir(void *dirent, const char *lower_name, int lower_namelen,
rc);
goto out;
}
- buf->caller->pos = buf->ctx.pos;
- rc = !dir_emit(buf->caller, name, name_size, ino, d_type);
+ rc = buf->filldir(buf->dirent, name, name_size, offset, ino, d_type);
kfree(name);
- if (!rc)
+ if (rc >= 0)
buf->entries_written++;
out:
return rc;
@@ -108,23 +107,27 @@ ecryptfs_filldir(void *dirent, const char *lower_name, int lower_namelen,
/**
* ecryptfs_readdir
* @file: The eCryptfs directory file
- * @ctx: The actor to feed the entries to
+ * @dirent: Directory entry handle
+ * @filldir: The filldir callback function
*/
-static int ecryptfs_readdir(struct file *file, struct dir_context *ctx)
+static int ecryptfs_readdir(struct file *file, void *dirent, filldir_t filldir)
{
int rc;
struct file *lower_file;
struct inode *inode;
- struct ecryptfs_getdents_callback buf = {
- .ctx.actor = ecryptfs_filldir,
- .caller = ctx,
- .dentry = file->f_path.dentry
- };
+ struct ecryptfs_getdents_callback buf;
+
lower_file = ecryptfs_file_to_lower(file);
- lower_file->f_pos = ctx->pos;
+ lower_file->f_pos = file->f_pos;
inode = file_inode(file);
- rc = iterate_dir(lower_file, &buf.ctx);
- ctx->pos = buf.ctx.pos;
+ memset(&buf, 0, sizeof(buf));
+ buf.dirent = dirent;
+ buf.dentry = file->f_path.dentry;
+ buf.filldir = filldir;
+ buf.filldir_called = 0;
+ buf.entries_written = 0;
+ rc = vfs_readdir(lower_file, ecryptfs_filldir, (void *)&buf);
+ file->f_pos = lower_file->f_pos;
if (rc < 0)
goto out;
if (buf.filldir_called && !buf.entries_written)
@@ -341,7 +344,7 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
#endif
const struct file_operations ecryptfs_dir_fops = {
- .iterate = ecryptfs_readdir,
+ .readdir = ecryptfs_readdir,
.read = generic_read_dir,
.unlocked_ioctl = ecryptfs_unlocked_ioctl,
#ifdef CONFIG_COMPAT
@@ -362,7 +365,7 @@ const struct file_operations ecryptfs_main_fops = {
.aio_read = ecryptfs_read_update_atime,
.write = do_sync_write,
.aio_write = generic_file_aio_write,
- .iterate = ecryptfs_readdir,
+ .readdir = ecryptfs_readdir,
.unlocked_ioctl = ecryptfs_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ecryptfs_compat_ioctl,
diff --git a/trunk/fs/efs/dir.c b/trunk/fs/efs/dir.c
index b72307ccdf7a..055a9e9ca747 100644
--- a/trunk/fs/efs/dir.c
+++ b/trunk/fs/efs/dir.c
@@ -7,38 +7,40 @@
#include <linux/buffer_head.h>
#include "efs.h"
-static int efs_readdir(struct file *, struct dir_context *);
+static int efs_readdir(struct file *, void *, filldir_t);
const struct file_operations efs_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = efs_readdir,
+ .readdir = efs_readdir,
};
const struct inode_operations efs_dir_inode_operations = {
.lookup = efs_lookup,
};
-static int efs_readdir(struct file *file, struct dir_context *ctx)
-{
- struct inode *inode = file_inode(file);
+static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) {
+ struct inode *inode = file_inode(filp);
+ struct buffer_head *bh;
+
+ struct efs_dir *dirblock;
+ struct efs_dentry *dirslot;
+ efs_ino_t inodenum;
efs_block_t block;
- int slot;
+ int slot, namelen;
+ char *nameptr;
if (inode->i_size & (EFS_DIRBSIZE-1))
printk(KERN_WARNING "EFS: WARNING: readdir(): directory size not a multiple of EFS_DIRBSIZE\n");
/* work out where this entry can be found */
- block = ctx->pos >> EFS_DIRBSIZE_BITS;
+ block = filp->f_pos >> EFS_DIRBSIZE_BITS;
/* each block contains at most 256 slots */
- slot = ctx->pos & 0xff;
+ slot = filp->f_pos & 0xff;
/* look at all blocks */
while (block < inode->i_blocks) {
- struct efs_dir *dirblock;
- struct buffer_head *bh;
-
/* read the dir block */
bh = sb_bread(inode->i_sb, efs_bmap(inode, block));
@@ -55,14 +57,11 @@ static int efs_readdir(struct file *file, struct dir_context *ctx)
break;
}
- for (; slot < dirblock->slots; slot++) {
- struct efs_dentry *dirslot;
- efs_ino_t inodenum;
- const char *nameptr;
- int namelen;
-
- if (dirblock->space[slot] == 0)
+ while (slot < dirblock->slots) {
+ if (dirblock->space[slot] == 0) {
+ slot++;
continue;
+ }
dirslot = (struct efs_dentry *) (((char *) bh->b_data) + EFS_SLOTAT(dirblock, slot));
@@ -73,29 +72,39 @@ static int efs_readdir(struct file *file, struct dir_context *ctx)
#ifdef DEBUG
printk(KERN_DEBUG "EFS: readdir(): block %d slot %d/%d: inode %u, name \"%s\", namelen %u\n", block, slot, dirblock->slots-1, inodenum, nameptr, namelen);
#endif
- if (!namelen)
- continue;
- /* found the next entry */
- ctx->pos = (block << EFS_DIRBSIZE_BITS) | slot;
-
- /* sanity check */
- if (nameptr - (char *) dirblock + namelen > EFS_DIRBSIZE) {
- printk(KERN_WARNING "EFS: directory entry %d exceeds directory block\n", slot);
- continue;
- }
-
- /* copy filename and data in dirslot */
- if (!dir_emit(ctx, nameptr, namelen, inodenum, DT_UNKNOWN)) {
+ if (namelen > 0) {
+ /* found the next entry */
+ filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot;
+
+ /* copy filename and data in dirslot */
+ filldir(dirent, nameptr, namelen, filp->f_pos, inodenum, DT_UNKNOWN);
+
+ /* sanity check */
+ if (nameptr - (char *) dirblock + namelen > EFS_DIRBSIZE) {
+ printk(KERN_WARNING "EFS: directory entry %d exceeds directory block\n", slot);
+ slot++;
+ continue;
+ }
+
+ /* store position of next slot */
+ if (++slot == dirblock->slots) {
+ slot = 0;
+ block++;
+ }
brelse(bh);
- return 0;
+ filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot;
+ goto out;
}
+ slot++;
}
brelse(bh);
slot = 0;
block++;
}
- ctx->pos = (block << EFS_DIRBSIZE_BITS) | slot;
+
+ filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot;
+out:
return 0;
}
diff --git a/trunk/fs/exec.c b/trunk/fs/exec.c
index ffd7a813ad3d..643019585574 100644
--- a/trunk/fs/exec.c
+++ b/trunk/fs/exec.c
@@ -1135,6 +1135,13 @@ void setup_new_exec(struct linux_binprm * bprm)
set_dumpable(current->mm, suid_dumpable);
}
+ /*
+ * Flush performance counters when crossing a
+ * security domain:
+ */
+ if (!get_dumpable(current->mm))
+ perf_event_exit_task(current);
+
/* An exec changes our domain. We are no longer part of the thread
group */
@@ -1198,15 +1205,6 @@ void install_exec_creds(struct linux_binprm *bprm)
commit_creds(bprm->cred);
bprm->cred = NULL;
-
- /*
- * Disable monitoring for regular users
- * when executing setuid binaries. Must
- * wait until new credentials are committed
- * by commit_creds() above
- */
- if (get_dumpable(current->mm) != SUID_DUMP_USER)
- perf_event_exit_task(current);
/*
* cred_guard_mutex must be held at least to this point to prevent
* ptrace_attach() from altering our determination of the task's
diff --git a/trunk/fs/exofs/dir.c b/trunk/fs/exofs/dir.c
index 49f51ab4caac..46375896cfc0 100644
--- a/trunk/fs/exofs/dir.c
+++ b/trunk/fs/exofs/dir.c
@@ -239,19 +239,22 @@ void exofs_set_de_type(struct exofs_dir_entry *de, struct inode *inode)
}
static int
-exofs_readdir(struct file *file, struct dir_context *ctx)
+exofs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- loff_t pos = ctx->pos;
- struct inode *inode = file_inode(file);
+ loff_t pos = filp->f_pos;
+ struct inode *inode = file_inode(filp);
unsigned int offset = pos & ~PAGE_CACHE_MASK;
unsigned long n = pos >> PAGE_CACHE_SHIFT;
unsigned long npages = dir_pages(inode);
unsigned chunk_mask = ~(exofs_chunk_size(inode)-1);
- int need_revalidate = (file->f_version != inode->i_version);
+ unsigned char *types = NULL;
+ int need_revalidate = (filp->f_version != inode->i_version);
if (pos > inode->i_size - EXOFS_DIR_REC_LEN(1))
return 0;
+ types = exofs_filetype_table;
+
for ( ; n < npages; n++, offset = 0) {
char *kaddr, *limit;
struct exofs_dir_entry *de;
@@ -260,7 +263,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
if (IS_ERR(page)) {
EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n",
inode->i_ino);
- ctx->pos += PAGE_CACHE_SIZE - offset;
+ filp->f_pos += PAGE_CACHE_SIZE - offset;
return PTR_ERR(page);
}
kaddr = page_address(page);
@@ -268,9 +271,9 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
if (offset) {
offset = exofs_validate_entry(kaddr, offset,
chunk_mask);
- ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+ filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
 }
- file->f_version = inode->i_version;
+ filp->f_version = inode->i_version;
need_revalidate = 0;
}
de = (struct exofs_dir_entry *)(kaddr + offset);
@@ -285,24 +288,27 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
return -EIO;
}
if (de->inode_no) {
- unsigned char t;
+ int over;
+ unsigned char d_type = DT_UNKNOWN;
- if (de->file_type < EXOFS_FT_MAX)
- t = exofs_filetype_table[de->file_type];
- else
- t = DT_UNKNOWN;
+ if (types && de->file_type < EXOFS_FT_MAX)
+ d_type = types[de->file_type];
- if (!dir_emit(ctx, de->name, de->name_len,
+ offset = (char *)de - kaddr;
+ over = filldir(dirent, de->name, de->name_len,
+ (n<inode_no),
- t)) {
+ d_type);
+ if (over) {
exofs_put_page(page);
return 0;
}
}
- ctx->pos += le16_to_cpu(de->rec_len);
+ filp->f_pos += le16_to_cpu(de->rec_len);
}
exofs_put_page(page);
}
+
return 0;
}
@@ -663,5 +669,5 @@ int exofs_empty_dir(struct inode *inode)
const struct file_operations exofs_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = exofs_readdir,
+ .readdir = exofs_readdir,
};
diff --git a/trunk/fs/exofs/inode.c b/trunk/fs/exofs/inode.c
index 2ec8eb1ab269..d1f80abd8828 100644
--- a/trunk/fs/exofs/inode.c
+++ b/trunk/fs/exofs/inode.c
@@ -953,11 +953,9 @@ static int exofs_releasepage(struct page *page, gfp_t gfp)
return 0;
}
-static void exofs_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void exofs_invalidatepage(struct page *page, unsigned long offset)
{
- EXOFS_DBGMSG("page 0x%lx offset 0x%x length 0x%x\n",
- page->index, offset, length);
+ EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
WARN_ON(1);
}
diff --git a/trunk/fs/exportfs/expfs.c b/trunk/fs/exportfs/expfs.c
index 293bc2e47a73..262fc9940982 100644
--- a/trunk/fs/exportfs/expfs.c
+++ b/trunk/fs/exportfs/expfs.c
@@ -212,7 +212,6 @@ reconnect_path(struct vfsmount *mnt, struct dentry *target_dir, char *nbuf)
}
struct getdents_callback {
- struct dir_context ctx;
char *name; /* name that was found. It already points to a
buffer NAME_MAX+1 is size */
unsigned long ino; /* the inum we are looking for */
@@ -255,11 +254,7 @@ static int get_name(const struct path *path, char *name, struct dentry *child)
struct inode *dir = path->dentry->d_inode;
int error;
struct file *file;
- struct getdents_callback buffer = {
- .ctx.actor = filldir_one,
- .name = name,
- .ino = child->d_inode->i_ino
- };
+ struct getdents_callback buffer;
error = -ENOTDIR;
if (!dir || !S_ISDIR(dir->i_mode))
@@ -276,14 +271,17 @@ static int get_name(const struct path *path, char *name, struct dentry *child)
goto out;
error = -EINVAL;
- if (!file->f_op->iterate)
+ if (!file->f_op->readdir)
goto out_close;
+ buffer.name = name;
+ buffer.ino = child->d_inode->i_ino;
+ buffer.found = 0;
buffer.sequence = 0;
while (1) {
int old_seq = buffer.sequence;
- error = iterate_dir(file, &buffer.ctx);
+ error = vfs_readdir(file, filldir_one, &buffer);
if (buffer.found) {
error = 0;
break;
diff --git a/trunk/fs/ext2/dir.c b/trunk/fs/ext2/dir.c
index 6e1d4ab09d72..4237722bfd27 100644
--- a/trunk/fs/ext2/dir.c
+++ b/trunk/fs/ext2/dir.c
@@ -287,17 +287,17 @@ static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
}
static int
-ext2_readdir(struct file *file, struct dir_context *ctx)
+ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
{
- loff_t pos = ctx->pos;
- struct inode *inode = file_inode(file);
+ loff_t pos = filp->f_pos;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
unsigned int offset = pos & ~PAGE_CACHE_MASK;
unsigned long n = pos >> PAGE_CACHE_SHIFT;
unsigned long npages = dir_pages(inode);
unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
unsigned char *types = NULL;
- int need_revalidate = file->f_version != inode->i_version;
+ int need_revalidate = filp->f_version != inode->i_version;
if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
return 0;
@@ -314,16 +314,16 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
ext2_error(sb, __func__,
"bad page in #%lu",
inode->i_ino);
- ctx->pos += PAGE_CACHE_SIZE - offset;
+ filp->f_pos += PAGE_CACHE_SIZE - offset;
return PTR_ERR(page);
}
kaddr = page_address(page);
if (unlikely(need_revalidate)) {
if (offset) {
offset = ext2_validate_entry(kaddr, offset, chunk_mask);
- ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+ filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
}
- file->f_version = inode->i_version;
+ filp->f_version = inode->i_version;
need_revalidate = 0;
}
de = (ext2_dirent *)(kaddr+offset);
@@ -336,19 +336,22 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
return -EIO;
}
if (de->inode) {
+ int over;
unsigned char d_type = DT_UNKNOWN;
if (types && de->file_type < EXT2_FT_MAX)
d_type = types[de->file_type];
- if (!dir_emit(ctx, de->name, de->name_len,
- le32_to_cpu(de->inode),
- d_type)) {
+ offset = (char *)de - kaddr;
+ over = filldir(dirent, de->name, de->name_len,
+ (n<<PAGE_CACHE_SHIFT) | offset,
+ le32_to_cpu(de->inode), d_type);
+ if (over) {
ext2_put_page(page);
return 0;
}
}
- ctx->pos += ext2_rec_len_from_disk(de->rec_len);
+ filp->f_pos += ext2_rec_len_from_disk(de->rec_len);
}
ext2_put_page(page);
}
@@ -721,7 +724,7 @@ int ext2_empty_dir (struct inode * inode)
const struct file_operations ext2_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = ext2_readdir,
+ .readdir = ext2_readdir,
.unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext2_compat_ioctl,
diff --git a/trunk/fs/ext3/dir.c b/trunk/fs/ext3/dir.c
index f522425aaa24..87eccbbca255 100644
--- a/trunk/fs/ext3/dir.c
+++ b/trunk/fs/ext3/dir.c
@@ -28,7 +28,8 @@ static unsigned char ext3_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
-static int ext3_dx_readdir(struct file *, struct dir_context *);
+static int ext3_dx_readdir(struct file * filp,
+ void * dirent, filldir_t filldir);
static unsigned char get_dtype(struct super_block *sb, int filetype)
{
@@ -90,30 +91,36 @@ int ext3_check_dir_entry (const char * function, struct inode * dir,
return error_msg == NULL ? 1 : 0;
}
-static int ext3_readdir(struct file *file, struct dir_context *ctx)
+static int ext3_readdir(struct file * filp,
+ void * dirent, filldir_t filldir)
{
+ int error = 0;
unsigned long offset;
- int i;
+ int i, stored;
struct ext3_dir_entry_2 *de;
int err;
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
+ int ret = 0;
int dir_has_error = 0;
if (is_dx_dir(inode)) {
- err = ext3_dx_readdir(file, ctx);
- if (err != ERR_BAD_DX_DIR)
- return err;
+ err = ext3_dx_readdir(filp, dirent, filldir);
+ if (err != ERR_BAD_DX_DIR) {
+ ret = err;
+ goto out;
+ }
/*
* We don't set the inode dirty flag since it's not
* critical that it get flushed back to the disk.
*/
- EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL;
+ EXT3_I(file_inode(filp))->i_flags &= ~EXT3_INDEX_FL;
}
- offset = ctx->pos & (sb->s_blocksize - 1);
+ stored = 0;
+ offset = filp->f_pos & (sb->s_blocksize - 1);
- while (ctx->pos < inode->i_size) {
- unsigned long blk = ctx->pos >> EXT3_BLOCK_SIZE_BITS(sb);
+ while (!error && !stored && filp->f_pos < inode->i_size) {
+ unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb);
struct buffer_head map_bh;
struct buffer_head *bh = NULL;
@@ -122,12 +129,12 @@ static int ext3_readdir(struct file *file, struct dir_context *ctx)
if (err > 0) {
pgoff_t index = map_bh.b_blocknr >>
(PAGE_CACHE_SHIFT - inode->i_blkbits);
- if (!ra_has_index(&file->f_ra, index))
+ if (!ra_has_index(&filp->f_ra, index))
page_cache_sync_readahead(
sb->s_bdev->bd_inode->i_mapping,
- &file->f_ra, file,
+ &filp->f_ra, filp,
index, 1);
- file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
+ filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
bh = ext3_bread(NULL, inode, blk, 0, &err);
}
@@ -139,21 +146,22 @@ static int ext3_readdir(struct file *file, struct dir_context *ctx)
if (!dir_has_error) {
ext3_error(sb, __func__, "directory #%lu "
"contains a hole at offset %lld",
- inode->i_ino, ctx->pos);
+ inode->i_ino, filp->f_pos);
dir_has_error = 1;
}
/* corrupt size? Maybe no more blocks to read */
- if (ctx->pos > inode->i_blocks << 9)
+ if (filp->f_pos > inode->i_blocks << 9)
break;
- ctx->pos += sb->s_blocksize - offset;
+ filp->f_pos += sb->s_blocksize - offset;
continue;
}
+revalidate:
/* If the dir block has changed since the last call to
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
* to make sure. */
- if (offset && file->f_version != inode->i_version) {
+ if (filp->f_version != inode->i_version) {
for (i = 0; i < sb->s_blocksize && i < offset; ) {
de = (struct ext3_dir_entry_2 *)
(bh->b_data + i);
@@ -169,40 +177,53 @@ static int ext3_readdir(struct file *file, struct dir_context *ctx)
i += ext3_rec_len_from_disk(de->rec_len);
}
offset = i;
- ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
+ filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
| offset;
- file->f_version = inode->i_version;
+ filp->f_version = inode->i_version;
}
- while (ctx->pos < inode->i_size
+ while (!error && filp->f_pos < inode->i_size
&& offset < sb->s_blocksize) {
de = (struct ext3_dir_entry_2 *) (bh->b_data + offset);
if (!ext3_check_dir_entry ("ext3_readdir", inode, de,
bh, offset)) {
- /* On error, skip the to the
+ /* On error, skip the f_pos to the
next block. */
- ctx->pos = (ctx->pos |
+ filp->f_pos = (filp->f_pos |
(sb->s_blocksize - 1)) + 1;
- break;
+ brelse (bh);
+ ret = stored;
+ goto out;
}
offset += ext3_rec_len_from_disk(de->rec_len);
if (le32_to_cpu(de->inode)) {
- if (!dir_emit(ctx, de->name, de->name_len,
- le32_to_cpu(de->inode),
- get_dtype(sb, de->file_type))) {
- brelse(bh);
- return 0;
- }
+ /* We might block in the next section
+ * if the data destination is
+ * currently swapped out. So, use a
+ * version stamp to detect whether or
+ * not the directory has been modified
+ * during the copy operation.
+ */
+ u64 version = filp->f_version;
+
+ error = filldir(dirent, de->name,
+ de->name_len,
+ filp->f_pos,
+ le32_to_cpu(de->inode),
+ get_dtype(sb, de->file_type));
+ if (error)
+ break;
+ if (version != filp->f_version)
+ goto revalidate;
+ stored ++;
}
- ctx->pos += ext3_rec_len_from_disk(de->rec_len);
+ filp->f_pos += ext3_rec_len_from_disk(de->rec_len);
}
offset = 0;
brelse (bh);
- if (ctx->pos < inode->i_size)
- if (!dir_relax(inode))
- return 0;
}
- return 0;
+out:
+ return ret;
}
static inline int is_32bit_api(void)
@@ -431,54 +452,62 @@ int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
* for all entres on the fname linked list. (Normally there is only
* one entry on the linked list, unless there are 62 bit hash collisions.)
*/
-static bool call_filldir(struct file *file, struct dir_context *ctx,
- struct fname *fname)
+static int call_filldir(struct file * filp, void * dirent,
+ filldir_t filldir, struct fname *fname)
{
- struct dir_private_info *info = file->private_data;
- struct inode *inode = file_inode(file);
- struct super_block *sb = inode->i_sb;
+ struct dir_private_info *info = filp->private_data;
+ loff_t curr_pos;
+ struct inode *inode = file_inode(filp);
+ struct super_block * sb;
+ int error;
+
+ sb = inode->i_sb;
if (!fname) {
printk("call_filldir: called with null fname?!?\n");
- return true;
+ return 0;
}
- ctx->pos = hash2pos(file, fname->hash, fname->minor_hash);
+ curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
while (fname) {
- if (!dir_emit(ctx, fname->name, fname->name_len,
+ error = filldir(dirent, fname->name,
+ fname->name_len, curr_pos,
fname->inode,
- get_dtype(sb, fname->file_type))) {
+ get_dtype(sb, fname->file_type));
+ if (error) {
+ filp->f_pos = curr_pos;
info->extra_fname = fname;
- return false;
+ return error;
}
fname = fname->next;
}
- return true;
+ return 0;
}
-static int ext3_dx_readdir(struct file *file, struct dir_context *ctx)
+static int ext3_dx_readdir(struct file * filp,
+ void * dirent, filldir_t filldir)
{
- struct dir_private_info *info = file->private_data;
- struct inode *inode = file_inode(file);
+ struct dir_private_info *info = filp->private_data;
+ struct inode *inode = file_inode(filp);
struct fname *fname;
int ret;
if (!info) {
- info = ext3_htree_create_dir_info(file, ctx->pos);
+ info = ext3_htree_create_dir_info(filp, filp->f_pos);
if (!info)
return -ENOMEM;
- file->private_data = info;
+ filp->private_data = info;
}
- if (ctx->pos == ext3_get_htree_eof(file))
+ if (filp->f_pos == ext3_get_htree_eof(filp))
return 0; /* EOF */
/* Some one has messed with f_pos; reset the world */
- if (info->last_pos != ctx->pos) {
+ if (info->last_pos != filp->f_pos) {
free_rb_tree_fname(&info->root);
info->curr_node = NULL;
info->extra_fname = NULL;
- info->curr_hash = pos2maj_hash(file, ctx->pos);
- info->curr_minor_hash = pos2min_hash(file, ctx->pos);
+ info->curr_hash = pos2maj_hash(filp, filp->f_pos);
+ info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
}
/*
@@ -486,7 +515,7 @@ static int ext3_dx_readdir(struct file *file, struct dir_context *ctx)
* chain, return them first.
*/
if (info->extra_fname) {
- if (!call_filldir(file, ctx, info->extra_fname))
+ if (call_filldir(filp, dirent, filldir, info->extra_fname))
goto finished;
info->extra_fname = NULL;
goto next_node;
@@ -500,17 +529,17 @@ static int ext3_dx_readdir(struct file *file, struct dir_context *ctx)
* cached entries.
*/
if ((!info->curr_node) ||
- (file->f_version != inode->i_version)) {
+ (filp->f_version != inode->i_version)) {
info->curr_node = NULL;
free_rb_tree_fname(&info->root);
- file->f_version = inode->i_version;
- ret = ext3_htree_fill_tree(file, info->curr_hash,
+ filp->f_version = inode->i_version;
+ ret = ext3_htree_fill_tree(filp, info->curr_hash,
info->curr_minor_hash,
&info->next_hash);
if (ret < 0)
return ret;
if (ret == 0) {
- ctx->pos = ext3_get_htree_eof(file);
+ filp->f_pos = ext3_get_htree_eof(filp);
break;
}
info->curr_node = rb_first(&info->root);
@@ -519,7 +548,7 @@ static int ext3_dx_readdir(struct file *file, struct dir_context *ctx)
fname = rb_entry(info->curr_node, struct fname, rb_hash);
info->curr_hash = fname->hash;
info->curr_minor_hash = fname->minor_hash;
- if (!call_filldir(file, ctx, fname))
+ if (call_filldir(filp, dirent, filldir, fname))
break;
next_node:
info->curr_node = rb_next(info->curr_node);
@@ -530,7 +559,7 @@ static int ext3_dx_readdir(struct file *file, struct dir_context *ctx)
info->curr_minor_hash = fname->minor_hash;
} else {
if (info->next_hash == ~0) {
- ctx->pos = ext3_get_htree_eof(file);
+ filp->f_pos = ext3_get_htree_eof(filp);
break;
}
info->curr_hash = info->next_hash;
@@ -538,7 +567,7 @@ static int ext3_dx_readdir(struct file *file, struct dir_context *ctx)
}
}
finished:
- info->last_pos = ctx->pos;
+ info->last_pos = filp->f_pos;
return 0;
}
@@ -553,7 +582,7 @@ static int ext3_release_dir (struct inode * inode, struct file * filp)
const struct file_operations ext3_dir_operations = {
.llseek = ext3_dir_llseek,
.read = generic_read_dir,
- .iterate = ext3_readdir,
+ .readdir = ext3_readdir,
.unlocked_ioctl = ext3_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext3_compat_ioctl,
diff --git a/trunk/fs/ext3/inode.c b/trunk/fs/ext3/inode.c
index f67668f724ba..23c712825640 100644
--- a/trunk/fs/ext3/inode.c
+++ b/trunk/fs/ext3/inode.c
@@ -1825,20 +1825,19 @@ ext3_readpages(struct file *file, struct address_space *mapping,
return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
}
-static void ext3_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void ext3_invalidatepage(struct page *page, unsigned long offset)
{
journal_t *journal = EXT3_JOURNAL(page->mapping->host);
- trace_ext3_invalidatepage(page, offset, length);
+ trace_ext3_invalidatepage(page, offset);
/*
* If it's a full truncate we just forget about the pending dirtying
*/
- if (offset == 0 && length == PAGE_CACHE_SIZE)
+ if (offset == 0)
ClearPageChecked(page);
- journal_invalidatepage(journal, page, offset, length);
+ journal_invalidatepage(journal, page, offset);
}
static int ext3_releasepage(struct page *page, gfp_t wait)
diff --git a/trunk/fs/ext3/namei.c b/trunk/fs/ext3/namei.c
index cea8ecf3e76e..692de13e3596 100644
--- a/trunk/fs/ext3/namei.c
+++ b/trunk/fs/ext3/namei.c
@@ -576,8 +576,11 @@ static int htree_dirblock_to_tree(struct file *dir_file,
if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
(block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb))
+((char *)de - bh->b_data))) {
- /* silently ignore the rest of the block */
- break;
+ /* On error, skip the f_pos to the next block. */
+ dir_file->f_pos = (dir_file->f_pos |
+ (dir->i_sb->s_blocksize - 1)) + 1;
+ brelse (bh);
+ return count;
}
ext3fs_dirhash(de->name, de->name_len, hinfo);
if ((hinfo->hash < start_hash) ||
diff --git a/trunk/fs/ext4/balloc.c b/trunk/fs/ext4/balloc.c
index 58339393fa6e..d0f13eada0ed 100644
--- a/trunk/fs/ext4/balloc.c
+++ b/trunk/fs/ext4/balloc.c
@@ -682,15 +682,11 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
static inline int test_root(ext4_group_t a, int b)
{
- while (1) {
- if (a < b)
- return 0;
- if (a == b)
- return 1;
- if ((a % b) != 0)
- return 0;
- a = a / b;
- }
+ int num = b;
+
+ while (a > num)
+ num *= b;
+ return num == a;
}
static int ext4_group_sparse(ext4_group_t group)
diff --git a/trunk/fs/ext4/dir.c b/trunk/fs/ext4/dir.c
index 3c7d288ae94c..f8d56e4254e0 100644
--- a/trunk/fs/ext4/dir.c
+++ b/trunk/fs/ext4/dir.c
@@ -29,7 +29,8 @@
#include "ext4.h"
#include "xattr.h"
-static int ext4_dx_readdir(struct file *, struct dir_context *);
+static int ext4_dx_readdir(struct file *filp,
+ void *dirent, filldir_t filldir);
/**
* Check if the given dir-inode refers to an htree-indexed directory
@@ -102,56 +103,60 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
return 1;
}
-static int ext4_readdir(struct file *file, struct dir_context *ctx)
+static int ext4_readdir(struct file *filp,
+ void *dirent, filldir_t filldir)
{
+ int error = 0;
unsigned int offset;
int i, stored;
struct ext4_dir_entry_2 *de;
int err;
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
+ int ret = 0;
int dir_has_error = 0;
if (is_dx_dir(inode)) {
- err = ext4_dx_readdir(file, ctx);
+ err = ext4_dx_readdir(filp, dirent, filldir);
if (err != ERR_BAD_DX_DIR) {
- return err;
+ ret = err;
+ goto out;
}
/*
* We don't set the inode dirty flag since it's not
* critical that it get flushed back to the disk.
*/
- ext4_clear_inode_flag(file_inode(file),
+ ext4_clear_inode_flag(file_inode(filp),
EXT4_INODE_INDEX);
}
if (ext4_has_inline_data(inode)) {
int has_inline_data = 1;
- int ret = ext4_read_inline_dir(file, ctx,
+ ret = ext4_read_inline_dir(filp, dirent, filldir,
&has_inline_data);
if (has_inline_data)
return ret;
}
stored = 0;
- offset = ctx->pos & (sb->s_blocksize - 1);
+ offset = filp->f_pos & (sb->s_blocksize - 1);
- while (ctx->pos < inode->i_size) {
+ while (!error && !stored && filp->f_pos < inode->i_size) {
struct ext4_map_blocks map;
struct buffer_head *bh = NULL;
- map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb);
+ map.m_lblk = filp->f_pos >> EXT4_BLOCK_SIZE_BITS(sb);
map.m_len = 1;
err = ext4_map_blocks(NULL, inode, &map, 0);
if (err > 0) {
pgoff_t index = map.m_pblk >>
(PAGE_CACHE_SHIFT - inode->i_blkbits);
- if (!ra_has_index(&file->f_ra, index))
+ if (!ra_has_index(&filp->f_ra, index))
page_cache_sync_readahead(
sb->s_bdev->bd_inode->i_mapping,
- &file->f_ra, file,
+ &filp->f_ra, filp,
index, 1);
- file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
+ filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
bh = ext4_bread(NULL, inode, map.m_lblk, 0, &err);
}
@@ -161,16 +166,16 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
*/
if (!bh) {
if (!dir_has_error) {
- EXT4_ERROR_FILE(file, 0,
+ EXT4_ERROR_FILE(filp, 0,
"directory contains a "
"hole at offset %llu",
- (unsigned long long) ctx->pos);
+ (unsigned long long) filp->f_pos);
dir_has_error = 1;
}
/* corrupt size? Maybe no more blocks to read */
- if (ctx->pos > inode->i_blocks << 9)
+ if (filp->f_pos > inode->i_blocks << 9)
break;
- ctx->pos += sb->s_blocksize - offset;
+ filp->f_pos += sb->s_blocksize - offset;
continue;
}
@@ -178,20 +183,21 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
if (!buffer_verified(bh) &&
!ext4_dirent_csum_verify(inode,
(struct ext4_dir_entry *)bh->b_data)) {
- EXT4_ERROR_FILE(file, 0, "directory fails checksum "
+ EXT4_ERROR_FILE(filp, 0, "directory fails checksum "
"at offset %llu",
- (unsigned long long)ctx->pos);
- ctx->pos += sb->s_blocksize - offset;
+ (unsigned long long)filp->f_pos);
+ filp->f_pos += sb->s_blocksize - offset;
brelse(bh);
continue;
}
set_buffer_verified(bh);
+revalidate:
/* If the dir block has changed since the last call to
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
* to make sure. */
- if (file->f_version != inode->i_version) {
+ if (filp->f_version != inode->i_version) {
for (i = 0; i < sb->s_blocksize && i < offset; ) {
de = (struct ext4_dir_entry_2 *)
(bh->b_data + i);
@@ -208,46 +214,57 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
sb->s_blocksize);
}
offset = i;
- ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
+ filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
| offset;
- file->f_version = inode->i_version;
+ filp->f_version = inode->i_version;
}
- while (ctx->pos < inode->i_size
+ while (!error && filp->f_pos < inode->i_size
&& offset < sb->s_blocksize) {
de = (struct ext4_dir_entry_2 *) (bh->b_data + offset);
- if (ext4_check_dir_entry(inode, file, de, bh,
+ if (ext4_check_dir_entry(inode, filp, de, bh,
bh->b_data, bh->b_size,
offset)) {
/*
- * On error, skip to the next block
+ * On error, skip the f_pos to the next block
*/
- ctx->pos = (ctx->pos |
+ filp->f_pos = (filp->f_pos |
(sb->s_blocksize - 1)) + 1;
- break;
+ brelse(bh);
+ ret = stored;
+ goto out;
}
offset += ext4_rec_len_from_disk(de->rec_len,
sb->s_blocksize);
if (le32_to_cpu(de->inode)) {
- if (!dir_emit(ctx, de->name,
+ /* We might block in the next section
+ * if the data destination is
+ * currently swapped out. So, use a
+ * version stamp to detect whether or
+ * not the directory has been modified
+ * during the copy operation.
+ */
+ u64 version = filp->f_version;
+
+ error = filldir(dirent, de->name,
de->name_len,
+ filp->f_pos,
le32_to_cpu(de->inode),
- get_dtype(sb, de->file_type))) {
- brelse(bh);
- return 0;
- }
+ get_dtype(sb, de->file_type));
+ if (error)
+ break;
+ if (version != filp->f_version)
+ goto revalidate;
+ stored++;
}
- ctx->pos += ext4_rec_len_from_disk(de->rec_len,
+ filp->f_pos += ext4_rec_len_from_disk(de->rec_len,
sb->s_blocksize);
}
offset = 0;
brelse(bh);
- if (ctx->pos < inode->i_size) {
- if (!dir_relax(inode))
- return 0;
- }
}
- return 0;
+out:
+ return ret;
}
static inline int is_32bit_api(void)
@@ -475,12 +492,16 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
* for all entres on the fname linked list. (Normally there is only
* one entry on the linked list, unless there are 62 bit hash collisions.)
*/
-static int call_filldir(struct file *file, struct dir_context *ctx,
- struct fname *fname)
+static int call_filldir(struct file *filp, void *dirent,
+ filldir_t filldir, struct fname *fname)
{
- struct dir_private_info *info = file->private_data;
- struct inode *inode = file_inode(file);
- struct super_block *sb = inode->i_sb;
+ struct dir_private_info *info = filp->private_data;
+ loff_t curr_pos;
+ struct inode *inode = file_inode(filp);
+ struct super_block *sb;
+ int error;
+
+ sb = inode->i_sb;
if (!fname) {
ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: comm %s: "
@@ -488,44 +509,47 @@ static int call_filldir(struct file *file, struct dir_context *ctx,
inode->i_ino, current->comm);
return 0;
}
- ctx->pos = hash2pos(file, fname->hash, fname->minor_hash);
+ curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
while (fname) {
- if (!dir_emit(ctx, fname->name,
- fname->name_len,
+ error = filldir(dirent, fname->name,
+ fname->name_len, curr_pos,
fname->inode,
- get_dtype(sb, fname->file_type))) {
+ get_dtype(sb, fname->file_type));
+ if (error) {
+ filp->f_pos = curr_pos;
info->extra_fname = fname;
- return 1;
+ return error;
}
fname = fname->next;
}
return 0;
}
-static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
+static int ext4_dx_readdir(struct file *filp,
+ void *dirent, filldir_t filldir)
{
- struct dir_private_info *info = file->private_data;
- struct inode *inode = file_inode(file);
+ struct dir_private_info *info = filp->private_data;
+ struct inode *inode = file_inode(filp);
struct fname *fname;
int ret;
if (!info) {
- info = ext4_htree_create_dir_info(file, ctx->pos);
+ info = ext4_htree_create_dir_info(filp, filp->f_pos);
if (!info)
return -ENOMEM;
- file->private_data = info;
+ filp->private_data = info;
}
- if (ctx->pos == ext4_get_htree_eof(file))
+ if (filp->f_pos == ext4_get_htree_eof(filp))
return 0; /* EOF */
/* Some one has messed with f_pos; reset the world */
- if (info->last_pos != ctx->pos) {
+ if (info->last_pos != filp->f_pos) {
free_rb_tree_fname(&info->root);
info->curr_node = NULL;
info->extra_fname = NULL;
- info->curr_hash = pos2maj_hash(file, ctx->pos);
- info->curr_minor_hash = pos2min_hash(file, ctx->pos);
+ info->curr_hash = pos2maj_hash(filp, filp->f_pos);
+ info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
}
/*
@@ -533,7 +557,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
* chain, return them first.
*/
if (info->extra_fname) {
- if (call_filldir(file, ctx, info->extra_fname))
+ if (call_filldir(filp, dirent, filldir, info->extra_fname))
goto finished;
info->extra_fname = NULL;
goto next_node;
@@ -547,17 +571,17 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
* cached entries.
*/
if ((!info->curr_node) ||
- (file->f_version != inode->i_version)) {
+ (filp->f_version != inode->i_version)) {
info->curr_node = NULL;
free_rb_tree_fname(&info->root);
- file->f_version = inode->i_version;
- ret = ext4_htree_fill_tree(file, info->curr_hash,
+ filp->f_version = inode->i_version;
+ ret = ext4_htree_fill_tree(filp, info->curr_hash,
info->curr_minor_hash,
&info->next_hash);
if (ret < 0)
return ret;
if (ret == 0) {
- ctx->pos = ext4_get_htree_eof(file);
+ filp->f_pos = ext4_get_htree_eof(filp);
break;
}
info->curr_node = rb_first(&info->root);
@@ -566,7 +590,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
fname = rb_entry(info->curr_node, struct fname, rb_hash);
info->curr_hash = fname->hash;
info->curr_minor_hash = fname->minor_hash;
- if (call_filldir(file, ctx, fname))
+ if (call_filldir(filp, dirent, filldir, fname))
break;
next_node:
info->curr_node = rb_next(info->curr_node);
@@ -577,7 +601,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
info->curr_minor_hash = fname->minor_hash;
} else {
if (info->next_hash == ~0) {
- ctx->pos = ext4_get_htree_eof(file);
+ filp->f_pos = ext4_get_htree_eof(filp);
break;
}
info->curr_hash = info->next_hash;
@@ -585,7 +609,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
}
}
finished:
- info->last_pos = ctx->pos;
+ info->last_pos = filp->f_pos;
return 0;
}
@@ -600,7 +624,7 @@ static int ext4_release_dir(struct inode *inode, struct file *filp)
const struct file_operations ext4_dir_operations = {
.llseek = ext4_dir_llseek,
.read = generic_read_dir,
- .iterate = ext4_readdir,
+ .readdir = ext4_readdir,
.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext4_compat_ioctl,
diff --git a/trunk/fs/ext4/ext4.h b/trunk/fs/ext4/ext4.h
index b577e45425b0..5aae3d12d400 100644
--- a/trunk/fs/ext4/ext4.h
+++ b/trunk/fs/ext4/ext4.h
@@ -176,29 +176,39 @@ struct ext4_map_blocks {
unsigned int m_flags;
};
+/*
+ * For delayed allocation tracking
+ */
+struct mpage_da_data {
+ struct inode *inode;
+ sector_t b_blocknr; /* start block number of extent */
+ size_t b_size; /* size of extent */
+ unsigned long b_state; /* state of the extent */
+ unsigned long first_page, next_page; /* extent of pages */
+ struct writeback_control *wbc;
+ int io_done;
+ int pages_written;
+ int retval;
+};
+
/*
* Flags for ext4_io_end->flags
*/
#define EXT4_IO_END_UNWRITTEN 0x0001
-#define EXT4_IO_END_DIRECT 0x0002
+#define EXT4_IO_END_ERROR 0x0002
+#define EXT4_IO_END_DIRECT 0x0004
/*
- * For converting uninitialized extents on a work queue. 'handle' is used for
- * buffered writeback.
+ * For converting uninitialized extents on a work queue.
*/
typedef struct ext4_io_end {
struct list_head list; /* per-file finished IO list */
- handle_t *handle; /* handle reserved for extent
- * conversion */
struct inode *inode; /* file being written to */
- struct bio *bio; /* Linked list of completed
- * bios covering the extent */
unsigned int flag; /* unwritten or not */
loff_t offset; /* offset in the file */
ssize_t size; /* size of the extent */
struct kiocb *iocb; /* iocb struct for AIO */
int result; /* error value for AIO */
- atomic_t count; /* reference counter */
} ext4_io_end_t;
struct ext4_io_submit {
@@ -570,6 +580,11 @@ enum {
#define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010
#define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020
+/*
+ * Flags used by ext4_discard_partial_page_buffers
+ */
+#define EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED 0x0001
+
/*
* ioctl commands
*/
@@ -864,7 +879,6 @@ struct ext4_inode_info {
rwlock_t i_es_lock;
struct list_head i_es_lru;
unsigned int i_es_lru_nr; /* protected by i_es_lock */
- unsigned long i_touch_when; /* jiffies of last accessing */
/* ialloc */
ext4_group_t i_last_alloc_group;
@@ -889,22 +903,12 @@ struct ext4_inode_info {
qsize_t i_reserved_quota;
#endif
- /* Lock protecting lists below */
+ /* completed IOs that might need unwritten extents handling */
+ struct list_head i_completed_io_list;
spinlock_t i_completed_io_lock;
- /*
- * Completed IOs that need unwritten extents handling and have
- * transaction reserved
- */
- struct list_head i_rsv_conversion_list;
- /*
- * Completed IOs that need unwritten extents handling and don't have
- * transaction reserved
- */
- struct list_head i_unrsv_conversion_list;
atomic_t i_ioend_count; /* Number of outstanding io_end structs */
atomic_t i_unwritten; /* Nr. of inflight conversions pending */
- struct work_struct i_rsv_conversion_work;
- struct work_struct i_unrsv_conversion_work;
+ struct work_struct i_unwritten_work; /* deferred extent conversion */
spinlock_t i_block_reservation_lock;
@@ -1241,6 +1245,7 @@ struct ext4_sb_info {
unsigned int s_mb_stats;
unsigned int s_mb_order2_reqs;
unsigned int s_mb_group_prealloc;
+ unsigned int s_max_writeback_mb_bump;
unsigned int s_max_dir_size_kb;
/* where last allocation was done - for stream allocation */
unsigned long s_mb_last_group;
@@ -1276,10 +1281,8 @@ struct ext4_sb_info {
struct flex_groups *s_flex_groups;
ext4_group_t s_flex_groups_allocated;
- /* workqueue for unreserved extent convertions (dio) */
- struct workqueue_struct *unrsv_conversion_wq;
- /* workqueue for reserved extent conversions (buffered io) */
- struct workqueue_struct *rsv_conversion_wq;
+ /* workqueue for dio unwritten */
+ struct workqueue_struct *dio_unwritten_wq;
/* timer for periodic error stats printing */
struct timer_list s_err_report;
@@ -1304,7 +1307,6 @@ struct ext4_sb_info {
/* Reclaim extents from extent status tree */
struct shrinker s_es_shrinker;
struct list_head s_es_lru;
- unsigned long s_es_last_sorted;
struct percpu_counter s_extent_cache_cnt;
spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
};
@@ -1340,9 +1342,6 @@ static inline void ext4_set_io_unwritten_flag(struct inode *inode,
struct ext4_io_end *io_end)
{
if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
- /* Writeback has to have coversion transaction reserved */
- WARN_ON(EXT4_SB(inode->i_sb)->s_journal && !io_end->handle &&
- !(io_end->flag & EXT4_IO_END_DIRECT));
io_end->flag |= EXT4_IO_END_UNWRITTEN;
atomic_inc(&EXT4_I(inode)->i_unwritten);
}
@@ -2000,6 +1999,7 @@ static inline unsigned char get_dtype(struct super_block *sb, int filetype)
/* fsync.c */
extern int ext4_sync_file(struct file *, loff_t, loff_t, int);
+extern int ext4_flush_unwritten_io(struct inode *);
/* hash.c */
extern int ext4fs_dirhash(const char *name, int len, struct
@@ -2088,7 +2088,7 @@ extern int ext4_change_inode_journal_flag(struct inode *, int);
extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
extern int ext4_can_truncate(struct inode *inode);
extern void ext4_truncate(struct inode *);
-extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
+extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks);
extern void ext4_set_inode_flags(struct inode *);
extern void ext4_get_inode_flags(struct ext4_inode_info *);
@@ -2096,12 +2096,9 @@ extern int ext4_alloc_da_blocks(struct inode *inode);
extern void ext4_set_aops(struct inode *inode);
extern int ext4_writepage_trans_blocks(struct inode *);
extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
-extern int ext4_block_truncate_page(handle_t *handle,
- struct address_space *mapping, loff_t from);
-extern int ext4_block_zero_page_range(handle_t *handle,
- struct address_space *mapping, loff_t from, loff_t length);
-extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
- loff_t lstart, loff_t lend);
+extern int ext4_discard_partial_page_buffers(handle_t *handle,
+ struct address_space *mapping, loff_t from,
+ loff_t length, int flags);
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern void ext4_da_update_reserve_space(struct inode *inode,
@@ -2114,7 +2111,7 @@ extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset,
unsigned long nr_segs);
extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
-extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
+extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk);
extern void ext4_ind_truncate(handle_t *, struct inode *inode);
extern int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
ext4_lblk_t first, ext4_lblk_t stop);
@@ -2169,96 +2166,42 @@ extern int ext4_alloc_flex_bg_array(struct super_block *sb,
ext4_group_t ngroup);
extern const char *ext4_decode_error(struct super_block *sb, int errno,
char nbuf[16]);
-
extern __printf(4, 5)
void __ext4_error(struct super_block *, const char *, unsigned int,
const char *, ...);
+#define ext4_error(sb, message...) __ext4_error(sb, __func__, \
+ __LINE__, ## message)
extern __printf(5, 6)
-void __ext4_error_inode(struct inode *, const char *, unsigned int, ext4_fsblk_t,
+void ext4_error_inode(struct inode *, const char *, unsigned int, ext4_fsblk_t,
const char *, ...);
extern __printf(5, 6)
-void __ext4_error_file(struct file *, const char *, unsigned int, ext4_fsblk_t,
+void ext4_error_file(struct file *, const char *, unsigned int, ext4_fsblk_t,
const char *, ...);
extern void __ext4_std_error(struct super_block *, const char *,
unsigned int, int);
extern __printf(4, 5)
void __ext4_abort(struct super_block *, const char *, unsigned int,
const char *, ...);
+#define ext4_abort(sb, message...) __ext4_abort(sb, __func__, \
+ __LINE__, ## message)
extern __printf(4, 5)
void __ext4_warning(struct super_block *, const char *, unsigned int,
const char *, ...);
+#define ext4_warning(sb, message...) __ext4_warning(sb, __func__, \
+ __LINE__, ## message)
extern __printf(3, 4)
-void __ext4_msg(struct super_block *, const char *, const char *, ...);
+void ext4_msg(struct super_block *, const char *, const char *, ...);
extern void __dump_mmp_msg(struct super_block *, struct mmp_struct *mmp,
const char *, unsigned int, const char *);
+#define dump_mmp_msg(sb, mmp, msg) __dump_mmp_msg(sb, mmp, __func__, \
+ __LINE__, msg)
extern __printf(7, 8)
void __ext4_grp_locked_error(const char *, unsigned int,
struct super_block *, ext4_group_t,
unsigned long, ext4_fsblk_t,
const char *, ...);
-
-#ifdef CONFIG_PRINTK
-
-#define ext4_error_inode(inode, func, line, block, fmt, ...) \
- __ext4_error_inode(inode, func, line, block, fmt, ##__VA_ARGS__)
-#define ext4_error_file(file, func, line, block, fmt, ...) \
- __ext4_error_file(file, func, line, block, fmt, ##__VA_ARGS__)
-#define ext4_error(sb, fmt, ...) \
- __ext4_error(sb, __func__, __LINE__, fmt, ##__VA_ARGS__)
-#define ext4_abort(sb, fmt, ...) \
- __ext4_abort(sb, __func__, __LINE__, fmt, ##__VA_ARGS__)
-#define ext4_warning(sb, fmt, ...) \
- __ext4_warning(sb, __func__, __LINE__, fmt, ##__VA_ARGS__)
-#define ext4_msg(sb, level, fmt, ...) \
- __ext4_msg(sb, level, fmt, ##__VA_ARGS__)
-#define dump_mmp_msg(sb, mmp, msg) \
- __dump_mmp_msg(sb, mmp, __func__, __LINE__, msg)
-#define ext4_grp_locked_error(sb, grp, ino, block, fmt, ...) \
- __ext4_grp_locked_error(__func__, __LINE__, sb, grp, ino, block, \
- fmt, ##__VA_ARGS__)
-
-#else
-
-#define ext4_error_inode(inode, func, line, block, fmt, ...) \
-do { \
- no_printk(fmt, ##__VA_ARGS__); \
- __ext4_error_inode(inode, "", 0, block, " "); \
-} while (0)
-#define ext4_error_file(file, func, line, block, fmt, ...) \
-do { \
- no_printk(fmt, ##__VA_ARGS__); \
- __ext4_error_file(file, "", 0, block, " "); \
-} while (0)
-#define ext4_error(sb, fmt, ...) \
-do { \
- no_printk(fmt, ##__VA_ARGS__); \
- __ext4_error(sb, "", 0, " "); \
-} while (0)
-#define ext4_abort(sb, fmt, ...) \
-do { \
- no_printk(fmt, ##__VA_ARGS__); \
- __ext4_abort(sb, "", 0, " "); \
-} while (0)
-#define ext4_warning(sb, fmt, ...) \
-do { \
- no_printk(fmt, ##__VA_ARGS__); \
- __ext4_warning(sb, "", 0, " "); \
-} while (0)
-#define ext4_msg(sb, level, fmt, ...) \
-do { \
- no_printk(fmt, ##__VA_ARGS__); \
- __ext4_msg(sb, "", " "); \
-} while (0)
-#define dump_mmp_msg(sb, mmp, msg) \
- __dump_mmp_msg(sb, mmp, "", 0, "")
-#define ext4_grp_locked_error(sb, grp, ino, block, fmt, ...) \
-do { \
- no_printk(fmt, ##__VA_ARGS__); \
- __ext4_grp_locked_error("", 0, sb, grp, ino, block, " "); \
-} while (0)
-
-#endif
-
+#define ext4_grp_locked_error(sb, grp, message...) \
+ __ext4_grp_locked_error(__func__, __LINE__, (sb), (grp), ## message)
extern void ext4_update_dynamic_rev(struct super_block *sb);
extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb,
__u32 compat);
@@ -2369,7 +2312,6 @@ struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
{
struct ext4_group_info ***grp_info;
long indexv, indexh;
- BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
grp_info = EXT4_SB(sb)->s_group_info;
indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
@@ -2573,7 +2515,7 @@ extern int ext4_try_create_inline_dir(handle_t *handle,
struct inode *parent,
struct inode *inode);
extern int ext4_read_inline_dir(struct file *filp,
- struct dir_context *ctx,
+ void *dirent, filldir_t filldir,
int *has_inline_data);
extern int htree_inlinedir_to_tree(struct file *dir_file,
struct inode *dir, ext4_lblk_t block,
@@ -2656,7 +2598,8 @@ struct ext4_extent;
extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
-extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents);
+extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
+ int chunk);
extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
extern void ext4_ext_truncate(handle_t *, struct inode *);
@@ -2666,8 +2609,8 @@ extern void ext4_ext_init(struct super_block *);
extern void ext4_ext_release(struct super_block *);
extern long ext4_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
-extern int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
- loff_t offset, ssize_t len);
+extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
+ ssize_t len);
extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
extern int ext4_ext_calc_metadata_amount(struct inode *inode,
@@ -2707,15 +2650,12 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
/* page-io.c */
extern int __init ext4_init_pageio(void);
+extern void ext4_add_complete_io(ext4_io_end_t *io_end);
extern void ext4_exit_pageio(void);
+extern void ext4_ioend_shutdown(struct inode *);
+extern void ext4_free_io_end(ext4_io_end_t *io);
extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
-extern ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end);
-extern int ext4_put_io_end(ext4_io_end_t *io_end);
-extern void ext4_put_io_end_defer(ext4_io_end_t *io_end);
-extern void ext4_io_submit_init(struct ext4_io_submit *io,
- struct writeback_control *wbc);
-extern void ext4_end_io_rsv_work(struct work_struct *work);
-extern void ext4_end_io_unrsv_work(struct work_struct *work);
+extern void ext4_end_io_work(struct work_struct *work);
extern void ext4_io_submit(struct ext4_io_submit *io);
extern int ext4_bio_write_page(struct ext4_io_submit *io,
struct page *page,
@@ -2728,17 +2668,20 @@ extern void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp);
extern int ext4_mmp_csum_verify(struct super_block *sb,
struct mmp_struct *mmp);
-/*
- * Note that these flags will never ever appear in a buffer_head's state flag.
- * See EXT4_MAP_... to see where this is used.
- */
+/* BH_Uninit flag: blocks are allocated but uninitialized on disk */
enum ext4_state_bits {
BH_Uninit /* blocks are allocated but uninitialized on disk */
- = BH_JBDPrivateStart,
+ = BH_JBDPrivateStart,
BH_AllocFromCluster, /* allocated blocks were part of already
- * allocated cluster. */
+ * allocated cluster. Note that this flag will
+ * never, ever appear in a buffer_head's state
+ * flag. See EXT4_MAP_FROM_CLUSTER to see where
+ * this is used. */
};
+BUFFER_FNS(Uninit, uninit)
+TAS_BUFFER_FNS(Uninit, uninit)
+
/*
* Add new method to test whether block and inode bitmaps are properly
* initialized. With uninit_bg reading the block from disk is not enough
diff --git a/trunk/fs/ext4/ext4_jbd2.c b/trunk/fs/ext4/ext4_jbd2.c
index 72a3600aedbd..451eb4045330 100644
--- a/trunk/fs/ext4/ext4_jbd2.c
+++ b/trunk/fs/ext4/ext4_jbd2.c
@@ -38,43 +38,31 @@ static void ext4_put_nojournal(handle_t *handle)
/*
* Wrappers for jbd2_journal_start/end.
*/
-static int ext4_journal_check_start(struct super_block *sb)
+handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
+ int type, int nblocks)
{
journal_t *journal;
might_sleep();
+
+ trace_ext4_journal_start(sb, nblocks, _RET_IP_);
if (sb->s_flags & MS_RDONLY)
- return -EROFS;
+ return ERR_PTR(-EROFS);
+
WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE);
journal = EXT4_SB(sb)->s_journal;
+ if (!journal)
+ return ext4_get_nojournal();
/*
* Special case here: if the journal has aborted behind our
* backs (eg. EIO in the commit thread), then we still need to
* take the FS itself readonly cleanly.
*/
- if (journal && is_journal_aborted(journal)) {
+ if (is_journal_aborted(journal)) {
ext4_abort(sb, "Detected aborted journal");
- return -EROFS;
+ return ERR_PTR(-EROFS);
}
- return 0;
-}
-
-handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
- int type, int blocks, int rsv_blocks)
-{
- journal_t *journal;
- int err;
-
- trace_ext4_journal_start(sb, blocks, rsv_blocks, _RET_IP_);
- err = ext4_journal_check_start(sb);
- if (err < 0)
- return ERR_PTR(err);
-
- journal = EXT4_SB(sb)->s_journal;
- if (!journal)
- return ext4_get_nojournal();
- return jbd2__journal_start(journal, blocks, rsv_blocks, GFP_NOFS,
- type, line);
+ return jbd2__journal_start(journal, nblocks, GFP_NOFS, type, line);
}
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
@@ -98,30 +86,6 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
return err;
}
-handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
- int type)
-{
- struct super_block *sb;
- int err;
-
- if (!ext4_handle_valid(handle))
- return ext4_get_nojournal();
-
- sb = handle->h_journal->j_private;
- trace_ext4_journal_start_reserved(sb, handle->h_buffer_credits,
- _RET_IP_);
- err = ext4_journal_check_start(sb);
- if (err < 0) {
- jbd2_journal_free_reserved(handle);
- return ERR_PTR(err);
- }
-
- err = jbd2_journal_start_reserved(handle, type, line);
- if (err < 0)
- return ERR_PTR(err);
- return handle;
-}
-
void ext4_journal_abort_handle(const char *caller, unsigned int line,
const char *err_fn, struct buffer_head *bh,
handle_t *handle, int err)
diff --git a/trunk/fs/ext4/ext4_jbd2.h b/trunk/fs/ext4/ext4_jbd2.h
index 2877258d9497..c8c6885406db 100644
--- a/trunk/fs/ext4/ext4_jbd2.h
+++ b/trunk/fs/ext4/ext4_jbd2.h
@@ -134,8 +134,7 @@ static inline int ext4_jbd2_credits_xattr(struct inode *inode)
#define EXT4_HT_MIGRATE 8
#define EXT4_HT_MOVE_EXTENTS 9
#define EXT4_HT_XATTR 10
-#define EXT4_HT_EXT_CONVERT 11
-#define EXT4_HT_MAX 12
+#define EXT4_HT_MAX 11
/**
* struct ext4_journal_cb_entry - Base structure for callback information.
@@ -266,7 +265,7 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
__ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
- int type, int blocks, int rsv_blocks);
+ int type, int nblocks);
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle);
#define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096)
@@ -301,37 +300,21 @@ static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed)
}
#define ext4_journal_start_sb(sb, type, nblocks) \
- __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks), 0)
+ __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks))
#define ext4_journal_start(inode, type, nblocks) \
- __ext4_journal_start((inode), __LINE__, (type), (nblocks), 0)
-
-#define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks) \
- __ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks))
+ __ext4_journal_start((inode), __LINE__, (type), (nblocks))
static inline handle_t *__ext4_journal_start(struct inode *inode,
unsigned int line, int type,
- int blocks, int rsv_blocks)
+ int nblocks)
{
- return __ext4_journal_start_sb(inode->i_sb, line, type, blocks,
- rsv_blocks);
+ return __ext4_journal_start_sb(inode->i_sb, line, type, nblocks);
}
#define ext4_journal_stop(handle) \
__ext4_journal_stop(__func__, __LINE__, (handle))
-#define ext4_journal_start_reserved(handle, type) \
- __ext4_journal_start_reserved((handle), __LINE__, (type))
-
-handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
- int type);
-
-static inline void ext4_journal_free_reserved(handle_t *handle)
-{
- if (ext4_handle_valid(handle))
- jbd2_journal_free_reserved(handle);
-}
-
static inline handle_t *ext4_journal_current_handle(void)
{
return journal_current_handle();
diff --git a/trunk/fs/ext4/extents.c b/trunk/fs/ext4/extents.c
index 7097b0f680e6..bc0f1910b9cf 100644
--- a/trunk/fs/ext4/extents.c
+++ b/trunk/fs/ext4/extents.c
@@ -2125,8 +2125,7 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
next_del = ext4_find_delayed_extent(inode, &es);
if (!exists && next_del) {
exists = 1;
- flags |= (FIEMAP_EXTENT_DELALLOC |
- FIEMAP_EXTENT_UNKNOWN);
+ flags |= FIEMAP_EXTENT_DELALLOC;
}
up_read(&EXT4_I(inode)->i_data_sem);
@@ -2329,15 +2328,17 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
}
/*
- * How many index/leaf blocks need to change/allocate to add @extents extents?
+ * How many index/leaf blocks need to change/allocate to modify nrblocks?
*
- * If we add a single extent, then in the worse case, each tree level
- * index/leaf need to be changed in case of the tree split.
+ * if nrblocks are fit in a single extent (chunk flag is 1), then
+ * in the worse case, each tree level index/leaf need to be changed
+ * if the tree split due to insert a new extent, then the old tree
+ * index/leaf need to be updated too
*
- * If more extents are inserted, they could cause the whole tree split more
- * than once, but this is really rare.
+ * If the nrblocks are discontiguous, they could cause
+ * the whole tree split more than once, but this is really rare.
*/
-int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
+int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
int index;
int depth;
@@ -2348,7 +2349,7 @@ int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
depth = ext_depth(inode);
- if (extents <= 1)
+ if (chunk)
index = depth * 2;
else
index = depth * 3;
@@ -2356,24 +2357,20 @@ int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
return index;
}
-static inline int get_default_free_blocks_flags(struct inode *inode)
-{
- if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
- return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
- else if (ext4_should_journal_data(inode))
- return EXT4_FREE_BLOCKS_FORGET;
- return 0;
-}
-
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
struct ext4_extent *ex,
- long long *partial_cluster,
+ ext4_fsblk_t *partial_cluster,
ext4_lblk_t from, ext4_lblk_t to)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
unsigned short ee_len = ext4_ext_get_actual_len(ex);
ext4_fsblk_t pblk;
- int flags = get_default_free_blocks_flags(inode);
+ int flags = 0;
+
+ if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+ flags |= EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
+ else if (ext4_should_journal_data(inode))
+ flags |= EXT4_FREE_BLOCKS_FORGET;
/*
* For bigalloc file systems, we never free a partial cluster
@@ -2391,8 +2388,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
* partial cluster here.
*/
pblk = ext4_ext_pblock(ex) + ee_len - 1;
- if ((*partial_cluster > 0) &&
- (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
+ if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
ext4_free_blocks(handle, inode, NULL,
EXT4_C2B(sbi, *partial_cluster),
sbi->s_cluster_ratio, flags);
@@ -2418,46 +2414,41 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
&& to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
/* tail removal */
ext4_lblk_t num;
- unsigned int unaligned;
num = le32_to_cpu(ex->ee_block) + ee_len - from;
pblk = ext4_ext_pblock(ex) + ee_len - num;
- /*
- * Usually we want to free partial cluster at the end of the
- * extent, except for the situation when the cluster is still
- * used by any other extent (partial_cluster is negative).
- */
- if (*partial_cluster < 0 &&
- -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1))
- flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
-
- ext_debug("free last %u blocks starting %llu partial %lld\n",
- num, pblk, *partial_cluster);
+ ext_debug("free last %u blocks starting %llu\n", num, pblk);
ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
/*
* If the block range to be freed didn't start at the
* beginning of a cluster, and we removed the entire
- * extent and the cluster is not used by any other extent,
- * save the partial cluster here, since we might need to
- * delete if we determine that the truncate operation has
- * removed all of the blocks in the cluster.
- *
- * On the other hand, if we did not manage to free the whole
- * extent, we have to mark the cluster as used (store negative
- * cluster number in partial_cluster).
+ * extent, save the partial cluster here, since we
+ * might need to delete if we determine that the
+ * truncate operation has removed all of the blocks in
+ * the cluster.
*/
- unaligned = pblk & (sbi->s_cluster_ratio - 1);
- if (unaligned && (ee_len == num) &&
- (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
+ if (pblk & (sbi->s_cluster_ratio - 1) &&
+ (ee_len == num))
*partial_cluster = EXT4_B2C(sbi, pblk);
- else if (unaligned)
- *partial_cluster = -((long long)EXT4_B2C(sbi, pblk));
- else if (*partial_cluster > 0)
+ else
*partial_cluster = 0;
- } else
- ext4_error(sbi->s_sb, "strange request: removal(2) "
- "%u-%u from %u:%u\n",
- from, to, le32_to_cpu(ex->ee_block), ee_len);
+ } else if (from == le32_to_cpu(ex->ee_block)
+ && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
+ /* head removal */
+ ext4_lblk_t num;
+ ext4_fsblk_t start;
+
+ num = to - from;
+ start = ext4_ext_pblock(ex);
+
+ ext_debug("free first %u blocks starting %llu\n", num, start);
+ ext4_free_blocks(handle, inode, NULL, start, num, flags);
+
+ } else {
+ printk(KERN_INFO "strange request: removal(2) "
+ "%u-%u from %u:%u\n",
+ from, to, le32_to_cpu(ex->ee_block), ee_len);
+ }
return 0;
}
@@ -2470,16 +2461,12 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
* @handle: The journal handle
* @inode: The files inode
* @path: The path to the leaf
- * @partial_cluster: The cluster which we'll have to free if all extents
- * has been released from it. It gets negative in case
- * that the cluster is still used.
* @start: The first block to remove
* @end: The last block to remove
*/
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
- struct ext4_ext_path *path,
- long long *partial_cluster,
+ struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster,
ext4_lblk_t start, ext4_lblk_t end)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -2492,7 +2479,6 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
unsigned short ex_ee_len;
unsigned uninitialized = 0;
struct ext4_extent *ex;
- ext4_fsblk_t pblk;
/* the header must be checked already in ext4_ext_remove_space() */
ext_debug("truncate since %u in leaf to %u\n", start, end);
@@ -2504,9 +2490,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
return -EIO;
}
/* find where to start removing */
- ex = path[depth].p_ext;
- if (!ex)
- ex = EXT_LAST_EXTENT(eh);
+ ex = EXT_LAST_EXTENT(eh);
ex_ee_block = le32_to_cpu(ex->ee_block);
ex_ee_len = ext4_ext_get_actual_len(ex);
@@ -2533,16 +2517,6 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
/* If this extent is beyond the end of the hole, skip it */
if (end < ex_ee_block) {
- /*
- * We're going to skip this extent and move to another,
- * so if this extent is not cluster aligned we have
- * to mark the current cluster as used to avoid
- * accidentally freeing it later on
- */
- pblk = ext4_ext_pblock(ex);
- if (pblk & (sbi->s_cluster_ratio - 1))
- *partial_cluster =
- -((long long)EXT4_B2C(sbi, pblk));
ex--;
ex_ee_block = le32_to_cpu(ex->ee_block);
ex_ee_len = ext4_ext_get_actual_len(ex);
@@ -2618,7 +2592,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
sizeof(struct ext4_extent));
}
le16_add_cpu(&eh->eh_entries, -1);
- } else if (*partial_cluster > 0)
+ } else
*partial_cluster = 0;
err = ext4_ext_dirty(handle, inode, path + depth);
@@ -2636,13 +2610,17 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
err = ext4_ext_correct_indexes(handle, inode, path);
/*
- * Free the partial cluster only if the current extent does not
- * reference it. Otherwise we might free used cluster.
+ * If there is still a entry in the leaf node, check to see if
+ * it references the partial cluster. This is the only place
+ * where it could; if it doesn't, we can free the cluster.
*/
- if (*partial_cluster > 0 &&
+ if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) &&
(EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
*partial_cluster)) {
- int flags = get_default_free_blocks_flags(inode);
+ int flags = EXT4_FREE_BLOCKS_FORGET;
+
+ if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+ flags |= EXT4_FREE_BLOCKS_METADATA;
ext4_free_blocks(handle, inode, NULL,
EXT4_C2B(sbi, *partial_cluster),
@@ -2686,7 +2664,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
struct super_block *sb = inode->i_sb;
int depth = ext_depth(inode);
struct ext4_ext_path *path = NULL;
- long long partial_cluster = 0;
+ ext4_fsblk_t partial_cluster = 0;
handle_t *handle;
int i = 0, err = 0;
@@ -2698,7 +2676,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
return PTR_ERR(handle);
again:
- trace_ext4_ext_remove_space(inode, start, end, depth);
+ trace_ext4_ext_remove_space(inode, start, depth);
/*
* Check if we are removing extents inside the extent tree. If that
@@ -2866,14 +2844,17 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
}
}
- trace_ext4_ext_remove_space_done(inode, start, end, depth,
- partial_cluster, path->p_hdr->eh_entries);
+ trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
+ path->p_hdr->eh_entries);
/* If we still have something in the partial cluster and we have removed
* even the first extent, then we should free the blocks in the partial
* cluster as well. */
- if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) {
- int flags = get_default_free_blocks_flags(inode);
+ if (partial_cluster && path->p_hdr->eh_entries == 0) {
+ int flags = EXT4_FREE_BLOCKS_FORGET;
+
+ if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+ flags |= EXT4_FREE_BLOCKS_METADATA;
ext4_free_blocks(handle, inode, NULL,
EXT4_C2B(EXT4_SB(sb), partial_cluster),
@@ -4382,7 +4363,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
}
out3:
- trace_ext4_ext_map_blocks_exit(inode, flags, map, err ? err : allocated);
+ trace_ext4_ext_map_blocks_exit(inode, map, err ? err : allocated);
return err ? err : allocated;
}
@@ -4465,7 +4446,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
return -EOPNOTSUPP;
if (mode & FALLOC_FL_PUNCH_HOLE)
- return ext4_punch_hole(inode, offset, len);
+ return ext4_punch_hole(file, offset, len);
ret = ext4_convert_inline_data(inode);
if (ret)
@@ -4567,9 +4548,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
* function, to convert the fallocated extents after IO is completed.
* Returns 0 on success.
*/
-int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
- loff_t offset, ssize_t len)
+int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
+ ssize_t len)
{
+ handle_t *handle;
unsigned int max_blocks;
int ret = 0;
int ret2 = 0;
@@ -4584,32 +4566,16 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
map.m_lblk);
/*
- * This is somewhat ugly but the idea is clear: When transaction is
- * reserved, everything goes into it. Otherwise we rather start several
- * smaller transactions for conversion of each extent separately.
+ * credits to insert 1 extent into extent tree
*/
- if (handle) {
- handle = ext4_journal_start_reserved(handle,
- EXT4_HT_EXT_CONVERT);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- credits = 0;
- } else {
- /*
- * credits to insert 1 extent into extent tree
- */
- credits = ext4_chunk_trans_blocks(inode, max_blocks);
- }
+ credits = ext4_chunk_trans_blocks(inode, max_blocks);
while (ret >= 0 && ret < max_blocks) {
map.m_lblk += ret;
map.m_len = (max_blocks -= ret);
- if (credits) {
- handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
- credits);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- break;
- }
+ handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ break;
}
ret = ext4_map_blocks(handle, inode, &map,
EXT4_GET_BLOCKS_IO_CONVERT_EXT);
@@ -4620,13 +4586,10 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
inode->i_ino, map.m_lblk,
map.m_len, ret);
ext4_mark_inode_dirty(handle, inode);
- if (credits)
- ret2 = ext4_journal_stop(handle);
- if (ret <= 0 || ret2)
+ ret2 = ext4_journal_stop(handle);
+ if (ret <= 0 || ret2 )
break;
}
- if (!credits)
- ret2 = ext4_journal_stop(handle);
return ret > 0 ? ret2 : ret;
}
@@ -4696,7 +4659,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
error = ext4_get_inode_loc(inode, &iloc);
if (error)
return error;
- physical = (__u64)iloc.bh->b_blocknr << blockbits;
+ physical = iloc.bh->b_blocknr << blockbits;
offset = EXT4_GOOD_OLD_INODE_SIZE +
EXT4_I(inode)->i_extra_isize;
physical += offset;
@@ -4704,7 +4667,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
flags |= FIEMAP_EXTENT_DATA_INLINE;
brelse(iloc.bh);
} else { /* external block */
- physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
+ physical = EXT4_I(inode)->i_file_acl << blockbits;
length = inode->i_sb->s_blocksize;
}
diff --git a/trunk/fs/ext4/extents_status.c b/trunk/fs/ext4/extents_status.c
index ee018d5f397e..e6941e622d31 100644
--- a/trunk/fs/ext4/extents_status.c
+++ b/trunk/fs/ext4/extents_status.c
@@ -10,7 +10,6 @@
* Ext4 extents status tree core functions.
*/
#include
-#include
#include "ext4.h"
#include "extents_status.h"
#include "ext4_extents.h"
@@ -292,6 +291,7 @@ void ext4_es_find_delayed_extent_range(struct inode *inode,
read_unlock(&EXT4_I(inode)->i_es_lock);
+ ext4_es_lru_add(inode);
trace_ext4_es_find_delayed_extent_range_exit(inode, es);
}
@@ -672,6 +672,7 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
error:
write_unlock(&EXT4_I(inode)->i_es_lock);
+ ext4_es_lru_add(inode);
ext4_es_print_tree(inode);
return err;
@@ -733,6 +734,7 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
read_unlock(&EXT4_I(inode)->i_es_lock);
+ ext4_es_lru_add(inode);
trace_ext4_es_lookup_extent_exit(inode, es, found);
return found;
}
@@ -876,28 +878,12 @@ int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex)
EXTENT_STATUS_WRITTEN);
}
-static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a,
- struct list_head *b)
-{
- struct ext4_inode_info *eia, *eib;
- eia = list_entry(a, struct ext4_inode_info, i_es_lru);
- eib = list_entry(b, struct ext4_inode_info, i_es_lru);
-
- if (eia->i_touch_when == eib->i_touch_when)
- return 0;
- if (time_after(eia->i_touch_when, eib->i_touch_when))
- return 1;
- else
- return -1;
-}
-
static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
struct ext4_sb_info *sbi = container_of(shrink,
struct ext4_sb_info, s_es_shrinker);
struct ext4_inode_info *ei;
- struct list_head *cur, *tmp;
- LIST_HEAD(skiped);
+ struct list_head *cur, *tmp, scanned;
int nr_to_scan = sc->nr_to_scan;
int ret, nr_shrunk = 0;
@@ -907,41 +893,23 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
if (!nr_to_scan)
return ret;
- spin_lock(&sbi->s_es_lru_lock);
-
- /*
- * If the inode that is at the head of LRU list is newer than
- * last_sorted time, that means that we need to sort this list.
- */
- ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info, i_es_lru);
- if (sbi->s_es_last_sorted < ei->i_touch_when) {
- list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp);
- sbi->s_es_last_sorted = jiffies;
- }
+ INIT_LIST_HEAD(&scanned);
+ spin_lock(&sbi->s_es_lru_lock);
list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
- /*
- * If we have already reclaimed all extents from extent
- * status tree, just stop the loop immediately.
- */
- if (percpu_counter_read_positive(&sbi->s_extent_cache_cnt) == 0)
- break;
+ list_move_tail(cur, &scanned);
ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
- /* Skip the inode that is newer than the last_sorted time */
- if (sbi->s_es_last_sorted < ei->i_touch_when) {
- list_move_tail(cur, &skiped);
+ read_lock(&ei->i_es_lock);
+ if (ei->i_es_lru_nr == 0) {
+ read_unlock(&ei->i_es_lock);
continue;
}
-
- if (ei->i_es_lru_nr == 0)
- continue;
+ read_unlock(&ei->i_es_lock);
write_lock(&ei->i_es_lock);
ret = __es_try_to_reclaim_extents(ei, nr_to_scan);
- if (ei->i_es_lru_nr == 0)
- list_del_init(&ei->i_es_lru);
write_unlock(&ei->i_es_lock);
nr_shrunk += ret;
@@ -949,9 +917,7 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
if (nr_to_scan == 0)
break;
}
-
- /* Move the newer inodes into the tail of the LRU list. */
- list_splice_tail(&skiped, &sbi->s_es_lru);
+ list_splice_tail(&scanned, &sbi->s_es_lru);
spin_unlock(&sbi->s_es_lru_lock);
ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
@@ -959,19 +925,21 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
return ret;
}
-void ext4_es_register_shrinker(struct ext4_sb_info *sbi)
+void ext4_es_register_shrinker(struct super_block *sb)
{
+ struct ext4_sb_info *sbi;
+
+ sbi = EXT4_SB(sb);
INIT_LIST_HEAD(&sbi->s_es_lru);
spin_lock_init(&sbi->s_es_lru_lock);
- sbi->s_es_last_sorted = 0;
sbi->s_es_shrinker.shrink = ext4_es_shrink;
sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&sbi->s_es_shrinker);
}
-void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
+void ext4_es_unregister_shrinker(struct super_block *sb)
{
- unregister_shrinker(&sbi->s_es_shrinker);
+ unregister_shrinker(&EXT4_SB(sb)->s_es_shrinker);
}
void ext4_es_lru_add(struct inode *inode)
@@ -979,14 +947,11 @@ void ext4_es_lru_add(struct inode *inode)
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- ei->i_touch_when = jiffies;
-
- if (!list_empty(&ei->i_es_lru))
- return;
-
spin_lock(&sbi->s_es_lru_lock);
if (list_empty(&ei->i_es_lru))
list_add_tail(&ei->i_es_lru, &sbi->s_es_lru);
+ else
+ list_move_tail(&ei->i_es_lru, &sbi->s_es_lru);
spin_unlock(&sbi->s_es_lru_lock);
}
diff --git a/trunk/fs/ext4/extents_status.h b/trunk/fs/ext4/extents_status.h
index e936730cc5b0..f740eb03b707 100644
--- a/trunk/fs/ext4/extents_status.h
+++ b/trunk/fs/ext4/extents_status.h
@@ -39,7 +39,6 @@
EXTENT_STATUS_DELAYED | \
EXTENT_STATUS_HOLE)
-struct ext4_sb_info;
struct ext4_extent;
struct extent_status {
@@ -120,8 +119,8 @@ static inline void ext4_es_store_status(struct extent_status *es,
es->es_pblk = block;
}
-extern void ext4_es_register_shrinker(struct ext4_sb_info *sbi);
-extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi);
+extern void ext4_es_register_shrinker(struct super_block *sb);
+extern void ext4_es_unregister_shrinker(struct super_block *sb);
extern void ext4_es_lru_add(struct inode *inode);
extern void ext4_es_lru_del(struct inode *inode);
diff --git a/trunk/fs/ext4/file.c b/trunk/fs/ext4/file.c
index b19f0a457f32..b1b4d51b5d86 100644
--- a/trunk/fs/ext4/file.c
+++ b/trunk/fs/ext4/file.c
@@ -312,7 +312,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
blkbits = inode->i_sb->s_blocksize_bits;
startoff = *offset;
lastoff = startoff;
- endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;
+ endoff = (map->m_lblk + map->m_len) << blkbits;
index = startoff >> PAGE_CACHE_SHIFT;
end = endoff >> PAGE_CACHE_SHIFT;
@@ -457,7 +457,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
ret = ext4_map_blocks(NULL, inode, &map, 0);
if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
if (last != start)
- dataoff = (loff_t)last << blkbits;
+ dataoff = last << blkbits;
break;
}
@@ -468,7 +468,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
ext4_es_find_delayed_extent_range(inode, last, last, &es);
if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
if (last != start)
- dataoff = (loff_t)last << blkbits;
+ dataoff = last << blkbits;
break;
}
@@ -486,7 +486,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
}
last++;
- dataoff = (loff_t)last << blkbits;
+ dataoff = last << blkbits;
} while (last <= end);
mutex_unlock(&inode->i_mutex);
@@ -540,7 +540,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
ret = ext4_map_blocks(NULL, inode, &map, 0);
if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
last += ret;
- holeoff = (loff_t)last << blkbits;
+ holeoff = last << blkbits;
continue;
}
@@ -551,7 +551,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
ext4_es_find_delayed_extent_range(inode, last, last, &es);
if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
last = es.es_lblk + es.es_len;
- holeoff = (loff_t)last << blkbits;
+ holeoff = last << blkbits;
continue;
}
@@ -566,7 +566,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
&map, &holeoff);
if (!unwritten) {
last += ret;
- holeoff = (loff_t)last << blkbits;
+ holeoff = last << blkbits;
continue;
}
}
diff --git a/trunk/fs/ext4/fsync.c b/trunk/fs/ext4/fsync.c
index a8bc47f75fa0..e0ba8a408def 100644
--- a/trunk/fs/ext4/fsync.c
+++ b/trunk/fs/ext4/fsync.c
@@ -73,6 +73,32 @@ static int ext4_sync_parent(struct inode *inode)
return ret;
}
+/**
+ * __sync_file - generic_file_fsync without the locking and filemap_write
+ * @inode: inode to sync
+ * @datasync: only sync essential metadata if true
+ *
+ * This is just generic_file_fsync without the locking. This is needed for
+ * nojournal mode to make sure this inodes data/metadata makes it to disk
+ * properly. The i_mutex should be held already.
+ */
+static int __sync_inode(struct inode *inode, int datasync)
+{
+ int err;
+ int ret;
+
+ ret = sync_mapping_buffers(inode->i_mapping);
+ if (!(inode->i_state & I_DIRTY))
+ return ret;
+ if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+ return ret;
+
+ err = sync_inode_metadata(inode, 1);
+ if (ret == 0)
+ ret = err;
+ return ret;
+}
+
/*
* akpm: A new design for ext4_sync_file().
*
@@ -90,7 +116,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
struct inode *inode = file->f_mapping->host;
struct ext4_inode_info *ei = EXT4_I(inode);
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
- int ret = 0, err;
+ int ret, err;
tid_t commit_tid;
bool needs_barrier = false;
@@ -98,24 +124,25 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
trace_ext4_sync_file_enter(file, datasync);
- if (inode->i_sb->s_flags & MS_RDONLY) {
- /* Make sure that we read updated s_mount_flags value */
- smp_rmb();
- if (EXT4_SB(inode->i_sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
- ret = -EROFS;
+ ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ if (ret)
+ return ret;
+ mutex_lock(&inode->i_mutex);
+
+ if (inode->i_sb->s_flags & MS_RDONLY)
+ goto out;
+
+ ret = ext4_flush_unwritten_io(inode);
+ if (ret < 0)
goto out;
- }
if (!journal) {
- ret = generic_file_fsync(file, start, end, datasync);
+ ret = __sync_inode(inode, datasync);
if (!ret && !hlist_empty(&inode->i_dentry))
ret = ext4_sync_parent(inode);
goto out;
}
- ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
- if (ret)
- return ret;
/*
* data=writeback,ordered:
* The caller's filemap_fdatawrite()/wait will sync the data.
@@ -145,7 +172,8 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (!ret)
ret = err;
}
-out:
+ out:
+ mutex_unlock(&inode->i_mutex);
trace_ext4_sync_file_exit(inode, ret);
return ret;
}
diff --git a/trunk/fs/ext4/ialloc.c b/trunk/fs/ext4/ialloc.c
index f03598c6ffd3..00a818d67b54 100644
--- a/trunk/fs/ext4/ialloc.c
+++ b/trunk/fs/ext4/ialloc.c
@@ -747,8 +747,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
if (!handle) {
BUG_ON(nblocks <= 0);
handle = __ext4_journal_start_sb(dir->i_sb, line_no,
- handle_type, nblocks,
- 0);
+ handle_type, nblocks);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
ext4_std_error(sb, err);
diff --git a/trunk/fs/ext4/indirect.c b/trunk/fs/ext4/indirect.c
index 87b30cd357e7..b8d5d351e24f 100644
--- a/trunk/fs/ext4/indirect.c
+++ b/trunk/fs/ext4/indirect.c
@@ -624,7 +624,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
partial--;
}
out:
- trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
+ trace_ext4_ind_map_blocks_exit(inode, map, err);
return err;
}
@@ -675,6 +675,11 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
retry:
if (rw == READ && ext4_should_dioread_nolock(inode)) {
+ if (unlikely(atomic_read(&EXT4_I(inode)->i_unwritten))) {
+ mutex_lock(&inode->i_mutex);
+ ext4_flush_unwritten_io(inode);
+ mutex_unlock(&inode->i_mutex);
+ }
/*
* Nolock dioread optimization may be dynamically disabled
* via ext4_inode_block_unlocked_dio(). Check inode's state
@@ -774,18 +779,27 @@ int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
-/*
- * Calculate number of indirect blocks touched by mapping @nrblocks logically
- * contiguous blocks
- */
-int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
+int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
+ int indirects;
+
+ /* if nrblocks are contiguous */
+ if (chunk) {
+ /*
+ * With N contiguous data blocks, we need at most
+ * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
+ * 2 dindirect blocks, and 1 tindirect block
+ */
+ return DIV_ROUND_UP(nrblocks,
+ EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
+ }
/*
- * With N contiguous data blocks, we need at most
- * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
- * 2 dindirect blocks, and 1 tindirect block
+ * if nrblocks are not contiguous, worse case, each block touch
+ * a indirect block, and each indirect block touch a double indirect
+ * block, plus a triple indirect block
*/
- return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
+ indirects = nrblocks * 2 + 1;
+ return indirects;
}
/*
@@ -926,13 +940,11 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
__le32 *last)
{
__le32 *p;
- int flags = EXT4_FREE_BLOCKS_VALIDATED;
+ int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
int err;
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
- flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
- else if (ext4_should_journal_data(inode))
- flags |= EXT4_FREE_BLOCKS_FORGET;
+ flags |= EXT4_FREE_BLOCKS_METADATA;
if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
count)) {
diff --git a/trunk/fs/ext4/inline.c b/trunk/fs/ext4/inline.c
index d9ecbf1113a7..3e2bf873e8a8 100644
--- a/trunk/fs/ext4/inline.c
+++ b/trunk/fs/ext4/inline.c
@@ -72,7 +72,7 @@ static int get_max_inline_xattr_value_size(struct inode *inode,
entry = (struct ext4_xattr_entry *)
((void *)raw_inode + EXT4_I(inode)->i_inline_off);
- free += EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size));
+ free += le32_to_cpu(entry->e_value_size);
goto out;
}
@@ -1404,15 +1404,16 @@ int htree_inlinedir_to_tree(struct file *dir_file,
* offset as if '.' and '..' really take place.
*
*/
-int ext4_read_inline_dir(struct file *file,
- struct dir_context *ctx,
+int ext4_read_inline_dir(struct file *filp,
+ void *dirent, filldir_t filldir,
int *has_inline_data)
{
+ int error = 0;
unsigned int offset, parent_ino;
- int i;
+ int i, stored;
struct ext4_dir_entry_2 *de;
struct super_block *sb;
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
int ret, inline_size = 0;
struct ext4_iloc iloc;
void *dir_buf = NULL;
@@ -1443,8 +1444,9 @@ int ext4_read_inline_dir(struct file *file,
goto out;
sb = inode->i_sb;
+ stored = 0;
parent_ino = le32_to_cpu(((struct ext4_dir_entry_2 *)dir_buf)->inode);
- offset = ctx->pos;
+ offset = filp->f_pos;
/*
* dotdot_offset and dotdot_size is the real offset and
@@ -1458,74 +1460,104 @@ int ext4_read_inline_dir(struct file *file,
extra_offset = dotdot_size - EXT4_INLINE_DOTDOT_SIZE;
extra_size = extra_offset + inline_size;
- /*
- * If the version has changed since the last call to
- * readdir(2), then we might be pointing to an invalid
- * dirent right now. Scan from the start of the inline
- * dir to make sure.
- */
- if (file->f_version != inode->i_version) {
- for (i = 0; i < extra_size && i < offset;) {
- /*
- * "." is with offset 0 and
- * ".." is dotdot_offset.
- */
- if (!i) {
- i = dotdot_offset;
- continue;
- } else if (i == dotdot_offset) {
- i = dotdot_size;
- continue;
+ while (!error && !stored && filp->f_pos < extra_size) {
+revalidate:
+ /*
+ * If the version has changed since the last call to
+ * readdir(2), then we might be pointing to an invalid
+ * dirent right now. Scan from the start of the inline
+ * dir to make sure.
+ */
+ if (filp->f_version != inode->i_version) {
+ for (i = 0; i < extra_size && i < offset;) {
+ /*
+ * "." is with offset 0 and
+ * ".." is dotdot_offset.
+ */
+ if (!i) {
+ i = dotdot_offset;
+ continue;
+ } else if (i == dotdot_offset) {
+ i = dotdot_size;
+ continue;
+ }
+ /* for other entry, the real offset in
+ * the buf has to be tuned accordingly.
+ */
+ de = (struct ext4_dir_entry_2 *)
+ (dir_buf + i - extra_offset);
+ /* It's too expensive to do a full
+ * dirent test each time round this
+ * loop, but we do have to test at
+ * least that it is non-zero. A
+ * failure will be detected in the
+ * dirent test below. */
+ if (ext4_rec_len_from_disk(de->rec_len,
+ extra_size) < EXT4_DIR_REC_LEN(1))
+ break;
+ i += ext4_rec_len_from_disk(de->rec_len,
+ extra_size);
}
- /* for other entry, the real offset in
- * the buf has to be tuned accordingly.
- */
- de = (struct ext4_dir_entry_2 *)
- (dir_buf + i - extra_offset);
- /* It's too expensive to do a full
- * dirent test each time round this
- * loop, but we do have to test at
- * least that it is non-zero. A
- * failure will be detected in the
- * dirent test below. */
- if (ext4_rec_len_from_disk(de->rec_len, extra_size)
- < EXT4_DIR_REC_LEN(1))
- break;
- i += ext4_rec_len_from_disk(de->rec_len,
- extra_size);
+ offset = i;
+ filp->f_pos = offset;
+ filp->f_version = inode->i_version;
}
- offset = i;
- ctx->pos = offset;
- file->f_version = inode->i_version;
- }
- while (ctx->pos < extra_size) {
- if (ctx->pos == 0) {
- if (!dir_emit(ctx, ".", 1, inode->i_ino, DT_DIR))
- goto out;
- ctx->pos = dotdot_offset;
- continue;
- }
+ while (!error && filp->f_pos < extra_size) {
+ if (filp->f_pos == 0) {
+ error = filldir(dirent, ".", 1, 0, inode->i_ino,
+ DT_DIR);
+ if (error)
+ break;
+ stored++;
+ filp->f_pos = dotdot_offset;
+ continue;
+ }
- if (ctx->pos == dotdot_offset) {
- if (!dir_emit(ctx, "..", 2, parent_ino, DT_DIR))
- goto out;
- ctx->pos = dotdot_size;
- continue;
- }
+ if (filp->f_pos == dotdot_offset) {
+ error = filldir(dirent, "..", 2,
+ dotdot_offset,
+ parent_ino, DT_DIR);
+ if (error)
+ break;
+ stored++;
- de = (struct ext4_dir_entry_2 *)
- (dir_buf + ctx->pos - extra_offset);
- if (ext4_check_dir_entry(inode, file, de, iloc.bh, dir_buf,
- extra_size, ctx->pos))
- goto out;
- if (le32_to_cpu(de->inode)) {
- if (!dir_emit(ctx, de->name, de->name_len,
- le32_to_cpu(de->inode),
- get_dtype(sb, de->file_type)))
+ filp->f_pos = dotdot_size;
+ continue;
+ }
+
+ de = (struct ext4_dir_entry_2 *)
+ (dir_buf + filp->f_pos - extra_offset);
+ if (ext4_check_dir_entry(inode, filp, de,
+ iloc.bh, dir_buf,
+ extra_size, filp->f_pos)) {
+ ret = stored;
goto out;
+ }
+ if (le32_to_cpu(de->inode)) {
+ /* We might block in the next section
+ * if the data destination is
+ * currently swapped out. So, use a
+ * version stamp to detect whether or
+ * not the directory has been modified
+ * during the copy operation.
+ */
+ u64 version = filp->f_version;
+
+ error = filldir(dirent, de->name,
+ de->name_len,
+ filp->f_pos,
+ le32_to_cpu(de->inode),
+ get_dtype(sb, de->file_type));
+ if (error)
+ break;
+ if (version != filp->f_version)
+ goto revalidate;
+ stored++;
+ }
+ filp->f_pos += ext4_rec_len_from_disk(de->rec_len,
+ extra_size);
}
- ctx->pos += ext4_rec_len_from_disk(de->rec_len, extra_size);
}
out:
kfree(dir_buf);
@@ -1810,7 +1842,7 @@ int ext4_inline_data_fiemap(struct inode *inode,
if (error)
goto out;
- physical = (__u64)iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits;
+ physical = iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits;
physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
physical += offsetof(struct ext4_inode, i_block);
length = i_size_read(inode);
diff --git a/trunk/fs/ext4/inode.c b/trunk/fs/ext4/inode.c
index 0188e65e1f58..d6382b89ecbd 100644
--- a/trunk/fs/ext4/inode.c
+++ b/trunk/fs/ext4/inode.c
@@ -132,12 +132,12 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
new_size);
}
-static void ext4_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
+static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
-static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
- int pextents);
+static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
+ struct inode *inode, struct page *page, loff_t from,
+ loff_t length, int flags);
/*
* Test whether an inode is a fast symlink.
@@ -215,8 +215,7 @@ void ext4_evict_inode(struct inode *inode)
filemap_write_and_wait(&inode->i_data);
}
truncate_inode_pages(&inode->i_data, 0);
-
- WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
+ ext4_ioend_shutdown(inode);
goto no_delete;
}
@@ -226,8 +225,8 @@ void ext4_evict_inode(struct inode *inode)
if (ext4_should_order_data(inode))
ext4_begin_ordered_truncate(inode, 0);
truncate_inode_pages(&inode->i_data, 0);
+ ext4_ioend_shutdown(inode);
- WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
if (is_bad_inode(inode))
goto no_delete;
@@ -424,6 +423,66 @@ static int __check_block_validity(struct inode *inode, const char *func,
#define check_block_validity(inode, map) \
__check_block_validity((inode), __func__, __LINE__, (map))
+/*
+ * Return the number of contiguous dirty pages in a given inode
+ * starting at page frame idx.
+ */
+static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
+ unsigned int max_pages)
+{
+ struct address_space *mapping = inode->i_mapping;
+ pgoff_t index;
+ struct pagevec pvec;
+ pgoff_t num = 0;
+ int i, nr_pages, done = 0;
+
+ if (max_pages == 0)
+ return 0;
+ pagevec_init(&pvec, 0);
+ while (!done) {
+ index = idx;
+ nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+ PAGECACHE_TAG_DIRTY,
+ (pgoff_t)PAGEVEC_SIZE);
+ if (nr_pages == 0)
+ break;
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+ struct buffer_head *bh, *head;
+
+ lock_page(page);
+ if (unlikely(page->mapping != mapping) ||
+ !PageDirty(page) ||
+ PageWriteback(page) ||
+ page->index != idx) {
+ done = 1;
+ unlock_page(page);
+ break;
+ }
+ if (page_has_buffers(page)) {
+ bh = head = page_buffers(page);
+ do {
+ if (!buffer_delay(bh) &&
+ !buffer_unwritten(bh))
+ done = 1;
+ bh = bh->b_this_page;
+ } while (!done && (bh != head));
+ }
+ unlock_page(page);
+ if (done)
+ break;
+ idx++;
+ num++;
+ if (num >= max_pages) {
+ done = 1;
+ break;
+ }
+ }
+ pagevec_release(&pvec);
+ }
+ return num;
+}
+
#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
struct inode *inode,
@@ -514,8 +573,6 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
"logical block %lu\n", inode->i_ino, flags, map->m_len,
(unsigned long) map->m_lblk);
- ext4_es_lru_add(inode);
-
/* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
@@ -1061,13 +1118,10 @@ static int ext4_write_end(struct file *file,
}
}
- if (ext4_has_inline_data(inode)) {
- ret = ext4_write_inline_data_end(inode, pos, len,
- copied, page);
- if (ret < 0)
- goto errout;
- copied = ret;
- } else
+ if (ext4_has_inline_data(inode))
+ copied = ext4_write_inline_data_end(inode, pos, len,
+ copied, page);
+ else
copied = block_write_end(file, mapping, pos,
len, copied, page, fsdata);
@@ -1103,6 +1157,8 @@ static int ext4_write_end(struct file *file,
if (i_size_changed)
ext4_mark_inode_dirty(handle, inode);
+ if (copied < 0)
+ ret = copied;
if (pos + len > inode->i_size && ext4_can_truncate(inode))
/* if we have allocated more blocks and copied
* less. We will have blocks allocated outside
@@ -1359,28 +1415,21 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
}
static void ext4_da_page_release_reservation(struct page *page,
- unsigned int offset,
- unsigned int length)
+ unsigned long offset)
{
int to_release = 0;
struct buffer_head *head, *bh;
unsigned int curr_off = 0;
struct inode *inode = page->mapping->host;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- unsigned int stop = offset + length;
int num_clusters;
ext4_fsblk_t lblk;
- BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
-
head = page_buffers(page);
bh = head;
do {
unsigned int next_off = curr_off + bh->b_size;
- if (next_off > stop)
- break;
-
if ((offset <= curr_off) && (buffer_delay(bh))) {
to_release++;
clear_buffer_delay(bh);
@@ -1411,43 +1460,140 @@ static void ext4_da_page_release_reservation(struct page *page,
* Delayed allocation stuff
*/
-struct mpage_da_data {
- struct inode *inode;
- struct writeback_control *wbc;
+/*
+ * mpage_da_submit_io - walks through extent of pages and try to write
+ * them with writepage() call back
+ *
+ * @mpd->inode: inode
+ * @mpd->first_page: first page of the extent
+ * @mpd->next_page: page after the last page of the extent
+ *
+ * By the time mpage_da_submit_io() is called we expect all blocks
+ * to be allocated. this may be wrong if allocation failed.
+ *
+ * As pages are already locked by write_cache_pages(), we can't use it
+ */
+static int mpage_da_submit_io(struct mpage_da_data *mpd,
+ struct ext4_map_blocks *map)
+{
+ struct pagevec pvec;
+ unsigned long index, end;
+ int ret = 0, err, nr_pages, i;
+ struct inode *inode = mpd->inode;
+ struct address_space *mapping = inode->i_mapping;
+ loff_t size = i_size_read(inode);
+ unsigned int len, block_start;
+ struct buffer_head *bh, *page_bufs = NULL;
+ sector_t pblock = 0, cur_logical = 0;
+ struct ext4_io_submit io_submit;
- pgoff_t first_page; /* The first page to write */
- pgoff_t next_page; /* Current page to examine */
- pgoff_t last_page; /* Last page to examine */
+ BUG_ON(mpd->next_page <= mpd->first_page);
+ memset(&io_submit, 0, sizeof(io_submit));
/*
- * Extent to map - this can be after first_page because that can be
- * fully mapped. We somewhat abuse m_flags to store whether the extent
- * is delalloc or unwritten.
+ * We need to start from the first_page to the next_page - 1
+ * to make sure we also write the mapped dirty buffer_heads.
+ * If we look at mpd->b_blocknr we would only be looking
+ * at the currently mapped buffer_heads.
*/
- struct ext4_map_blocks map;
- struct ext4_io_submit io_submit; /* IO submission data */
-};
+ index = mpd->first_page;
+ end = mpd->next_page - 1;
+
+ pagevec_init(&pvec, 0);
+ while (index <= end) {
+ nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
+ if (nr_pages == 0)
+ break;
+ for (i = 0; i < nr_pages; i++) {
+ int skip_page = 0;
+ struct page *page = pvec.pages[i];
+
+ index = page->index;
+ if (index > end)
+ break;
+
+ if (index == size >> PAGE_CACHE_SHIFT)
+ len = size & ~PAGE_CACHE_MASK;
+ else
+ len = PAGE_CACHE_SIZE;
+ if (map) {
+ cur_logical = index << (PAGE_CACHE_SHIFT -
+ inode->i_blkbits);
+ pblock = map->m_pblk + (cur_logical -
+ map->m_lblk);
+ }
+ index++;
+
+ BUG_ON(!PageLocked(page));
+ BUG_ON(PageWriteback(page));
+
+ bh = page_bufs = page_buffers(page);
+ block_start = 0;
+ do {
+ if (map && (cur_logical >= map->m_lblk) &&
+ (cur_logical <= (map->m_lblk +
+ (map->m_len - 1)))) {
+ if (buffer_delay(bh)) {
+ clear_buffer_delay(bh);
+ bh->b_blocknr = pblock;
+ }
+ if (buffer_unwritten(bh) ||
+ buffer_mapped(bh))
+ BUG_ON(bh->b_blocknr != pblock);
+ if (map->m_flags & EXT4_MAP_UNINIT)
+ set_buffer_uninit(bh);
+ clear_buffer_unwritten(bh);
+ }
+
+ /*
+ * skip page if block allocation undone and
+ * block is dirty
+ */
+ if (ext4_bh_delay_or_unwritten(NULL, bh))
+ skip_page = 1;
+ bh = bh->b_this_page;
+ block_start += bh->b_size;
+ cur_logical++;
+ pblock++;
+ } while (bh != page_bufs);
+
+ if (skip_page) {
+ unlock_page(page);
+ continue;
+ }
+
+ clear_page_dirty_for_io(page);
+ err = ext4_bio_write_page(&io_submit, page, len,
+ mpd->wbc);
+ if (!err)
+ mpd->pages_written++;
+ /*
+ * In error case, we have to continue because
+ * remaining pages are still locked
+ */
+ if (ret == 0)
+ ret = err;
+ }
+ pagevec_release(&pvec);
+ }
+ ext4_io_submit(&io_submit);
+ return ret;
+}
-static void mpage_release_unused_pages(struct mpage_da_data *mpd,
- bool invalidate)
+static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
{
int nr_pages, i;
pgoff_t index, end;
struct pagevec pvec;
struct inode *inode = mpd->inode;
struct address_space *mapping = inode->i_mapping;
-
- /* This is necessary when next_page == 0. */
- if (mpd->first_page >= mpd->next_page)
- return;
+ ext4_lblk_t start, last;
index = mpd->first_page;
end = mpd->next_page - 1;
- if (invalidate) {
- ext4_lblk_t start, last;
- start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
- last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
- ext4_es_remove_extent(inode, start, last - start + 1);
- }
+
+ start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ ext4_es_remove_extent(inode, start, last - start + 1);
pagevec_init(&pvec, 0);
while (index <= end) {
@@ -1460,15 +1606,14 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
break;
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
- if (invalidate) {
- block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
- ClearPageUptodate(page);
- }
+ block_invalidatepage(page, 0);
+ ClearPageUptodate(page);
unlock_page(page);
}
index = pvec.pages[nr_pages - 1]->index + 1;
pagevec_release(&pvec);
}
+ return;
}
static void ext4_print_free_blocks(struct inode *inode)
@@ -1497,6 +1642,215 @@ static void ext4_print_free_blocks(struct inode *inode)
return;
}
+/*
+ * mpage_da_map_and_submit - go through given space, map them
+ * if necessary, and then submit them for I/O
+ *
+ * @mpd - bh describing space
+ *
+ * The function skips space we know is already mapped to disk blocks.
+ *
+ */
+static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
+{
+ int err, blks, get_blocks_flags;
+ struct ext4_map_blocks map, *mapp = NULL;
+ sector_t next = mpd->b_blocknr;
+ unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
+ loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
+ handle_t *handle = NULL;
+
+ /*
+ * If the blocks are mapped already, or we couldn't accumulate
+ * any blocks, then proceed immediately to the submission stage.
+ */
+ if ((mpd->b_size == 0) ||
+ ((mpd->b_state & (1 << BH_Mapped)) &&
+ !(mpd->b_state & (1 << BH_Delay)) &&
+ !(mpd->b_state & (1 << BH_Unwritten))))
+ goto submit_io;
+
+ handle = ext4_journal_current_handle();
+ BUG_ON(!handle);
+
+ /*
+ * Call ext4_map_blocks() to allocate any delayed allocation
+ * blocks, or to convert an uninitialized extent to be
+ * initialized (in the case where we have written into
+ * one or more preallocated blocks).
+ *
+ * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
+ * indicate that we are on the delayed allocation path. This
+ * affects functions in many different parts of the allocation
+ * call path. This flag exists primarily because we don't
+ * want to change *many* call functions, so ext4_map_blocks()
+ * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
+ * inode's allocation semaphore is taken.
+ *
+ * If the blocks in questions were delalloc blocks, set
+ * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
+ * variables are updated after the blocks have been allocated.
+ */
+ map.m_lblk = next;
+ map.m_len = max_blocks;
+ /*
+ * We're in delalloc path and it is possible that we're going to
+ * need more metadata blocks than previously reserved. However
+ * we must not fail because we're in writeback and there is
+ * nothing we can do about it so it might result in data loss.
+ * So use reserved blocks to allocate metadata if possible.
+ */
+ get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
+ EXT4_GET_BLOCKS_METADATA_NOFAIL;
+ if (ext4_should_dioread_nolock(mpd->inode))
+ get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
+ if (mpd->b_state & (1 << BH_Delay))
+ get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
+
+
+ blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
+ if (blks < 0) {
+ struct super_block *sb = mpd->inode->i_sb;
+
+ err = blks;
+ /*
+ * If get block returns EAGAIN or ENOSPC and there
+ * appears to be free blocks we will just let
+ * mpage_da_submit_io() unlock all of the pages.
+ */
+ if (err == -EAGAIN)
+ goto submit_io;
+
+ if (err == -ENOSPC && ext4_count_free_clusters(sb)) {
+ mpd->retval = err;
+ goto submit_io;
+ }
+
+ /*
+ * get block failure will cause us to loop in
+ * writepages, because a_ops->writepage won't be able
+ * to make progress. The page will be redirtied by
+ * writepage and writepages will again try to write
+ * the same.
+ */
+ if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
+ ext4_msg(sb, KERN_CRIT,
+ "delayed block allocation failed for inode %lu "
+ "at logical offset %llu with max blocks %zd "
+ "with error %d", mpd->inode->i_ino,
+ (unsigned long long) next,
+ mpd->b_size >> mpd->inode->i_blkbits, err);
+ ext4_msg(sb, KERN_CRIT,
+ "This should not happen!! Data will be lost");
+ if (err == -ENOSPC)
+ ext4_print_free_blocks(mpd->inode);
+ }
+ /* invalidate all the pages */
+ ext4_da_block_invalidatepages(mpd);
+
+ /* Mark this page range as having been completed */
+ mpd->io_done = 1;
+ return;
+ }
+ BUG_ON(blks == 0);
+
+ mapp = ↦
+ if (map.m_flags & EXT4_MAP_NEW) {
+ struct block_device *bdev = mpd->inode->i_sb->s_bdev;
+ int i;
+
+ for (i = 0; i < map.m_len; i++)
+ unmap_underlying_metadata(bdev, map.m_pblk + i);
+ }
+
+ /*
+ * Update on-disk size along with block allocation.
+ */
+ disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
+ if (disksize > i_size_read(mpd->inode))
+ disksize = i_size_read(mpd->inode);
+ if (disksize > EXT4_I(mpd->inode)->i_disksize) {
+ ext4_update_i_disksize(mpd->inode, disksize);
+ err = ext4_mark_inode_dirty(handle, mpd->inode);
+ if (err)
+ ext4_error(mpd->inode->i_sb,
+ "Failed to mark inode %lu dirty",
+ mpd->inode->i_ino);
+ }
+
+submit_io:
+ mpage_da_submit_io(mpd, mapp);
+ mpd->io_done = 1;
+}
+
+#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
+ (1 << BH_Delay) | (1 << BH_Unwritten))
+
+/*
+ * mpage_add_bh_to_extent - try to add one more block to extent of blocks
+ *
+ * @mpd->lbh - extent of blocks
+ * @logical - logical number of the block in the file
+ * @b_state - b_state of the buffer head added
+ *
+ * the function is used to collect contig. blocks in same state
+ */
+static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, sector_t logical,
+ unsigned long b_state)
+{
+ sector_t next;
+ int blkbits = mpd->inode->i_blkbits;
+ int nrblocks = mpd->b_size >> blkbits;
+
+ /*
+ * XXX Don't go larger than mballoc is willing to allocate
+ * This is a stopgap solution. We eventually need to fold
+ * mpage_da_submit_io() into this function and then call
+ * ext4_map_blocks() multiple times in a loop
+ */
+ if (nrblocks >= (8*1024*1024 >> blkbits))
+ goto flush_it;
+
+ /* check if the reserved journal credits might overflow */
+ if (!ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS)) {
+ if (nrblocks >= EXT4_MAX_TRANS_DATA) {
+ /*
+ * With non-extent format we are limited by the journal
+ * credit available. Total credit needed to insert
+ * nrblocks contiguous blocks is dependent on the
+ * nrblocks. So limit nrblocks.
+ */
+ goto flush_it;
+ }
+ }
+ /*
+ * First block in the extent
+ */
+ if (mpd->b_size == 0) {
+ mpd->b_blocknr = logical;
+ mpd->b_size = 1 << blkbits;
+ mpd->b_state = b_state & BH_FLAGS;
+ return;
+ }
+
+ next = mpd->b_blocknr + nrblocks;
+ /*
+ * Can we merge the block to our big extent?
+ */
+ if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
+ mpd->b_size += 1 << blkbits;
+ return;
+ }
+
+flush_it:
+ /*
+ * We couldn't merge the block to our extent, so we
+ * need to flush current extent and start new one
+ */
+ mpage_da_map_and_submit(mpd);
+ return;
+}
+
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
@@ -1529,8 +1883,6 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
"logical block %lu\n", inode->i_ino, map->m_len,
(unsigned long) map->m_lblk);
- ext4_es_lru_add(inode);
-
/* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, iblock, &es)) {
@@ -1804,7 +2156,7 @@ static int __ext4_journalled_writepage(struct page *page,
* lock so we have to do some magic.
*
* This function can get called via...
- * - ext4_writepages after taking page lock (have journal handle)
+ * - ext4_da_writepages after taking page lock (have journal handle)
* - journal_submit_inode_data_buffers (no journal handle)
* - shrink_page_list via the kswapd/direct reclaim (no journal handle)
* - grab_page_cache when doing write_begin (have journal handle)
@@ -1875,412 +2227,83 @@ static int ext4_writepage(struct page *page,
}
}
- if (PageChecked(page) && ext4_should_journal_data(inode))
- /*
- * It's mmapped pagecache. Add buffers and journal it. There
- * doesn't seem much point in redirtying the page here.
- */
- return __ext4_journalled_writepage(page, len);
-
- ext4_io_submit_init(&io_submit, wbc);
- io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
- if (!io_submit.io_end) {
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return -ENOMEM;
- }
- ret = ext4_bio_write_page(&io_submit, page, len, wbc);
- ext4_io_submit(&io_submit);
- /* Drop io_end reference we got from init */
- ext4_put_io_end_defer(io_submit.io_end);
- return ret;
-}
-
-#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
-
-/*
- * mballoc gives us at most this number of blocks...
- * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
- * The rest of mballoc seems to handle chunks upto full group size.
- */
-#define MAX_WRITEPAGES_EXTENT_LEN 2048
-
-/*
- * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
- *
- * @mpd - extent of blocks
- * @lblk - logical number of the block in the file
- * @b_state - b_state of the buffer head added
- *
- * the function is used to collect contig. blocks in same state
- */
-static int mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
- unsigned long b_state)
-{
- struct ext4_map_blocks *map = &mpd->map;
-
- /* Don't go larger than mballoc is willing to allocate */
- if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
- return 0;
-
- /* First block in the extent? */
- if (map->m_len == 0) {
- map->m_lblk = lblk;
- map->m_len = 1;
- map->m_flags = b_state & BH_FLAGS;
- return 1;
- }
-
- /* Can we merge the block to our big extent? */
- if (lblk == map->m_lblk + map->m_len &&
- (b_state & BH_FLAGS) == map->m_flags) {
- map->m_len++;
- return 1;
- }
- return 0;
-}
-
-static bool add_page_bufs_to_extent(struct mpage_da_data *mpd,
- struct buffer_head *head,
- struct buffer_head *bh,
- ext4_lblk_t lblk)
-{
- struct inode *inode = mpd->inode;
- ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
- >> inode->i_blkbits;
-
- do {
- BUG_ON(buffer_locked(bh));
-
- if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
- (!buffer_delay(bh) && !buffer_unwritten(bh)) ||
- lblk >= blocks) {
- /* Found extent to map? */
- if (mpd->map.m_len)
- return false;
- if (lblk >= blocks)
- return true;
- continue;
- }
- if (!mpage_add_bh_to_extent(mpd, lblk, bh->b_state))
- return false;
- } while (lblk++, (bh = bh->b_this_page) != head);
- return true;
-}
-
-static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
-{
- int len;
- loff_t size = i_size_read(mpd->inode);
- int err;
-
- BUG_ON(page->index != mpd->first_page);
- if (page->index == size >> PAGE_CACHE_SHIFT)
- len = size & ~PAGE_CACHE_MASK;
- else
- len = PAGE_CACHE_SIZE;
- clear_page_dirty_for_io(page);
- err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
- if (!err)
- mpd->wbc->nr_to_write--;
- mpd->first_page++;
-
- return err;
-}
-
-/*
- * mpage_map_buffers - update buffers corresponding to changed extent and
- * submit fully mapped pages for IO
- *
- * @mpd - description of extent to map, on return next extent to map
- *
- * Scan buffers corresponding to changed extent (we expect corresponding pages
- * to be already locked) and update buffer state according to new extent state.
- * We map delalloc buffers to their physical location, clear unwritten bits,
- * and mark buffers as uninit when we perform writes to uninitialized extents
- * and do extent conversion after IO is finished. If the last page is not fully
- * mapped, we update @map to the next extent in the last page that needs
- * mapping. Otherwise we submit the page for IO.
- */
-static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
-{
- struct pagevec pvec;
- int nr_pages, i;
- struct inode *inode = mpd->inode;
- struct buffer_head *head, *bh;
- int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
- ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
- >> inode->i_blkbits;
- pgoff_t start, end;
- ext4_lblk_t lblk;
- sector_t pblock;
- int err;
-
- start = mpd->map.m_lblk >> bpp_bits;
- end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
- lblk = start << bpp_bits;
- pblock = mpd->map.m_pblk;
-
- pagevec_init(&pvec, 0);
- while (start <= end) {
- nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start,
- PAGEVEC_SIZE);
- if (nr_pages == 0)
- break;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
-
- if (page->index > end)
- break;
- /* Upto 'end' pages must be contiguous */
- BUG_ON(page->index != start);
- bh = head = page_buffers(page);
- do {
- if (lblk < mpd->map.m_lblk)
- continue;
- if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
- /*
- * Buffer after end of mapped extent.
- * Find next buffer in the page to map.
- */
- mpd->map.m_len = 0;
- mpd->map.m_flags = 0;
- add_page_bufs_to_extent(mpd, head, bh,
- lblk);
- pagevec_release(&pvec);
- return 0;
- }
- if (buffer_delay(bh)) {
- clear_buffer_delay(bh);
- bh->b_blocknr = pblock++;
- }
- clear_buffer_unwritten(bh);
- } while (++lblk < blocks &&
- (bh = bh->b_this_page) != head);
-
- /*
- * FIXME: This is going to break if dioread_nolock
- * supports blocksize < pagesize as we will try to
- * convert potentially unmapped parts of inode.
- */
- mpd->io_submit.io_end->size += PAGE_CACHE_SIZE;
- /* Page fully mapped - let IO run! */
- err = mpage_submit_page(mpd, page);
- if (err < 0) {
- pagevec_release(&pvec);
- return err;
- }
- start++;
- }
- pagevec_release(&pvec);
- }
- /* Extent fully mapped and matches with page boundary. We are done. */
- mpd->map.m_len = 0;
- mpd->map.m_flags = 0;
- return 0;
-}
-
-static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
-{
- struct inode *inode = mpd->inode;
- struct ext4_map_blocks *map = &mpd->map;
- int get_blocks_flags;
- int err;
-
- trace_ext4_da_write_pages_extent(inode, map);
- /*
- * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
- * to convert an uninitialized extent to be initialized (in the case
- * where we have written into one or more preallocated blocks). It is
- * possible that we're going to need more metadata blocks than
- * previously reserved. However we must not fail because we're in
- * writeback and there is nothing we can do about it so it might result
- * in data loss. So use reserved blocks to allocate metadata if
- * possible.
- *
- * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if the blocks
- * in question are delalloc blocks. This affects functions in many
- * different parts of the allocation call path. This flag exists
- * primarily because we don't want to change *many* call functions, so
- * ext4_map_blocks() will set the EXT4_STATE_DELALLOC_RESERVED flag
- * once the inode's allocation semaphore is taken.
- */
- get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
- EXT4_GET_BLOCKS_METADATA_NOFAIL;
- if (ext4_should_dioread_nolock(inode))
- get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
- if (map->m_flags & (1 << BH_Delay))
- get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
-
- err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
- if (err < 0)
- return err;
- if (map->m_flags & EXT4_MAP_UNINIT) {
- if (!mpd->io_submit.io_end->handle &&
- ext4_handle_valid(handle)) {
- mpd->io_submit.io_end->handle = handle->h_rsv_handle;
- handle->h_rsv_handle = NULL;
- }
- ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
- }
-
- BUG_ON(map->m_len == 0);
- if (map->m_flags & EXT4_MAP_NEW) {
- struct block_device *bdev = inode->i_sb->s_bdev;
- int i;
-
- for (i = 0; i < map->m_len; i++)
- unmap_underlying_metadata(bdev, map->m_pblk + i);
- }
- return 0;
-}
-
-/*
- * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
- * mpd->len and submit pages underlying it for IO
- *
- * @handle - handle for journal operations
- * @mpd - extent to map
- *
- * The function maps extent starting at mpd->lblk of length mpd->len. If it is
- * delayed, blocks are allocated, if it is unwritten, we may need to convert
- * them to initialized or split the described range from larger unwritten
- * extent. Note that we need not map all the described range since allocation
- * can return less blocks or the range is covered by more unwritten extents. We
- * cannot map more because we are limited by reserved transaction credits. On
- * the other hand we always make sure that the last touched page is fully
- * mapped so that it can be written out (and thus forward progress is
- * guaranteed). After mapping we submit all mapped pages for IO.
- */
-static int mpage_map_and_submit_extent(handle_t *handle,
- struct mpage_da_data *mpd,
- bool *give_up_on_write)
-{
- struct inode *inode = mpd->inode;
- struct ext4_map_blocks *map = &mpd->map;
- int err;
- loff_t disksize;
-
- mpd->io_submit.io_end->offset =
- ((loff_t)map->m_lblk) << inode->i_blkbits;
- while (map->m_len) {
- err = mpage_map_one_extent(handle, mpd);
- if (err < 0) {
- struct super_block *sb = inode->i_sb;
-
- if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
- goto invalidate_dirty_pages;
- /*
- * Let the uper layers retry transient errors.
- * In the case of ENOSPC, if ext4_count_free_blocks()
- * is non-zero, a commit should free up blocks.
- */
- if ((err == -ENOMEM) ||
- (err == -ENOSPC && ext4_count_free_clusters(sb)))
- return err;
- ext4_msg(sb, KERN_CRIT,
- "Delayed block allocation failed for "
- "inode %lu at logical offset %llu with"
- " max blocks %u with error %d",
- inode->i_ino,
- (unsigned long long)map->m_lblk,
- (unsigned)map->m_len, -err);
- ext4_msg(sb, KERN_CRIT,
- "This should not happen!! Data will "
- "be lost\n");
- if (err == -ENOSPC)
- ext4_print_free_blocks(inode);
- invalidate_dirty_pages:
- *give_up_on_write = true;
- return err;
- }
- /*
- * Update buffer state, submit mapped pages, and get us new
- * extent to map
- */
- err = mpage_map_and_submit_buffers(mpd);
- if (err < 0)
- return err;
- }
-
- /* Update on-disk size after IO is submitted */
- disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
- if (disksize > i_size_read(inode))
- disksize = i_size_read(inode);
- if (disksize > EXT4_I(inode)->i_disksize) {
- int err2;
+ if (PageChecked(page) && ext4_should_journal_data(inode))
+ /*
+ * It's mmapped pagecache. Add buffers and journal it. There
+ * doesn't seem much point in redirtying the page here.
+ */
+ return __ext4_journalled_writepage(page, len);
- ext4_update_i_disksize(inode, disksize);
- err2 = ext4_mark_inode_dirty(handle, inode);
- if (err2)
- ext4_error(inode->i_sb,
- "Failed to mark inode %lu dirty",
- inode->i_ino);
- if (!err)
- err = err2;
- }
- return err;
+ memset(&io_submit, 0, sizeof(io_submit));
+ ret = ext4_bio_write_page(&io_submit, page, len, wbc);
+ ext4_io_submit(&io_submit);
+ return ret;
}
/*
- * Calculate the total number of credits to reserve for one writepages
- * iteration. This is called from ext4_writepages(). We map an extent of
- * upto MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
- * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
- * bpp - 1 blocks in bpp different extents.
+ * This is called via ext4_da_writepages() to
+ * calculate the total number of credits to reserve to fit
+ * a single extent allocation into a single transaction,
+ * ext4_da_writpeages() will loop calling this before
+ * the block allocation.
*/
+
static int ext4_da_writepages_trans_blocks(struct inode *inode)
{
- int bpp = ext4_journal_blocks_per_page(inode);
+ int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+
+ /*
+ * With non-extent format the journal credit needed to
+ * insert nrblocks contiguous block is dependent on
+ * number of contiguous block. So we will limit
+ * number of contiguous block to a sane value
+ */
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
+ (max_blocks > EXT4_MAX_TRANS_DATA))
+ max_blocks = EXT4_MAX_TRANS_DATA;
- return ext4_meta_trans_blocks(inode,
- MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
+ return ext4_chunk_trans_blocks(inode, max_blocks);
}
/*
- * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
- * and underlying extent to map
- *
- * @mpd - where to look for pages
- *
- * Walk dirty pages in the mapping. If they are fully mapped, submit them for
- * IO immediately. When we find a page which isn't mapped we start accumulating
- * extent of buffers underlying these pages that needs mapping (formed by
- * either delayed or unwritten buffers). We also lock the pages containing
- * these buffers. The extent found is returned in @mpd structure (starting at
- * mpd->lblk with length mpd->len blocks).
- *
- * Note that this function can attach bios to one io_end structure which are
- * neither logically nor physically contiguous. Although it may seem as an
- * unnecessary complication, it is actually inevitable in blocksize < pagesize
- * case as we need to track IO to all buffers underlying a page in one io_end.
+ * write_cache_pages_da - walk the list of dirty pages of the given
+ * address space and accumulate pages that need writing, and call
+ * mpage_da_map_and_submit to map a single contiguous memory region
+ * and then write them.
*/
-static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
+static int write_cache_pages_da(handle_t *handle,
+ struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct mpage_da_data *mpd,
+ pgoff_t *done_index)
{
- struct address_space *mapping = mpd->inode->i_mapping;
- struct pagevec pvec;
- unsigned int nr_pages;
- pgoff_t index = mpd->first_page;
- pgoff_t end = mpd->last_page;
- int tag;
- int i, err = 0;
- int blkbits = mpd->inode->i_blkbits;
- ext4_lblk_t lblk;
- struct buffer_head *head;
+ struct buffer_head *bh, *head;
+ struct inode *inode = mapping->host;
+ struct pagevec pvec;
+ unsigned int nr_pages;
+ sector_t logical;
+ pgoff_t index, end;
+ long nr_to_write = wbc->nr_to_write;
+ int i, tag, ret = 0;
+
+ memset(mpd, 0, sizeof(struct mpage_da_data));
+ mpd->wbc = wbc;
+ mpd->inode = inode;
+ pagevec_init(&pvec, 0);
+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
- if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
- pagevec_init(&pvec, 0);
- mpd->map.m_len = 0;
- mpd->next_page = index;
+ *done_index = index;
while (index <= end) {
nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
if (nr_pages == 0)
- goto out;
+ return 0;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
@@ -2295,21 +2318,31 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
if (page->index > end)
goto out;
- /* If we can't merge this page, we are done. */
- if (mpd->map.m_len > 0 && mpd->next_page != page->index)
- goto out;
+ *done_index = page->index + 1;
+
+ /*
+ * If we can't merge this page, and we have
+ * accumulated an contiguous region, write it
+ */
+ if ((mpd->next_page != page->index) &&
+ (mpd->next_page != mpd->first_page)) {
+ mpage_da_map_and_submit(mpd);
+ goto ret_extent_tail;
+ }
lock_page(page);
+
/*
- * If the page is no longer dirty, or its mapping no
- * longer corresponds to inode we are writing (which
- * means it has been truncated or invalidated), or the
- * page is already under writeback and we are not doing
- * a data integrity writeback, skip the page
+ * If the page is no longer dirty, or its
+ * mapping no longer corresponds to inode we
+ * are writing (which means it has been
+ * truncated or invalidated), or the page is
+ * already under writeback and we are not
+ * doing a data integrity writeback, skip the page
*/
if (!PageDirty(page) ||
(PageWriteback(page) &&
- (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
+ (wbc->sync_mode == WB_SYNC_NONE)) ||
unlikely(page->mapping != mapping)) {
unlock_page(page);
continue;
@@ -2318,70 +2351,106 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
wait_on_page_writeback(page);
BUG_ON(PageWriteback(page));
- if (mpd->map.m_len == 0)
+ /*
+ * If we have inline data and arrive here, it means that
+ * we will soon create the block for the 1st page, so
+ * we'd better clear the inline data here.
+ */
+ if (ext4_has_inline_data(inode)) {
+ BUG_ON(ext4_test_inode_state(inode,
+ EXT4_STATE_MAY_INLINE_DATA));
+ ext4_destroy_inline_data(handle, inode);
+ }
+
+ if (mpd->next_page != page->index)
mpd->first_page = page->index;
mpd->next_page = page->index + 1;
+ logical = (sector_t) page->index <<
+ (PAGE_CACHE_SHIFT - inode->i_blkbits);
+
/* Add all dirty buffers to mpd */
- lblk = ((ext4_lblk_t)page->index) <<
- (PAGE_CACHE_SHIFT - blkbits);
head = page_buffers(page);
- if (!add_page_bufs_to_extent(mpd, head, head, lblk))
- goto out;
- /* So far everything mapped? Submit the page for IO. */
- if (mpd->map.m_len == 0) {
- err = mpage_submit_page(mpd, page);
- if (err < 0)
+ bh = head;
+ do {
+ BUG_ON(buffer_locked(bh));
+ /*
+ * We need to try to allocate unmapped blocks
+ * in the same page. Otherwise we won't make
+ * progress with the page in ext4_writepage
+ */
+ if (ext4_bh_delay_or_unwritten(NULL, bh)) {
+ mpage_add_bh_to_extent(mpd, logical,
+ bh->b_state);
+ if (mpd->io_done)
+ goto ret_extent_tail;
+ } else if (buffer_dirty(bh) &&
+ buffer_mapped(bh)) {
+ /*
+ * mapped dirty buffer. We need to
+ * update the b_state because we look
+ * at b_state in mpage_da_map_blocks.
+ * We don't update b_size because if we
+ * find an unmapped buffer_head later
+ * we need to use the b_state flag of
+ * that buffer_head.
+ */
+ if (mpd->b_size == 0)
+ mpd->b_state =
+ bh->b_state & BH_FLAGS;
+ }
+ logical++;
+ } while ((bh = bh->b_this_page) != head);
+
+ if (nr_to_write > 0) {
+ nr_to_write--;
+ if (nr_to_write == 0 &&
+ wbc->sync_mode == WB_SYNC_NONE)
+ /*
+ * We stop writing back only if we are
+ * not doing integrity sync. In case of
+ * integrity sync we have to keep going
+ * because someone may be concurrently
+ * dirtying pages, and we might have
+ * synced a lot of newly appeared dirty
+ * pages, but have not synced all of the
+ * old dirty pages.
+ */
goto out;
}
-
- /*
- * Accumulated enough dirty pages? This doesn't apply
- * to WB_SYNC_ALL mode. For integrity sync we have to
- * keep going because someone may be concurrently
- * dirtying pages, and we might have synced a lot of
- * newly appeared dirty pages, but have not synced all
- * of the old dirty pages.
- */
- if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
- mpd->next_page - mpd->first_page >=
- mpd->wbc->nr_to_write)
- goto out;
}
pagevec_release(&pvec);
cond_resched();
}
return 0;
+ret_extent_tail:
+ ret = MPAGE_DA_EXTENT_TAIL;
out:
pagevec_release(&pvec);
- return err;
-}
-
-static int __writepage(struct page *page, struct writeback_control *wbc,
- void *data)
-{
- struct address_space *mapping = data;
- int ret = ext4_writepage(page, wbc);
- mapping_set_error(mapping, ret);
+ cond_resched();
return ret;
}
-static int ext4_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
+
+static int ext4_da_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- pgoff_t writeback_index = 0;
- long nr_to_write = wbc->nr_to_write;
+ pgoff_t index;
int range_whole = 0;
- int cycled = 1;
handle_t *handle = NULL;
struct mpage_da_data mpd;
struct inode *inode = mapping->host;
- int needed_blocks, rsv_blocks = 0, ret = 0;
+ int pages_written = 0;
+ unsigned int max_pages;
+ int range_cyclic, cycled = 1, io_done = 0;
+ int needed_blocks, ret = 0;
+ long desired_nr_to_write, nr_to_writebump = 0;
+ loff_t range_start = wbc->range_start;
struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
- bool done;
+ pgoff_t done_index = 0;
+ pgoff_t end;
struct blk_plug plug;
- bool give_up_on_write = false;
- trace_ext4_writepages(inode, wbc);
+ trace_ext4_da_writepages(inode, wbc);
/*
* No pages to write? This is mainly a kludge to avoid starting
@@ -2391,165 +2460,164 @@ static int ext4_writepages(struct address_space *mapping,
if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
return 0;
- if (ext4_should_journal_data(inode)) {
- struct blk_plug plug;
- int ret;
-
- blk_start_plug(&plug);
- ret = write_cache_pages(mapping, wbc, __writepage, mapping);
- blk_finish_plug(&plug);
- return ret;
- }
-
/*
* If the filesystem has aborted, it is read-only, so return
* right away instead of dumping stack traces later on that
* will obscure the real source of the problem. We test
* EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
* the latter could be true if the filesystem is mounted
- * read-only, and in that case, ext4_writepages should
+ * read-only, and in that case, ext4_da_writepages should
* *never* be called, so if that ever happens, we would want
* the stack trace.
*/
if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
return -EROFS;
- if (ext4_should_dioread_nolock(inode)) {
- /*
- * We may need to convert upto one extent per block in
- * the page and we may dirty the inode.
- */
- rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits);
- }
-
- /*
- * If we have inline data and arrive here, it means that
- * we will soon create the block for the 1st page, so
- * we'd better clear the inline data here.
- */
- if (ext4_has_inline_data(inode)) {
- /* Just inode will be modified... */
- handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out_writepages;
- }
- BUG_ON(ext4_test_inode_state(inode,
- EXT4_STATE_MAY_INLINE_DATA));
- ext4_destroy_inline_data(handle, inode);
- ext4_journal_stop(handle);
- }
-
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
+ range_cyclic = wbc->range_cyclic;
if (wbc->range_cyclic) {
- writeback_index = mapping->writeback_index;
- if (writeback_index)
+ index = mapping->writeback_index;
+ if (index)
cycled = 0;
- mpd.first_page = writeback_index;
- mpd.last_page = -1;
+ wbc->range_start = index << PAGE_CACHE_SHIFT;
+ wbc->range_end = LLONG_MAX;
+ wbc->range_cyclic = 0;
+ end = -1;
} else {
- mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT;
- mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT;
+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ }
+
+ /*
+ * This works around two forms of stupidity. The first is in
+ * the writeback code, which caps the maximum number of pages
+ * written to be 1024 pages. This is wrong on multiple
+ * levels; different architectues have a different page size,
+ * which changes the maximum amount of data which gets
+ * written. Secondly, 4 megabytes is way too small. XFS
+ * forces this value to be 16 megabytes by multiplying
+ * nr_to_write parameter by four, and then relies on its
+ * allocator to allocate larger extents to make them
+ * contiguous. Unfortunately this brings us to the second
+ * stupidity, which is that ext4's mballoc code only allocates
+ * at most 2048 blocks. So we force contiguous writes up to
+ * the number of dirty blocks in the inode, or
+ * sbi->max_writeback_mb_bump whichever is smaller.
+ */
+ max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
+ if (!range_cyclic && range_whole) {
+ if (wbc->nr_to_write == LONG_MAX)
+ desired_nr_to_write = wbc->nr_to_write;
+ else
+ desired_nr_to_write = wbc->nr_to_write * 8;
+ } else
+ desired_nr_to_write = ext4_num_dirty_pages(inode, index,
+ max_pages);
+ if (desired_nr_to_write > max_pages)
+ desired_nr_to_write = max_pages;
+
+ if (wbc->nr_to_write < desired_nr_to_write) {
+ nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
+ wbc->nr_to_write = desired_nr_to_write;
}
- mpd.inode = inode;
- mpd.wbc = wbc;
- ext4_io_submit_init(&mpd.io_submit, wbc);
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
- tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
- done = false;
+ tag_pages_for_writeback(mapping, index, end);
+
blk_start_plug(&plug);
- while (!done && mpd.first_page <= mpd.last_page) {
- /* For each extent of pages we use new io_end */
- mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
- if (!mpd.io_submit.io_end) {
- ret = -ENOMEM;
- break;
- }
+ while (!ret && wbc->nr_to_write > 0) {
/*
- * We have two constraints: We find one extent to map and we
- * must always write out whole page (makes a difference when
- * blocksize < pagesize) so that we don't block on IO when we
- * try to write out the rest of the page. Journalled mode is
- * not supported by delalloc.
+ * we insert one extent at a time. So we need
+ * credit needed for single extent allocation.
+ * journalled mode is currently not supported
+ * by delalloc
*/
BUG_ON(ext4_should_journal_data(inode));
needed_blocks = ext4_da_writepages_trans_blocks(inode);
- /* start a new transaction */
- handle = ext4_journal_start_with_reserve(inode,
- EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
+ /* start a new transaction*/
+ handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
+ needed_blocks);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
"%ld pages, ino %lu; err %d", __func__,
wbc->nr_to_write, inode->i_ino, ret);
- /* Release allocated io_end */
- ext4_put_io_end(mpd.io_submit.io_end);
- break;
+ blk_finish_plug(&plug);
+ goto out_writepages;
}
- trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
- ret = mpage_prepare_extent_to_map(&mpd);
- if (!ret) {
- if (mpd.map.m_len)
- ret = mpage_map_and_submit_extent(handle, &mpd,
- &give_up_on_write);
- else {
- /*
- * We scanned the whole range (or exhausted
- * nr_to_write), submitted what was mapped and
- * didn't find anything needing mapping. We are
- * done.
- */
- done = true;
- }
+ /*
+ * Now call write_cache_pages_da() to find the next
+ * contiguous region of logical blocks that need
+ * blocks to be allocated by ext4 and submit them.
+ */
+ ret = write_cache_pages_da(handle, mapping,
+ wbc, &mpd, &done_index);
+ /*
+ * If we have a contiguous extent of pages and we
+ * haven't done the I/O yet, map the blocks and submit
+ * them for I/O.
+ */
+ if (!mpd.io_done && mpd.next_page != mpd.first_page) {
+ mpage_da_map_and_submit(&mpd);
+ ret = MPAGE_DA_EXTENT_TAIL;
}
+ trace_ext4_da_write_pages(inode, &mpd);
+ wbc->nr_to_write -= mpd.pages_written;
+
ext4_journal_stop(handle);
- /* Submit prepared bio */
- ext4_io_submit(&mpd.io_submit);
- /* Unlock pages we didn't use */
- mpage_release_unused_pages(&mpd, give_up_on_write);
- /* Drop our io_end reference we got from init */
- ext4_put_io_end(mpd.io_submit.io_end);
-
- if (ret == -ENOSPC && sbi->s_journal) {
- /*
- * Commit the transaction which would
+
+ if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
+ /* commit the transaction which would
* free blocks released in the transaction
* and try again
*/
jbd2_journal_force_commit_nested(sbi->s_journal);
ret = 0;
- continue;
- }
- /* Fatal error - ENOMEM, EIO... */
- if (ret)
+ } else if (ret == MPAGE_DA_EXTENT_TAIL) {
+ /*
+ * Got one extent now try with rest of the pages.
+ * If mpd.retval is set -EIO, journal is aborted.
+ * So we don't need to write any more.
+ */
+ pages_written += mpd.pages_written;
+ ret = mpd.retval;
+ io_done = 1;
+ } else if (wbc->nr_to_write)
+ /*
+ * There is no more writeout needed
+ * or we requested for a noblocking writeout
+ * and we found the device congested
+ */
break;
}
blk_finish_plug(&plug);
- if (!ret && !cycled) {
+ if (!io_done && !cycled) {
cycled = 1;
- mpd.last_page = writeback_index - 1;
- mpd.first_page = 0;
+ index = 0;
+ wbc->range_start = index << PAGE_CACHE_SHIFT;
+ wbc->range_end = mapping->writeback_index - 1;
goto retry;
}
/* Update index */
+ wbc->range_cyclic = range_cyclic;
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
/*
- * Set the writeback_index so that range_cyclic
+ * set the writeback_index so that range_cyclic
* mode will write it back later
*/
- mapping->writeback_index = mpd.first_page;
+ mapping->writeback_index = done_index;
out_writepages:
- trace_ext4_writepages_result(inode, wbc, ret,
- nr_to_write - wbc->nr_to_write);
+ wbc->nr_to_write -= nr_to_writebump;
+ wbc->range_start = range_start;
+ trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
return ret;
}
@@ -2761,8 +2829,7 @@ static int ext4_da_write_end(struct file *file,
return ret ? ret : copied;
}
-static void ext4_da_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
{
/*
* Drop reserved blocks
@@ -2771,10 +2838,10 @@ static void ext4_da_invalidatepage(struct page *page, unsigned int offset,
if (!page_has_buffers(page))
goto out;
- ext4_da_page_release_reservation(page, offset, length);
+ ext4_da_page_release_reservation(page, offset);
out:
- ext4_invalidatepage(page, offset, length);
+ ext4_invalidatepage(page, offset);
return;
}
@@ -2797,7 +2864,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
* laptop_mode, not even desirable). However, to do otherwise
* would require replicating code paths in:
*
- * ext4_writepages() ->
+ * ext4_da_writepages() ->
* write_cache_pages() ---> (via passed in callback function)
* __mpage_da_writepage() -->
* mpage_add_bh_to_extent()
@@ -2922,40 +2989,37 @@ ext4_readpages(struct file *file, struct address_space *mapping,
return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
}
-static void ext4_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void ext4_invalidatepage(struct page *page, unsigned long offset)
{
- trace_ext4_invalidatepage(page, offset, length);
+ trace_ext4_invalidatepage(page, offset);
/* No journalling happens on data buffers when this function is used */
WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
- block_invalidatepage(page, offset, length);
+ block_invalidatepage(page, offset);
}
static int __ext4_journalled_invalidatepage(struct page *page,
- unsigned int offset,
- unsigned int length)
+ unsigned long offset)
{
journal_t *journal = EXT4_JOURNAL(page->mapping->host);
- trace_ext4_journalled_invalidatepage(page, offset, length);
+ trace_ext4_journalled_invalidatepage(page, offset);
/*
* If it's a full truncate we just forget about the pending dirtying
*/
- if (offset == 0 && length == PAGE_CACHE_SIZE)
+ if (offset == 0)
ClearPageChecked(page);
- return jbd2_journal_invalidatepage(journal, page, offset, length);
+ return jbd2_journal_invalidatepage(journal, page, offset);
}
/* Wrapper for aops... */
static void ext4_journalled_invalidatepage(struct page *page,
- unsigned int offset,
- unsigned int length)
+ unsigned long offset)
{
- WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
+ WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0);
}
static int ext4_releasepage(struct page *page, gfp_t wait)
@@ -3003,13 +3067,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
struct inode *inode = file_inode(iocb->ki_filp);
ext4_io_end_t *io_end = iocb->private;
- /* if not async direct IO just return */
- if (!io_end) {
- inode_dio_done(inode);
- if (is_async)
- aio_complete(iocb, ret, 0);
- return;
- }
+ /* if not async direct IO or dio with 0 bytes write, just return */
+ if (!io_end || !size)
+ goto out;
ext_debug("ext4_end_io_dio(): io_end 0x%p "
"for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
@@ -3017,13 +3077,25 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
size);
iocb->private = NULL;
+
+ /* if not aio dio with unwritten extents, just free io and return */
+ if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+ ext4_free_io_end(io_end);
+out:
+ inode_dio_done(inode);
+ if (is_async)
+ aio_complete(iocb, ret, 0);
+ return;
+ }
+
io_end->offset = offset;
io_end->size = size;
if (is_async) {
io_end->iocb = iocb;
io_end->result = ret;
}
- ext4_put_io_end_defer(io_end);
+
+ ext4_add_complete_io(io_end);
}
/*
@@ -3057,7 +3129,6 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
get_block_t *get_block_func = NULL;
int dio_flags = 0;
loff_t final_size = offset + count;
- ext4_io_end_t *io_end = NULL;
/* Use the old path for reads and writes beyond i_size. */
if (rw != WRITE || final_size > inode->i_size)
@@ -3065,18 +3136,11 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
BUG_ON(iocb->private == NULL);
- /*
- * Make all waiters for direct IO properly wait also for extent
- * conversion. This also disallows race between truncate() and
- * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
- */
- if (rw == WRITE)
- atomic_inc(&inode->i_dio_count);
-
/* If we do a overwrite dio, i_mutex locking can be released */
overwrite = *((int *)iocb->private);
if (overwrite) {
+ atomic_inc(&inode->i_dio_count);
down_read(&EXT4_I(inode)->i_data_sem);
mutex_unlock(&inode->i_mutex);
}
@@ -3103,16 +3167,13 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
iocb->private = NULL;
ext4_inode_aio_set(inode, NULL);
if (!is_sync_kiocb(iocb)) {
- io_end = ext4_init_io_end(inode, GFP_NOFS);
+ ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS);
if (!io_end) {
ret = -ENOMEM;
goto retake_lock;
}
io_end->flag |= EXT4_IO_END_DIRECT;
- /*
- * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
- */
- iocb->private = ext4_get_io_end(io_end);
+ iocb->private = io_end;
/*
* we save the io structure for current async direct
* IO, so that later ext4_map_blocks() could flag the
@@ -3136,42 +3197,33 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
NULL,
dio_flags);
+ if (iocb->private)
+ ext4_inode_aio_set(inode, NULL);
/*
- * Put our reference to io_end. This can free the io_end structure e.g.
- * in sync IO case or in case of error. It can even perform extent
- * conversion if all bios we submitted finished before we got here.
- * Note that in that case iocb->private can be already set to NULL
- * here.
+ * The io_end structure takes a reference to the inode, that
+ * structure needs to be destroyed and the reference to the
+ * inode need to be dropped, when IO is complete, even with 0
+ * byte write, or failed.
+ *
+ * In the successful AIO DIO case, the io_end structure will
+ * be destroyed and the reference to the inode will be dropped
+ * after the end_io call back function is called.
+ *
+ * In the case there is 0 byte write, or error case, since VFS
+ * direct IO won't invoke the end_io call back function, we
+ * need to free the end_io structure here.
*/
- if (io_end) {
- ext4_inode_aio_set(inode, NULL);
- ext4_put_io_end(io_end);
- /*
- * When no IO was submitted ext4_end_io_dio() was not
- * called so we have to put iocb's reference.
- */
- if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) {
- WARN_ON(iocb->private != io_end);
- WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
- WARN_ON(io_end->iocb);
- /*
- * Generic code already did inode_dio_done() so we
- * have to clear EXT4_IO_END_DIRECT to not do it for
- * the second time.
- */
- io_end->flag = 0;
- ext4_put_io_end(io_end);
- iocb->private = NULL;
- }
- }
- if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
+ if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
+ ext4_free_io_end(iocb->private);
+ iocb->private = NULL;
+ } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
EXT4_STATE_DIO_UNWRITTEN)) {
int err;
/*
* for non AIO case, since the IO is already
* completed, we could do the conversion right here
*/
- err = ext4_convert_unwritten_extents(NULL, inode,
+ err = ext4_convert_unwritten_extents(inode,
offset, ret);
if (err < 0)
ret = err;
@@ -3179,10 +3231,9 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
}
retake_lock:
- if (rw == WRITE)
- inode_dio_done(inode);
/* take i_mutex locking again if we do a ovewrite dio */
if (overwrite) {
+ inode_dio_done(inode);
up_read(&EXT4_I(inode)->i_data_sem);
mutex_lock(&inode->i_mutex);
}
@@ -3241,7 +3292,6 @@ static const struct address_space_operations ext4_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
- .writepages = ext4_writepages,
.write_begin = ext4_write_begin,
.write_end = ext4_write_end,
.bmap = ext4_bmap,
@@ -3257,7 +3307,6 @@ static const struct address_space_operations ext4_journalled_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
- .writepages = ext4_writepages,
.write_begin = ext4_write_begin,
.write_end = ext4_journalled_write_end,
.set_page_dirty = ext4_journalled_set_page_dirty,
@@ -3273,7 +3322,7 @@ static const struct address_space_operations ext4_da_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
- .writepages = ext4_writepages,
+ .writepages = ext4_da_writepages,
.write_begin = ext4_da_write_begin,
.write_end = ext4_da_write_end,
.bmap = ext4_bmap,
@@ -3306,56 +3355,89 @@ void ext4_set_aops(struct inode *inode)
inode->i_mapping->a_ops = &ext4_aops;
}
+
/*
- * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
- * up to the end of the block which corresponds to `from'.
- * This required during truncate. We need to physically zero the tail end
- * of that block so it doesn't yield old data if the file is later grown.
+ * ext4_discard_partial_page_buffers()
+ * Wrapper function for ext4_discard_partial_page_buffers_no_lock.
+ * This function finds and locks the page containing the offset
+ * "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
+ * Calling functions that already have the page locked should call
+ * ext4_discard_partial_page_buffers_no_lock directly.
*/
-int ext4_block_truncate_page(handle_t *handle,
- struct address_space *mapping, loff_t from)
+int ext4_discard_partial_page_buffers(handle_t *handle,
+ struct address_space *mapping, loff_t from,
+ loff_t length, int flags)
{
- unsigned offset = from & (PAGE_CACHE_SIZE-1);
- unsigned length;
- unsigned blocksize;
struct inode *inode = mapping->host;
+ struct page *page;
+ int err = 0;
- blocksize = inode->i_sb->s_blocksize;
- length = blocksize - (offset & (blocksize - 1));
+ page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
+ mapping_gfp_mask(mapping) & ~__GFP_FS);
+ if (!page)
+ return -ENOMEM;
+
+ err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
+ from, length, flags);
- return ext4_block_zero_page_range(handle, mapping, from, length);
+ unlock_page(page);
+ page_cache_release(page);
+ return err;
}
/*
- * ext4_block_zero_page_range() zeros out a mapping of length 'length'
- * starting from file offset 'from'. The range to be zero'd must
- * be contained with in one block. If the specified range exceeds
- * the end of the block it will be shortened to end of the block
- * that cooresponds to 'from'
+ * ext4_discard_partial_page_buffers_no_lock()
+ * Zeros a page range of length 'length' starting from offset 'from'.
+ * Buffer heads that correspond to the block aligned regions of the
+ * zeroed range will be unmapped. Unblock aligned regions
+ * will have the corresponding buffer head mapped if needed so that
+ * that region of the page can be updated with the partial zero out.
+ *
+ * This function assumes that the page has already been locked. The
+ * The range to be discarded must be contained with in the given page.
+ * If the specified range exceeds the end of the page it will be shortened
+ * to the end of the page that corresponds to 'from'. This function is
+ * appropriate for updating a page and it buffer heads to be unmapped and
+ * zeroed for blocks that have been either released, or are going to be
+ * released.
+ *
+ * handle: The journal handle
+ * inode: The files inode
+ * page: A locked page that contains the offset "from"
+ * from: The starting byte offset (from the beginning of the file)
+ * to begin discarding
+ * len: The length of bytes to discard
+ * flags: Optional flags that may be used:
+ *
+ * EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
+ * Only zero the regions of the page whose buffer heads
+ * have already been unmapped. This flag is appropriate
+ * for updating the contents of a page whose blocks may
+ * have already been released, and we only want to zero
+ * out the regions that correspond to those released blocks.
+ *
+ * Returns zero on success or negative on failure.
*/
-int ext4_block_zero_page_range(handle_t *handle,
- struct address_space *mapping, loff_t from, loff_t length)
+static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
+ struct inode *inode, struct page *page, loff_t from,
+ loff_t length, int flags)
{
ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
- unsigned offset = from & (PAGE_CACHE_SIZE-1);
- unsigned blocksize, max, pos;
+ unsigned int offset = from & (PAGE_CACHE_SIZE-1);
+ unsigned int blocksize, max, pos;
ext4_lblk_t iblock;
- struct inode *inode = mapping->host;
struct buffer_head *bh;
- struct page *page;
int err = 0;
- page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
- mapping_gfp_mask(mapping) & ~__GFP_FS);
- if (!page)
- return -ENOMEM;
-
blocksize = inode->i_sb->s_blocksize;
- max = blocksize - (offset & (blocksize - 1));
+ max = PAGE_CACHE_SIZE - offset;
+
+ if (index != page->index)
+ return -EINVAL;
/*
* correct length if it does not fall between
- * 'from' and the end of the block
+ * 'from' and the end of the page
*/
if (length > max || length < 0)
length = max;
@@ -3373,91 +3455,106 @@ int ext4_block_zero_page_range(handle_t *handle,
iblock++;
pos += blocksize;
}
- if (buffer_freed(bh)) {
- BUFFER_TRACE(bh, "freed: skip");
- goto unlock;
- }
- if (!buffer_mapped(bh)) {
- BUFFER_TRACE(bh, "unmapped");
- ext4_get_block(inode, iblock, bh, 0);
- /* unmapped? It's a hole - nothing to do */
- if (!buffer_mapped(bh)) {
- BUFFER_TRACE(bh, "still unmapped");
- goto unlock;
- }
- }
-
- /* Ok, it's mapped. Make sure it's up-to-date */
- if (PageUptodate(page))
- set_buffer_uptodate(bh);
- if (!buffer_uptodate(bh)) {
- err = -EIO;
- ll_rw_block(READ, 1, &bh);
- wait_on_buffer(bh);
- /* Uhhuh. Read error. Complain and punt. */
- if (!buffer_uptodate(bh))
- goto unlock;
- }
- if (ext4_should_journal_data(inode)) {
- BUFFER_TRACE(bh, "get write access");
- err = ext4_journal_get_write_access(handle, bh);
- if (err)
- goto unlock;
- }
- zero_user(page, offset, length);
- BUFFER_TRACE(bh, "zeroed end of block");
+ pos = offset;
+ while (pos < offset + length) {
+ unsigned int end_of_block, range_to_discard;
- if (ext4_should_journal_data(inode)) {
- err = ext4_handle_dirty_metadata(handle, inode, bh);
- } else {
err = 0;
- mark_buffer_dirty(bh);
- if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE))
- err = ext4_jbd2_file_inode(handle, inode);
- }
-unlock:
- unlock_page(page);
- page_cache_release(page);
- return err;
-}
+ /* The length of space left to zero and unmap */
+ range_to_discard = offset + length - pos;
-int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
- loff_t lstart, loff_t length)
-{
- struct super_block *sb = inode->i_sb;
- struct address_space *mapping = inode->i_mapping;
- unsigned partial_start, partial_end;
- ext4_fsblk_t start, end;
- loff_t byte_end = (lstart + length - 1);
- int err = 0;
+ /* The length of space until the end of the block */
+ end_of_block = blocksize - (pos & (blocksize-1));
- partial_start = lstart & (sb->s_blocksize - 1);
- partial_end = byte_end & (sb->s_blocksize - 1);
+ /*
+ * Do not unmap or zero past end of block
+ * for this buffer head
+ */
+ if (range_to_discard > end_of_block)
+ range_to_discard = end_of_block;
- start = lstart >> sb->s_blocksize_bits;
- end = byte_end >> sb->s_blocksize_bits;
- /* Handle partial zero within the single block */
- if (start == end &&
- (partial_start || (partial_end != sb->s_blocksize - 1))) {
- err = ext4_block_zero_page_range(handle, mapping,
- lstart, length);
- return err;
- }
- /* Handle partial zero out on the start of the range */
- if (partial_start) {
- err = ext4_block_zero_page_range(handle, mapping,
- lstart, sb->s_blocksize);
- if (err)
- return err;
+ /*
+ * Skip this buffer head if we are only zeroing unampped
+ * regions of the page
+ */
+ if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
+ buffer_mapped(bh))
+ goto next;
+
+ /* If the range is block aligned, unmap */
+ if (range_to_discard == blocksize) {
+ clear_buffer_dirty(bh);
+ bh->b_bdev = NULL;
+ clear_buffer_mapped(bh);
+ clear_buffer_req(bh);
+ clear_buffer_new(bh);
+ clear_buffer_delay(bh);
+ clear_buffer_unwritten(bh);
+ clear_buffer_uptodate(bh);
+ zero_user(page, pos, range_to_discard);
+ BUFFER_TRACE(bh, "Buffer discarded");
+ goto next;
+ }
+
+ /*
+ * If this block is not completely contained in the range
+ * to be discarded, then it is not going to be released. Because
+ * we need to keep this block, we need to make sure this part
+ * of the page is uptodate before we modify it by writeing
+ * partial zeros on it.
+ */
+ if (!buffer_mapped(bh)) {
+ /*
+ * Buffer head must be mapped before we can read
+ * from the block
+ */
+ BUFFER_TRACE(bh, "unmapped");
+ ext4_get_block(inode, iblock, bh, 0);
+ /* unmapped? It's a hole - nothing to do */
+ if (!buffer_mapped(bh)) {
+ BUFFER_TRACE(bh, "still unmapped");
+ goto next;
+ }
+ }
+
+ /* Ok, it's mapped. Make sure it's up-to-date */
+ if (PageUptodate(page))
+ set_buffer_uptodate(bh);
+
+ if (!buffer_uptodate(bh)) {
+ err = -EIO;
+ ll_rw_block(READ, 1, &bh);
+ wait_on_buffer(bh);
+ /* Uhhuh. Read error. Complain and punt.*/
+ if (!buffer_uptodate(bh))
+ goto next;
+ }
+
+ if (ext4_should_journal_data(inode)) {
+ BUFFER_TRACE(bh, "get write access");
+ err = ext4_journal_get_write_access(handle, bh);
+ if (err)
+ goto next;
+ }
+
+ zero_user(page, pos, range_to_discard);
+
+ err = 0;
+ if (ext4_should_journal_data(inode)) {
+ err = ext4_handle_dirty_metadata(handle, inode, bh);
+ } else
+ mark_buffer_dirty(bh);
+
+ BUFFER_TRACE(bh, "Partial buffer zeroed");
+next:
+ bh = bh->b_this_page;
+ iblock++;
+ pos += range_to_discard;
}
- /* Handle partial zero out on the end of the range */
- if (partial_end != sb->s_blocksize - 1)
- err = ext4_block_zero_page_range(handle, mapping,
- byte_end - partial_end,
- partial_end + 1);
+
return err;
}
@@ -3483,12 +3580,14 @@ int ext4_can_truncate(struct inode *inode)
* Returns: 0 on success or negative on failure
*/
-int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
{
+ struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
ext4_lblk_t first_block, stop_block;
struct address_space *mapping = inode->i_mapping;
- loff_t first_block_offset, last_block_offset;
+ loff_t first_page, last_page, page_len;
+ loff_t first_page_offset, last_page_offset;
handle_t *handle;
unsigned int credits;
int ret = 0;
@@ -3539,16 +3638,23 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
offset;
}
- first_block_offset = round_up(offset, sb->s_blocksize);
- last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
+ first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ last_page = (offset + length) >> PAGE_CACHE_SHIFT;
- /* Now release the pages and zero block aligned part of pages*/
- if (last_block_offset > first_block_offset)
- truncate_pagecache_range(inode, first_block_offset,
- last_block_offset);
+ first_page_offset = first_page << PAGE_CACHE_SHIFT;
+ last_page_offset = last_page << PAGE_CACHE_SHIFT;
+
+ /* Now release the pages */
+ if (last_page_offset > first_page_offset) {
+ truncate_pagecache_range(inode, first_page_offset,
+ last_page_offset - 1);
+ }
/* Wait all existing dio workers, newcomers will block on i_mutex */
ext4_inode_block_unlocked_dio(inode);
+ ret = ext4_flush_unwritten_io(inode);
+ if (ret)
+ goto out_dio;
inode_dio_wait(inode);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
@@ -3562,10 +3668,66 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
goto out_dio;
}
- ret = ext4_zero_partial_blocks(handle, inode, offset,
- length);
- if (ret)
- goto out_stop;
+ /*
+ * Now we need to zero out the non-page-aligned data in the
+ * pages at the start and tail of the hole, and unmap the
+ * buffer heads for the block aligned regions of the page that
+ * were completely zeroed.
+ */
+ if (first_page > last_page) {
+ /*
+ * If the file space being truncated is contained
+ * within a page just zero out and unmap the middle of
+ * that page
+ */
+ ret = ext4_discard_partial_page_buffers(handle,
+ mapping, offset, length, 0);
+
+ if (ret)
+ goto out_stop;
+ } else {
+ /*
+ * zero out and unmap the partial page that contains
+ * the start of the hole
+ */
+ page_len = first_page_offset - offset;
+ if (page_len > 0) {
+ ret = ext4_discard_partial_page_buffers(handle, mapping,
+ offset, page_len, 0);
+ if (ret)
+ goto out_stop;
+ }
+
+ /*
+ * zero out and unmap the partial page that contains
+ * the end of the hole
+ */
+ page_len = offset + length - last_page_offset;
+ if (page_len > 0) {
+ ret = ext4_discard_partial_page_buffers(handle, mapping,
+ last_page_offset, page_len, 0);
+ if (ret)
+ goto out_stop;
+ }
+ }
+
+ /*
+ * If i_size is contained in the last page, we need to
+ * unmap and zero the partial page after i_size
+ */
+ if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
+ inode->i_size % PAGE_CACHE_SIZE != 0) {
+ page_len = PAGE_CACHE_SIZE -
+ (inode->i_size & (PAGE_CACHE_SIZE - 1));
+
+ if (page_len > 0) {
+ ret = ext4_discard_partial_page_buffers(handle,
+ mapping, inode->i_size, page_len, 0);
+
+ if (ret)
+ goto out_stop;
+ }
+ }
first_block = (offset + sb->s_blocksize - 1) >>
EXT4_BLOCK_SIZE_BITS(sb);
@@ -3641,6 +3803,7 @@ void ext4_truncate(struct inode *inode)
unsigned int credits;
handle_t *handle;
struct address_space *mapping = inode->i_mapping;
+ loff_t page_len;
/*
* There is a possibility that we're either freeing the inode
@@ -3667,6 +3830,12 @@ void ext4_truncate(struct inode *inode)
return;
}
+ /*
+ * finish any pending end_io work so we won't run the risk of
+ * converting any truncated blocks to initialized later
+ */
+ ext4_flush_unwritten_io(inode);
+
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
credits = ext4_writepage_trans_blocks(inode);
else
@@ -3678,8 +3847,14 @@ void ext4_truncate(struct inode *inode)
return;
}
- if (inode->i_size & (inode->i_sb->s_blocksize - 1))
- ext4_block_truncate_page(handle, mapping, inode->i_size);
+ if (inode->i_size % PAGE_CACHE_SIZE != 0) {
+ page_len = PAGE_CACHE_SIZE -
+ (inode->i_size & (PAGE_CACHE_SIZE - 1));
+
+ if (ext4_discard_partial_page_buffers(handle,
+ mapping, inode->i_size, page_len, 0))
+ goto out_stop;
+ }
/*
* We add the inode to the orphan list, so that if this
@@ -4448,8 +4623,7 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
inode->i_size >> PAGE_CACHE_SHIFT);
if (!page)
return;
- ret = __ext4_journalled_invalidatepage(page, offset,
- PAGE_CACHE_SIZE - offset);
+ ret = __ext4_journalled_invalidatepage(page, offset);
unlock_page(page);
page_cache_release(page);
if (ret != -EBUSY)
@@ -4631,7 +4805,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
struct inode *inode;
- unsigned long long delalloc_blocks;
+ unsigned long delalloc_blocks;
inode = dentry->d_inode;
generic_fillattr(inode, stat);
@@ -4649,16 +4823,15 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
EXT4_I(inode)->i_reserved_data_blocks);
- stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits-9);
+ stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
return 0;
}
-static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
- int pextents)
+static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
- return ext4_ind_trans_blocks(inode, lblocks);
- return ext4_ext_index_trans_blocks(inode, pextents);
+ return ext4_ind_trans_blocks(inode, nrblocks, chunk);
+ return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}
/*
@@ -4672,8 +4845,7 @@ static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
*
* Also account for superblock, inode, quota and xattr blocks
*/
-static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
- int pextents)
+static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
int gdpblocks;
@@ -4681,10 +4853,14 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
int ret = 0;
/*
- * How many index blocks need to touch to map @lblocks logical blocks
- * to @pextents physical extents?
+ * How many index blocks need to touch to modify nrblocks?
+ * The "Chunk" flag indicating whether the nrblocks is
+ * physically contiguous on disk
+ *
+ * For Direct IO and fallocate, they calls get_block to allocate
+ * one single extent at a time, so they could set the "Chunk" flag
*/
- idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
+ idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
ret = idxblocks;
@@ -4692,7 +4868,12 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
* Now let's see how many group bitmaps and group descriptors need
* to account
*/
- groups = idxblocks + pextents;
+ groups = idxblocks;
+ if (chunk)
+ groups += 1;
+ else
+ groups += nrblocks;
+
gdpblocks = groups;
if (groups > ngroups)
groups = ngroups;
@@ -4723,7 +4904,7 @@ int ext4_writepage_trans_blocks(struct inode *inode)
int bpp = ext4_journal_blocks_per_page(inode);
int ret;
- ret = ext4_meta_trans_blocks(inode, bpp, bpp);
+ ret = ext4_meta_trans_blocks(inode, bpp, 0);
/* Account for data blocks for journalled mode */
if (ext4_should_journal_data(inode))
diff --git a/trunk/fs/ext4/mballoc.c b/trunk/fs/ext4/mballoc.c
index a9ff5e5137ca..def84082a9a9 100644
--- a/trunk/fs/ext4/mballoc.c
+++ b/trunk/fs/ext4/mballoc.c
@@ -2105,7 +2105,6 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
group = ac->ac_g_ex.fe_group;
for (i = 0; i < ngroups; group++, i++) {
- cond_resched();
/*
* Artificially restricted ngroups for non-extent
* files makes group > ngroups possible on first loop.
@@ -4406,20 +4405,17 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
repeat:
/* allocate space in core */
*errp = ext4_mb_regular_allocator(ac);
- if (*errp)
- goto discard_and_exit;
-
- /* as we've just preallocated more space than
- * user requested originally, we store allocated
- * space in a special descriptor */
- if (ac->ac_status == AC_STATUS_FOUND &&
- ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
- *errp = ext4_mb_new_preallocation(ac);
if (*errp) {
- discard_and_exit:
ext4_discard_allocated_blocks(ac);
goto errout;
}
+
+ /* as we've just preallocated more space than
+ * user requested orinally, we store allocated
+ * space in a special descriptor */
+ if (ac->ac_status == AC_STATUS_FOUND &&
+ ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
+ ext4_mb_new_preallocation(ac);
}
if (likely(ac->ac_status == AC_STATUS_FOUND)) {
*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
@@ -4616,11 +4612,10 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
BUG_ON(bh && (count > 1));
for (i = 0; i < count; i++) {
- cond_resched();
if (!bh)
tbh = sb_find_get_block(inode->i_sb,
block + i);
- if (!tbh)
+ if (unlikely(!tbh))
continue;
ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
inode, tbh, block + i);
diff --git a/trunk/fs/ext4/move_extent.c b/trunk/fs/ext4/move_extent.c
index e86dddbd8296..3dcbf364022f 100644
--- a/trunk/fs/ext4/move_extent.c
+++ b/trunk/fs/ext4/move_extent.c
@@ -912,6 +912,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
struct page *pagep[2] = {NULL, NULL};
handle_t *handle;
ext4_lblk_t orig_blk_offset;
+ long long offs = orig_page_offset << PAGE_CACHE_SHIFT;
unsigned long blocksize = orig_inode->i_sb->s_blocksize;
unsigned int w_flags = 0;
unsigned int tmp_data_size, data_size, replaced_size;
@@ -939,6 +940,8 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
orig_blk_offset = orig_page_offset * blocks_per_page +
data_offset_in_page;
+ offs = (long long)orig_blk_offset << orig_inode->i_blkbits;
+
/* Calculate data_size */
if ((orig_blk_offset + block_len_in_page - 1) ==
((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) {
diff --git a/trunk/fs/ext4/namei.c b/trunk/fs/ext4/namei.c
index ab2f6dc44b3a..6653fc35ecb7 100644
--- a/trunk/fs/ext4/namei.c
+++ b/trunk/fs/ext4/namei.c
@@ -918,8 +918,11 @@ static int htree_dirblock_to_tree(struct file *dir_file,
bh->b_data, bh->b_size,
(block<i_sb))
+ ((char *)de - bh->b_data))) {
- /* silently ignore the rest of the block */
- break;
+ /* On error, skip the f_pos to the next block. */
+ dir_file->f_pos = (dir_file->f_pos |
+ (dir->i_sb->s_blocksize - 1)) + 1;
+ brelse(bh);
+ return count;
}
ext4fs_dirhash(de->name, de->name_len, hinfo);
if ((hinfo->hash < start_hash) ||
diff --git a/trunk/fs/ext4/page-io.c b/trunk/fs/ext4/page-io.c
index 48786cdb5e6c..4acf1f78881b 100644
--- a/trunk/fs/ext4/page-io.c
+++ b/trunk/fs/ext4/page-io.c
@@ -46,121 +46,46 @@ void ext4_exit_pageio(void)
}
/*
- * Print an buffer I/O error compatible with the fs/buffer.c. This
- * provides compatibility with dmesg scrapers that look for a specific
- * buffer I/O error message. We really need a unified error reporting
- * structure to userspace ala Digital Unix's uerf system, but it's
- * probably not going to happen in my lifetime, due to LKML politics...
+ * This function is called by ext4_evict_inode() to make sure there is
+ * no more pending I/O completion work left to do.
*/
-static void buffer_io_error(struct buffer_head *bh)
+void ext4_ioend_shutdown(struct inode *inode)
{
- char b[BDEVNAME_SIZE];
- printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
- bdevname(bh->b_bdev, b),
- (unsigned long long)bh->b_blocknr);
-}
-
-static void ext4_finish_bio(struct bio *bio)
-{
- int i;
- int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
-
- for (i = 0; i < bio->bi_vcnt; i++) {
- struct bio_vec *bvec = &bio->bi_io_vec[i];
- struct page *page = bvec->bv_page;
- struct buffer_head *bh, *head;
- unsigned bio_start = bvec->bv_offset;
- unsigned bio_end = bio_start + bvec->bv_len;
- unsigned under_io = 0;
- unsigned long flags;
-
- if (!page)
- continue;
+ wait_queue_head_t *wq = ext4_ioend_wq(inode);
- if (error) {
- SetPageError(page);
- set_bit(AS_EIO, &page->mapping->flags);
- }
- bh = head = page_buffers(page);
- /*
- * We check all buffers in the page under BH_Uptodate_Lock
- * to avoid races with other end io clearing async_write flags
- */
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
- do {
- if (bh_offset(bh) < bio_start ||
- bh_offset(bh) + bh->b_size > bio_end) {
- if (buffer_async_write(bh))
- under_io++;
- continue;
- }
- clear_buffer_async_write(bh);
- if (error)
- buffer_io_error(bh);
- } while ((bh = bh->b_this_page) != head);
- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
- local_irq_restore(flags);
- if (!under_io)
- end_page_writeback(page);
- }
-}
-
-static void ext4_release_io_end(ext4_io_end_t *io_end)
-{
- struct bio *bio, *next_bio;
-
- BUG_ON(!list_empty(&io_end->list));
- BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
- WARN_ON(io_end->handle);
-
- if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
- wake_up_all(ext4_ioend_wq(io_end->inode));
-
- for (bio = io_end->bio; bio; bio = next_bio) {
- next_bio = bio->bi_private;
- ext4_finish_bio(bio);
- bio_put(bio);
- }
- if (io_end->flag & EXT4_IO_END_DIRECT)
- inode_dio_done(io_end->inode);
- if (io_end->iocb)
- aio_complete(io_end->iocb, io_end->result, 0);
- kmem_cache_free(io_end_cachep, io_end);
+ wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
+ /*
+ * We need to make sure the work structure is finished being
+ * used before we let the inode get destroyed.
+ */
+ if (work_pending(&EXT4_I(inode)->i_unwritten_work))
+ cancel_work_sync(&EXT4_I(inode)->i_unwritten_work);
}
-static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
+void ext4_free_io_end(ext4_io_end_t *io)
{
- struct inode *inode = io_end->inode;
+ BUG_ON(!io);
+ BUG_ON(!list_empty(&io->list));
+ BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);
- io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
- /* Wake up anyone waiting on unwritten extent conversion */
- if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
- wake_up_all(ext4_ioend_wq(inode));
+ if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
+ wake_up_all(ext4_ioend_wq(io->inode));
+ kmem_cache_free(io_end_cachep, io);
}
-/*
- * Check a range of space and convert unwritten extents to written. Note that
- * we are protected from truncate touching same part of extent tree by the
- * fact that truncate code waits for all DIO to finish (thus exclusion from
- * direct IO is achieved) and also waits for PageWriteback bits. Thus we
- * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
- * completed (happens from ext4_free_ioend()).
- */
+/* check a range of space and convert unwritten extents to written. */
static int ext4_end_io(ext4_io_end_t *io)
{
struct inode *inode = io->inode;
loff_t offset = io->offset;
ssize_t size = io->size;
- handle_t *handle = io->handle;
int ret = 0;
ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
"list->prev 0x%p\n",
io, inode->i_ino, io->list.next, io->list.prev);
- io->handle = NULL; /* Following call will use up the handle */
- ret = ext4_convert_unwritten_extents(handle, inode, offset, size);
+ ret = ext4_convert_unwritten_extents(inode, offset, size);
if (ret < 0) {
ext4_msg(inode->i_sb, KERN_EMERG,
"failed to convert unwritten extents to written "
@@ -168,22 +93,30 @@ static int ext4_end_io(ext4_io_end_t *io)
"(inode %lu, offset %llu, size %zd, error %d)",
inode->i_ino, offset, size, ret);
}
- ext4_clear_io_unwritten_flag(io);
- ext4_release_io_end(io);
+ /* Wake up anyone waiting on unwritten extent conversion */
+ if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
+ wake_up_all(ext4_ioend_wq(inode));
+ if (io->flag & EXT4_IO_END_DIRECT)
+ inode_dio_done(inode);
+ if (io->iocb)
+ aio_complete(io->iocb, io->result, 0);
return ret;
}
-static void dump_completed_IO(struct inode *inode, struct list_head *head)
+static void dump_completed_IO(struct inode *inode)
{
#ifdef EXT4FS_DEBUG
struct list_head *cur, *before, *after;
ext4_io_end_t *io, *io0, *io1;
- if (list_empty(head))
+ if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
+ ext4_debug("inode %lu completed_io list is empty\n",
+ inode->i_ino);
return;
+ }
- ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
- list_for_each_entry(io, head, list) {
+ ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino);
+ list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) {
cur = &io->list;
before = cur->prev;
io0 = container_of(before, ext4_io_end_t, list);
@@ -197,30 +130,23 @@ static void dump_completed_IO(struct inode *inode, struct list_head *head)
}
/* Add the io_end to per-inode completed end_io list. */
-static void ext4_add_complete_io(ext4_io_end_t *io_end)
+void ext4_add_complete_io(ext4_io_end_t *io_end)
{
struct ext4_inode_info *ei = EXT4_I(io_end->inode);
struct workqueue_struct *wq;
unsigned long flags;
BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
+ wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
+
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
- if (io_end->handle) {
- wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq;
- if (list_empty(&ei->i_rsv_conversion_list))
- queue_work(wq, &ei->i_rsv_conversion_work);
- list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
- } else {
- wq = EXT4_SB(io_end->inode->i_sb)->unrsv_conversion_wq;
- if (list_empty(&ei->i_unrsv_conversion_list))
- queue_work(wq, &ei->i_unrsv_conversion_work);
- list_add_tail(&io_end->list, &ei->i_unrsv_conversion_list);
- }
+ if (list_empty(&ei->i_completed_io_list))
+ queue_work(wq, &ei->i_unwritten_work);
+ list_add_tail(&io_end->list, &ei->i_completed_io_list);
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}
-static int ext4_do_flush_completed_IO(struct inode *inode,
- struct list_head *head)
+static int ext4_do_flush_completed_IO(struct inode *inode)
{
ext4_io_end_t *io;
struct list_head unwritten;
@@ -229,8 +155,8 @@ static int ext4_do_flush_completed_IO(struct inode *inode,
int err, ret = 0;
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
- dump_completed_IO(inode, head);
- list_replace_init(head, &unwritten);
+ dump_completed_IO(inode);
+ list_replace_init(&ei->i_completed_io_list, &unwritten);
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
while (!list_empty(&unwritten)) {
@@ -241,25 +167,30 @@ static int ext4_do_flush_completed_IO(struct inode *inode,
err = ext4_end_io(io);
if (unlikely(!ret && err))
ret = err;
+ io->flag &= ~EXT4_IO_END_UNWRITTEN;
+ ext4_free_io_end(io);
}
return ret;
}
/*
- * work on completed IO, to convert unwritten extents to extents
+ * work on completed aio dio IO, to convert unwritten extents to extents
*/
-void ext4_end_io_rsv_work(struct work_struct *work)
+void ext4_end_io_work(struct work_struct *work)
{
struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
- i_rsv_conversion_work);
- ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
+ i_unwritten_work);
+ ext4_do_flush_completed_IO(&ei->vfs_inode);
}
-void ext4_end_io_unrsv_work(struct work_struct *work)
+int ext4_flush_unwritten_io(struct inode *inode)
{
- struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
- i_unrsv_conversion_work);
- ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_unrsv_conversion_list);
+ int ret;
+ WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) &&
+ !(inode->i_state & I_FREEING));
+ ret = ext4_do_flush_completed_IO(inode);
+ ext4_unwritten_wait(inode);
+ return ret;
}
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
@@ -269,70 +200,83 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
atomic_inc(&EXT4_I(inode)->i_ioend_count);
io->inode = inode;
INIT_LIST_HEAD(&io->list);
- atomic_set(&io->count, 1);
}
return io;
}
-void ext4_put_io_end_defer(ext4_io_end_t *io_end)
-{
- if (atomic_dec_and_test(&io_end->count)) {
- if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
- ext4_release_io_end(io_end);
- return;
- }
- ext4_add_complete_io(io_end);
- }
-}
-
-int ext4_put_io_end(ext4_io_end_t *io_end)
-{
- int err = 0;
-
- if (atomic_dec_and_test(&io_end->count)) {
- if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
- err = ext4_convert_unwritten_extents(io_end->handle,
- io_end->inode, io_end->offset,
- io_end->size);
- io_end->handle = NULL;
- ext4_clear_io_unwritten_flag(io_end);
- }
- ext4_release_io_end(io_end);
- }
- return err;
-}
-
-ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
+/*
+ * Print an buffer I/O error compatible with the fs/buffer.c. This
+ * provides compatibility with dmesg scrapers that look for a specific
+ * buffer I/O error message. We really need a unified error reporting
+ * structure to userspace ala Digital Unix's uerf system, but it's
+ * probably not going to happen in my lifetime, due to LKML politics...
+ */
+static void buffer_io_error(struct buffer_head *bh)
{
- atomic_inc(&io_end->count);
- return io_end;
+ char b[BDEVNAME_SIZE];
+ printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
+ bdevname(bh->b_bdev, b),
+ (unsigned long long)bh->b_blocknr);
}
static void ext4_end_bio(struct bio *bio, int error)
{
ext4_io_end_t *io_end = bio->bi_private;
+ struct inode *inode;
+ int i;
+ int blocksize;
sector_t bi_sector = bio->bi_sector;
BUG_ON(!io_end);
+ inode = io_end->inode;
+ blocksize = 1 << inode->i_blkbits;
+ bio->bi_private = NULL;
bio->bi_end_io = NULL;
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
error = 0;
+ for (i = 0; i < bio->bi_vcnt; i++) {
+ struct bio_vec *bvec = &bio->bi_io_vec[i];
+ struct page *page = bvec->bv_page;
+ struct buffer_head *bh, *head;
+ unsigned bio_start = bvec->bv_offset;
+ unsigned bio_end = bio_start + bvec->bv_len;
+ unsigned under_io = 0;
+ unsigned long flags;
- if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
+ if (!page)
+ continue;
+
+ if (error) {
+ SetPageError(page);
+ set_bit(AS_EIO, &page->mapping->flags);
+ }
+ bh = head = page_buffers(page);
/*
- * Link bio into list hanging from io_end. We have to do it
- * atomically as bio completions can be racing against each
- * other.
+ * We check all buffers in the page under BH_Uptodate_Lock
+ * to avoid races with other end io clearing async_write flags
*/
- bio->bi_private = xchg(&io_end->bio, bio);
- } else {
- ext4_finish_bio(bio);
- bio_put(bio);
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
+ do {
+ if (bh_offset(bh) < bio_start ||
+ bh_offset(bh) + blocksize > bio_end) {
+ if (buffer_async_write(bh))
+ under_io++;
+ continue;
+ }
+ clear_buffer_async_write(bh);
+ if (error)
+ buffer_io_error(bh);
+ } while ((bh = bh->b_this_page) != head);
+ bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
+ local_irq_restore(flags);
+ if (!under_io)
+ end_page_writeback(page);
}
+ bio_put(bio);
if (error) {
- struct inode *inode = io_end->inode;
-
+ io_end->flag |= EXT4_IO_END_ERROR;
ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
"(offset %llu size %ld starting block %llu)",
inode->i_ino,
@@ -341,7 +285,13 @@ static void ext4_end_bio(struct bio *bio, int error)
(unsigned long long)
bi_sector >> (inode->i_blkbits - 9));
}
- ext4_put_io_end_defer(io_end);
+
+ if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+ ext4_free_io_end(io_end);
+ return;
+ }
+
+ ext4_add_complete_io(io_end);
}
void ext4_io_submit(struct ext4_io_submit *io)
@@ -355,38 +305,43 @@ void ext4_io_submit(struct ext4_io_submit *io)
bio_put(io->io_bio);
}
io->io_bio = NULL;
-}
-
-void ext4_io_submit_init(struct ext4_io_submit *io,
- struct writeback_control *wbc)
-{
- io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
- io->io_bio = NULL;
+ io->io_op = 0;
io->io_end = NULL;
}
-static int io_submit_init_bio(struct ext4_io_submit *io,
- struct buffer_head *bh)
+static int io_submit_init(struct ext4_io_submit *io,
+ struct inode *inode,
+ struct writeback_control *wbc,
+ struct buffer_head *bh)
{
+ ext4_io_end_t *io_end;
+ struct page *page = bh->b_page;
int nvecs = bio_get_nr_vecs(bh->b_bdev);
struct bio *bio;
- bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
- if (!bio)
+ io_end = ext4_init_io_end(inode, GFP_NOFS);
+ if (!io_end)
return -ENOMEM;
+ bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev;
+ bio->bi_private = io->io_end = io_end;
bio->bi_end_io = ext4_end_bio;
- bio->bi_private = ext4_get_io_end(io->io_end);
+
+ io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
+
io->io_bio = bio;
+ io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
io->io_next_block = bh->b_blocknr;
return 0;
}
static int io_submit_add_bh(struct ext4_io_submit *io,
struct inode *inode,
+ struct writeback_control *wbc,
struct buffer_head *bh)
{
+ ext4_io_end_t *io_end;
int ret;
if (io->io_bio && bh->b_blocknr != io->io_next_block) {
@@ -394,14 +349,18 @@ static int io_submit_add_bh(struct ext4_io_submit *io,
ext4_io_submit(io);
}
if (io->io_bio == NULL) {
- ret = io_submit_init_bio(io, bh);
+ ret = io_submit_init(io, inode, wbc, bh);
if (ret)
return ret;
}
+ io_end = io->io_end;
+ if (test_clear_buffer_uninit(bh))
+ ext4_set_io_unwritten_flag(inode, io_end);
+ io->io_end->size += bh->b_size;
+ io->io_next_block++;
ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
if (ret != bh->b_size)
goto submit_and_retry;
- io->io_next_block++;
return 0;
}
@@ -473,7 +432,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
do {
if (!buffer_async_write(bh))
continue;
- ret = io_submit_add_bh(io, inode, bh);
+ ret = io_submit_add_bh(io, inode, wbc, bh);
if (ret) {
/*
* We only get here on ENOMEM. Not much else
diff --git a/trunk/fs/ext4/resize.c b/trunk/fs/ext4/resize.c
index c5adbb318a90..b27c96d01965 100644
--- a/trunk/fs/ext4/resize.c
+++ b/trunk/fs/ext4/resize.c
@@ -79,20 +79,12 @@ static int verify_group_input(struct super_block *sb,
ext4_fsblk_t end = start + input->blocks_count;
ext4_group_t group = input->group;
ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
- unsigned overhead;
- ext4_fsblk_t metaend;
+ unsigned overhead = ext4_group_overhead_blocks(sb, group);
+ ext4_fsblk_t metaend = start + overhead;
struct buffer_head *bh = NULL;
ext4_grpblk_t free_blocks_count, offset;
int err = -EINVAL;
- if (group != sbi->s_groups_count) {
- ext4_warning(sb, "Cannot add at group %u (only %u groups)",
- input->group, sbi->s_groups_count);
- return -EINVAL;
- }
-
- overhead = ext4_group_overhead_blocks(sb, group);
- metaend = start + overhead;
input->free_blocks_count = free_blocks_count =
input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
@@ -104,7 +96,10 @@ static int verify_group_input(struct super_block *sb,
free_blocks_count, input->reserved_blocks);
ext4_get_group_no_and_offset(sb, start, NULL, &offset);
- if (offset != 0)
+ if (group != sbi->s_groups_count)
+ ext4_warning(sb, "Cannot add at group %u (only %u groups)",
+ input->group, sbi->s_groups_count);
+ else if (offset != 0)
ext4_warning(sb, "Last group not full");
else if (input->reserved_blocks > input->blocks_count / 5)
ext4_warning(sb, "Reserved blocks too high (%u)",
@@ -1556,10 +1551,11 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
struct inode *inode = NULL;
- int gdb_off;
+ int gdb_off, gdb_num;
int err;
__u16 bg_flags = 0;
+ gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
@@ -1660,10 +1656,12 @@ static int ext4_group_extend_no_check(struct super_block *sb,
err = err2;
if (!err) {
+ ext4_fsblk_t first_block;
+ first_block = ext4_group_first_block_no(sb, 0);
if (test_opt(sb, DEBUG))
printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
"blocks\n", ext4_blocks_count(es));
- update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
+ update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr - first_block,
(char *)es, sizeof(struct ext4_super_block), 0);
}
return err;
diff --git a/trunk/fs/ext4/super.c b/trunk/fs/ext4/super.c
index 85b3dd60169b..94cc84db7c9a 100644
--- a/trunk/fs/ext4/super.c
+++ b/trunk/fs/ext4/super.c
@@ -69,7 +69,6 @@ static void ext4_mark_recovery_complete(struct super_block *sb,
static void ext4_clear_journal_err(struct super_block *sb,
struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
-static int ext4_sync_fs_nojournal(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
@@ -399,11 +398,6 @@ static void ext4_handle_error(struct super_block *sb)
}
if (test_opt(sb, ERRORS_RO)) {
ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
- /*
- * Make sure updated value of ->s_mount_flags will be visible
- * before ->s_flags update
- */
- smp_wmb();
sb->s_flags |= MS_RDONLY;
}
if (test_opt(sb, ERRORS_PANIC))
@@ -428,9 +422,9 @@ void __ext4_error(struct super_block *sb, const char *function,
ext4_handle_error(sb);
}
-void __ext4_error_inode(struct inode *inode, const char *function,
- unsigned int line, ext4_fsblk_t block,
- const char *fmt, ...)
+void ext4_error_inode(struct inode *inode, const char *function,
+ unsigned int line, ext4_fsblk_t block,
+ const char *fmt, ...)
{
va_list args;
struct va_format vaf;
@@ -457,9 +451,9 @@ void __ext4_error_inode(struct inode *inode, const char *function,
ext4_handle_error(inode->i_sb);
}
-void __ext4_error_file(struct file *file, const char *function,
- unsigned int line, ext4_fsblk_t block,
- const char *fmt, ...)
+void ext4_error_file(struct file *file, const char *function,
+ unsigned int line, ext4_fsblk_t block,
+ const char *fmt, ...)
{
va_list args;
struct va_format vaf;
@@ -576,13 +570,8 @@ void __ext4_abort(struct super_block *sb, const char *function,
if ((sb->s_flags & MS_RDONLY) == 0) {
ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
- EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
- /*
- * Make sure updated value of ->s_mount_flags will be visible
- * before ->s_flags update
- */
- smp_wmb();
sb->s_flags |= MS_RDONLY;
+ EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
if (EXT4_SB(sb)->s_journal)
jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
save_error_info(sb, function, line);
@@ -591,8 +580,7 @@ void __ext4_abort(struct super_block *sb, const char *function,
panic("EXT4-fs panic from previous error\n");
}
-void __ext4_msg(struct super_block *sb,
- const char *prefix, const char *fmt, ...)
+void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
@@ -762,10 +750,8 @@ static void ext4_put_super(struct super_block *sb)
ext4_unregister_li_request(sb);
dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
- flush_workqueue(sbi->unrsv_conversion_wq);
- flush_workqueue(sbi->rsv_conversion_wq);
- destroy_workqueue(sbi->unrsv_conversion_wq);
- destroy_workqueue(sbi->rsv_conversion_wq);
+ flush_workqueue(sbi->dio_unwritten_wq);
+ destroy_workqueue(sbi->dio_unwritten_wq);
if (sbi->s_journal) {
err = jbd2_journal_destroy(sbi->s_journal);
@@ -774,7 +760,7 @@ static void ext4_put_super(struct super_block *sb)
ext4_abort(sb, "Couldn't clean up the journal");
}
- ext4_es_unregister_shrinker(sbi);
+ ext4_es_unregister_shrinker(sb);
del_timer(&sbi->s_err_report);
ext4_release_system_zone(sb);
ext4_mb_release(sb);
@@ -863,7 +849,6 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
rwlock_init(&ei->i_es_lock);
INIT_LIST_HEAD(&ei->i_es_lru);
ei->i_es_lru_nr = 0;
- ei->i_touch_when = 0;
ei->i_reserved_data_blocks = 0;
ei->i_reserved_meta_blocks = 0;
ei->i_allocated_meta_blocks = 0;
@@ -874,15 +859,13 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
ei->i_reserved_quota = 0;
#endif
ei->jinode = NULL;
- INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
- INIT_LIST_HEAD(&ei->i_unrsv_conversion_list);
+ INIT_LIST_HEAD(&ei->i_completed_io_list);
spin_lock_init(&ei->i_completed_io_lock);
ei->i_sync_tid = 0;
ei->i_datasync_tid = 0;
atomic_set(&ei->i_ioend_count, 0);
atomic_set(&ei->i_unwritten, 0);
- INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
- INIT_WORK(&ei->i_unrsv_conversion_work, ext4_end_io_unrsv_work);
+ INIT_WORK(&ei->i_unwritten_work, ext4_end_io_work);
return &ei->vfs_inode;
}
@@ -1110,7 +1093,6 @@ static const struct super_operations ext4_nojournal_sops = {
.dirty_inode = ext4_dirty_inode,
.drop_inode = ext4_drop_inode,
.evict_inode = ext4_evict_inode,
- .sync_fs = ext4_sync_fs_nojournal,
.put_super = ext4_put_super,
.statfs = ext4_statfs,
.remount_fs = ext4_remount,
@@ -1926,6 +1908,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_group_desc *gdp = NULL;
ext4_group_t flex_group;
+ unsigned int groups_per_flex = 0;
int i, err;
sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
@@ -1933,6 +1916,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
sbi->s_log_groups_per_flex = 0;
return 1;
}
+ groups_per_flex = 1U << sbi->s_log_groups_per_flex;
err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
if (err)
@@ -2180,22 +2164,19 @@ static void ext4_orphan_cleanup(struct super_block *sb,
list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
dquot_initialize(inode);
if (inode->i_nlink) {
- if (test_opt(sb, DEBUG))
- ext4_msg(sb, KERN_DEBUG,
- "%s: truncating inode %lu to %lld bytes",
- __func__, inode->i_ino, inode->i_size);
+ ext4_msg(sb, KERN_DEBUG,
+ "%s: truncating inode %lu to %lld bytes",
+ __func__, inode->i_ino, inode->i_size);
jbd_debug(2, "truncating inode %lu to %lld bytes\n",
inode->i_ino, inode->i_size);
mutex_lock(&inode->i_mutex);
- truncate_inode_pages(inode->i_mapping, inode->i_size);
ext4_truncate(inode);
mutex_unlock(&inode->i_mutex);
nr_truncates++;
} else {
- if (test_opt(sb, DEBUG))
- ext4_msg(sb, KERN_DEBUG,
- "%s: deleting unreferenced inode %lu",
- __func__, inode->i_ino);
+ ext4_msg(sb, KERN_DEBUG,
+ "%s: deleting unreferenced inode %lu",
+ __func__, inode->i_ino);
jbd_debug(2, "deleting unreferenced inode %lu\n",
inode->i_ino);
nr_orphans++;
@@ -2396,10 +2377,7 @@ struct ext4_attr {
ssize_t (*show)(struct ext4_attr *, struct ext4_sb_info *, char *);
ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
const char *, size_t);
- union {
- int offset;
- int deprecated_val;
- } u;
+ int offset;
};
static int parse_strtoull(const char *buf,
@@ -2468,7 +2446,7 @@ static ssize_t inode_readahead_blks_store(struct ext4_attr *a,
static ssize_t sbi_ui_show(struct ext4_attr *a,
struct ext4_sb_info *sbi, char *buf)
{
- unsigned int *ui = (unsigned int *) (((char *) sbi) + a->u.offset);
+ unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset);
return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
}
@@ -2477,7 +2455,7 @@ static ssize_t sbi_ui_store(struct ext4_attr *a,
struct ext4_sb_info *sbi,
const char *buf, size_t count)
{
- unsigned int *ui = (unsigned int *) (((char *) sbi) + a->u.offset);
+ unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset);
unsigned long t;
int ret;
@@ -2526,20 +2504,12 @@ static ssize_t trigger_test_error(struct ext4_attr *a,
return count;
}
-static ssize_t sbi_deprecated_show(struct ext4_attr *a,
- struct ext4_sb_info *sbi, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", a->u.deprecated_val);
-}
-
#define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \
static struct ext4_attr ext4_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
.store = _store, \
- .u = { \
- .offset = offsetof(struct ext4_sb_info, _elname),\
- }, \
+ .offset = offsetof(struct ext4_sb_info, _elname), \
}
#define EXT4_ATTR(name, mode, show, store) \
static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store)
@@ -2550,14 +2520,6 @@ static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store)
#define EXT4_RW_ATTR_SBI_UI(name, elname) \
EXT4_ATTR_OFFSET(name, 0644, sbi_ui_show, sbi_ui_store, elname)
#define ATTR_LIST(name) &ext4_attr_##name.attr
-#define EXT4_DEPRECATED_ATTR(_name, _val) \
-static struct ext4_attr ext4_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = 0444 }, \
- .show = sbi_deprecated_show, \
- .u = { \
- .deprecated_val = _val, \
- }, \
-}
EXT4_RO_ATTR(delayed_allocation_blocks);
EXT4_RO_ATTR(session_write_kbytes);
@@ -2572,7 +2534,7 @@ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
-EXT4_DEPRECATED_ATTR(max_writeback_mb_bump, 128);
+EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error);
@@ -3801,7 +3763,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_err_report.data = (unsigned long) sb;
/* Register extent status tree shrinker */
- ext4_es_register_shrinker(sbi);
+ ext4_es_register_shrinker(sb);
err = percpu_counter_init(&sbi->s_freeclusters_counter,
ext4_count_free_clusters(sb));
@@ -3825,6 +3787,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
sbi->s_stripe = ext4_get_stripe_size(sbi);
+ sbi->s_max_writeback_mb_bump = 128;
sbi->s_extent_max_zeroout_kb = 32;
/*
@@ -3952,20 +3915,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
* The maximum number of concurrent works can be high and
* concurrency isn't really necessary. Limit it to 1.
*/
- EXT4_SB(sb)->rsv_conversion_wq =
- alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
- if (!EXT4_SB(sb)->rsv_conversion_wq) {
- printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
- ret = -ENOMEM;
- goto failed_mount4;
- }
-
- EXT4_SB(sb)->unrsv_conversion_wq =
- alloc_workqueue("ext4-unrsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
- if (!EXT4_SB(sb)->unrsv_conversion_wq) {
- printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
+ EXT4_SB(sb)->dio_unwritten_wq =
+ alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+ if (!EXT4_SB(sb)->dio_unwritten_wq) {
+ printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
ret = -ENOMEM;
- goto failed_mount4;
+ goto failed_mount_wq;
}
/*
@@ -4119,17 +4074,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sb->s_root = NULL;
failed_mount4:
ext4_msg(sb, KERN_ERR, "mount failed");
- if (EXT4_SB(sb)->rsv_conversion_wq)
- destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
- if (EXT4_SB(sb)->unrsv_conversion_wq)
- destroy_workqueue(EXT4_SB(sb)->unrsv_conversion_wq);
+ destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
failed_mount_wq:
if (sbi->s_journal) {
jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
}
failed_mount3:
- ext4_es_unregister_shrinker(sbi);
+ ext4_es_unregister_shrinker(sb);
del_timer(&sbi->s_err_report);
if (sbi->s_flex_groups)
ext4_kvfree(sbi->s_flex_groups);
@@ -4565,52 +4517,19 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
{
int ret = 0;
tid_t target;
- bool needs_barrier = false;
struct ext4_sb_info *sbi = EXT4_SB(sb);
trace_ext4_sync_fs(sb, wait);
- flush_workqueue(sbi->rsv_conversion_wq);
- flush_workqueue(sbi->unrsv_conversion_wq);
+ flush_workqueue(sbi->dio_unwritten_wq);
/*
* Writeback quota in non-journalled quota case - journalled quota has
* no dirty dquots
*/
dquot_writeback_dquots(sb, -1);
- /*
- * Data writeback is possible w/o journal transaction, so barrier must
- * being sent at the end of the function. But we can skip it if
- * transaction_commit will do it for us.
- */
- target = jbd2_get_latest_transaction(sbi->s_journal);
- if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
- !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
- needs_barrier = true;
-
if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
if (wait)
- ret = jbd2_log_wait_commit(sbi->s_journal, target);
- }
- if (needs_barrier) {
- int err;
- err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
- if (!ret)
- ret = err;
+ jbd2_log_wait_commit(sbi->s_journal, target);
}
-
- return ret;
-}
-
-static int ext4_sync_fs_nojournal(struct super_block *sb, int wait)
-{
- int ret = 0;
-
- trace_ext4_sync_fs(sb, wait);
- flush_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
- flush_workqueue(EXT4_SB(sb)->unrsv_conversion_wq);
- dquot_writeback_dquots(sb, -1);
- if (wait && test_opt(sb, BARRIER))
- ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
-
return ret;
}
diff --git a/trunk/fs/f2fs/Kconfig b/trunk/fs/f2fs/Kconfig
index e06e0995e00f..fd27e7e6326e 100644
--- a/trunk/fs/f2fs/Kconfig
+++ b/trunk/fs/f2fs/Kconfig
@@ -51,15 +51,3 @@ config F2FS_FS_POSIX_ACL
Linux website .
If you don't know what Access Control Lists are, say N
-
-config F2FS_FS_SECURITY
- bool "F2FS Security Labels"
- depends on F2FS_FS_XATTR
- help
- Security labels provide an access control facility to support Linux
- Security Models (LSMs) accepted by AppArmor, SELinux, Smack and TOMOYO
- Linux. This option enables an extended attribute handler for file
- security labels in the f2fs filesystem, so that it requires enabling
- the extended attribute support in advance.
-
- If you are not using a security module, say N.
diff --git a/trunk/fs/f2fs/acl.c b/trunk/fs/f2fs/acl.c
index b7826ec1b470..44abc2f286e0 100644
--- a/trunk/fs/f2fs/acl.c
+++ b/trunk/fs/f2fs/acl.c
@@ -250,7 +250,7 @@ static int f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
}
}
- error = f2fs_setxattr(inode, name_index, "", value, size, NULL);
+ error = f2fs_setxattr(inode, name_index, "", value, size);
kfree(value);
if (!error)
diff --git a/trunk/fs/f2fs/checkpoint.c b/trunk/fs/f2fs/checkpoint.c
index 66a6b85a51d8..b1de01da1a40 100644
--- a/trunk/fs/f2fs/checkpoint.c
+++ b/trunk/fs/f2fs/checkpoint.c
@@ -357,8 +357,8 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
unsigned long blk_size = sbi->blocksize;
struct f2fs_checkpoint *cp_block;
unsigned long long cur_version = 0, pre_version = 0;
+ unsigned int crc = 0;
size_t crc_offset;
- __u32 crc = 0;
/* Read the 1st cp block in this CP pack */
cp_page_1 = get_meta_page(sbi, cp_addr);
@@ -369,7 +369,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
if (crc_offset >= blk_size)
goto invalid_cp1;
- crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset)));
+ crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
if (!f2fs_crc_valid(crc, cp_block, crc_offset))
goto invalid_cp1;
@@ -384,7 +384,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
if (crc_offset >= blk_size)
goto invalid_cp2;
- crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset)));
+ crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
if (!f2fs_crc_valid(crc, cp_block, crc_offset))
goto invalid_cp2;
@@ -450,29 +450,12 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
return -EINVAL;
}
-static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new)
-{
- struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
- struct list_head *head = &sbi->dir_inode_list;
- struct list_head *this;
-
- list_for_each(this, head) {
- struct dir_inode_entry *entry;
- entry = list_entry(this, struct dir_inode_entry, list);
- if (entry->inode == inode)
- return -EEXIST;
- }
- list_add_tail(&new->list, head);
-#ifdef CONFIG_F2FS_STAT_FS
- sbi->n_dirty_dirs++;
-#endif
- return 0;
-}
-
void set_dirty_dir_page(struct inode *inode, struct page *page)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct list_head *head = &sbi->dir_inode_list;
struct dir_inode_entry *new;
+ struct list_head *this;
if (!S_ISDIR(inode->i_mode))
return;
@@ -486,31 +469,23 @@ void set_dirty_dir_page(struct inode *inode, struct page *page)
INIT_LIST_HEAD(&new->list);
spin_lock(&sbi->dir_inode_lock);
- if (__add_dirty_inode(inode, new))
- kmem_cache_free(inode_entry_slab, new);
+ list_for_each(this, head) {
+ struct dir_inode_entry *entry;
+ entry = list_entry(this, struct dir_inode_entry, list);
+ if (entry->inode == inode) {
+ kmem_cache_free(inode_entry_slab, new);
+ goto out;
+ }
+ }
+ list_add_tail(&new->list, head);
+ sbi->n_dirty_dirs++;
+ BUG_ON(!S_ISDIR(inode->i_mode));
+out:
inc_page_count(sbi, F2FS_DIRTY_DENTS);
inode_inc_dirty_dents(inode);
SetPagePrivate(page);
- spin_unlock(&sbi->dir_inode_lock);
-}
-void add_dirty_dir_inode(struct inode *inode)
-{
- struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
- struct dir_inode_entry *new;
-retry:
- new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
- if (!new) {
- cond_resched();
- goto retry;
- }
- new->inode = inode;
- INIT_LIST_HEAD(&new->list);
-
- spin_lock(&sbi->dir_inode_lock);
- if (__add_dirty_inode(inode, new))
- kmem_cache_free(inode_entry_slab, new);
spin_unlock(&sbi->dir_inode_lock);
}
@@ -524,10 +499,8 @@ void remove_dirty_dir_inode(struct inode *inode)
return;
spin_lock(&sbi->dir_inode_lock);
- if (atomic_read(&F2FS_I(inode)->dirty_dents)) {
- spin_unlock(&sbi->dir_inode_lock);
- return;
- }
+ if (atomic_read(&F2FS_I(inode)->dirty_dents))
+ goto out;
list_for_each(this, head) {
struct dir_inode_entry *entry;
@@ -535,38 +508,12 @@ void remove_dirty_dir_inode(struct inode *inode)
if (entry->inode == inode) {
list_del(&entry->list);
kmem_cache_free(inode_entry_slab, entry);
-#ifdef CONFIG_F2FS_STAT_FS
sbi->n_dirty_dirs--;
-#endif
- break;
- }
- }
- spin_unlock(&sbi->dir_inode_lock);
-
- /* Only from the recovery routine */
- if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) {
- clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT);
- iput(inode);
- }
-}
-
-struct inode *check_dirty_dir_inode(struct f2fs_sb_info *sbi, nid_t ino)
-{
- struct list_head *head = &sbi->dir_inode_list;
- struct list_head *this;
- struct inode *inode = NULL;
-
- spin_lock(&sbi->dir_inode_lock);
- list_for_each(this, head) {
- struct dir_inode_entry *entry;
- entry = list_entry(this, struct dir_inode_entry, list);
- if (entry->inode->i_ino == ino) {
- inode = entry->inode;
break;
}
}
+out:
spin_unlock(&sbi->dir_inode_lock);
- return inode;
}
void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
@@ -648,7 +595,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
block_t start_blk;
struct page *cp_page;
unsigned int data_sum_blocks, orphan_blocks;
- __u32 crc32 = 0;
+ unsigned int crc32 = 0;
void *kaddr;
int i;
@@ -717,8 +664,8 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
- *((__le32 *)((unsigned char *)ckpt +
- le32_to_cpu(ckpt->checksum_offset)))
+ *(__le32 *)((unsigned char *)ckpt +
+ le32_to_cpu(ckpt->checksum_offset))
= cpu_to_le32(crc32);
start_blk = __start_cp_addr(sbi);
diff --git a/trunk/fs/f2fs/data.c b/trunk/fs/f2fs/data.c
index 035f9a345cdf..91ff93b0b0f4 100644
--- a/trunk/fs/f2fs/data.c
+++ b/trunk/fs/f2fs/data.c
@@ -68,9 +68,7 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
struct buffer_head *bh_result)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
-#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-#endif
pgoff_t start_fofs, end_fofs;
block_t start_blkaddr;
@@ -80,9 +78,7 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
return 0;
}
-#ifdef CONFIG_F2FS_STAT_FS
sbi->total_hit_ext++;
-#endif
start_fofs = fi->ext.fofs;
end_fofs = fi->ext.fofs + fi->ext.len - 1;
start_blkaddr = fi->ext.blk_addr;
@@ -100,9 +96,7 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
else
bh_result->b_size = UINT_MAX;
-#ifdef CONFIG_F2FS_STAT_FS
sbi->read_hit_ext++;
-#endif
read_unlock(&fi->ext.ext_lock);
return 1;
}
@@ -205,7 +199,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
if (dn.data_blkaddr == NEW_ADDR)
return ERR_PTR(-EINVAL);
- page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
+ page = grab_cache_page(mapping, index);
if (!page)
return ERR_PTR(-ENOMEM);
@@ -239,23 +233,18 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
struct page *page;
int err;
-repeat:
- page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
- if (!page)
- return ERR_PTR(-ENOMEM);
-
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
- if (err) {
- f2fs_put_page(page, 1);
+ if (err)
return ERR_PTR(err);
- }
f2fs_put_dnode(&dn);
- if (dn.data_blkaddr == NULL_ADDR) {
- f2fs_put_page(page, 1);
+ if (dn.data_blkaddr == NULL_ADDR)
return ERR_PTR(-ENOENT);
- }
+repeat:
+ page = grab_cache_page(mapping, index);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
if (PageUptodate(page))
return page;
@@ -285,10 +274,9 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
*
* Also, caller should grab and release a mutex by calling mutex_lock_op() and
* mutex_unlock_op().
- * Note that, npage is set only by make_empty_dir.
*/
-struct page *get_new_data_page(struct inode *inode,
- struct page *npage, pgoff_t index, bool new_i_size)
+struct page *get_new_data_page(struct inode *inode, pgoff_t index,
+ bool new_i_size)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
struct address_space *mapping = inode->i_mapping;
@@ -296,20 +284,18 @@ struct page *get_new_data_page(struct inode *inode,
struct dnode_of_data dn;
int err;
- set_new_dnode(&dn, inode, npage, npage, 0);
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, index, ALLOC_NODE);
if (err)
return ERR_PTR(err);
if (dn.data_blkaddr == NULL_ADDR) {
if (reserve_new_block(&dn)) {
- if (!npage)
- f2fs_put_dnode(&dn);
+ f2fs_put_dnode(&dn);
return ERR_PTR(-ENOSPC);
}
}
- if (!npage)
- f2fs_put_dnode(&dn);
+ f2fs_put_dnode(&dn);
repeat:
page = grab_cache_page(mapping, index);
if (!page)
@@ -339,8 +325,6 @@ struct page *get_new_data_page(struct inode *inode,
if (new_i_size &&
i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
- /* Only the directory inode sets new_i_size */
- set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
mark_inode_dirty_sync(inode);
}
return page;
@@ -497,9 +481,8 @@ int do_write_data_page(struct page *page)
* If current allocation needs SSR,
* it had better in-place writes for updated data.
*/
- if (unlikely(old_blk_addr != NEW_ADDR &&
- !is_cold_data(page) &&
- need_inplace_update(inode))) {
+ if (old_blk_addr != NEW_ADDR && !is_cold_data(page) &&
+ need_inplace_update(inode)) {
rewrite_data_page(F2FS_SB(inode->i_sb), page,
old_blk_addr);
} else {
@@ -701,27 +684,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
return err;
}
-static int f2fs_write_end(struct file *file,
- struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
-{
- struct inode *inode = page->mapping->host;
-
- SetPageUptodate(page);
- set_page_dirty(page);
-
- if (pos + copied > i_size_read(inode)) {
- i_size_write(inode, pos + copied);
- mark_inode_dirty(inode);
- update_inode_page(inode);
- }
-
- unlock_page(page);
- page_cache_release(page);
- return copied;
-}
-
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
@@ -736,8 +698,7 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
get_data_block_ro);
}
-static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
- unsigned int length)
+static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
{
struct inode *inode = page->mapping->host;
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
@@ -779,7 +740,7 @@ const struct address_space_operations f2fs_dblock_aops = {
.writepage = f2fs_write_data_page,
.writepages = f2fs_write_data_pages,
.write_begin = f2fs_write_begin,
- .write_end = f2fs_write_end,
+ .write_end = nobh_write_end,
.set_page_dirty = f2fs_set_data_page_dirty,
.invalidatepage = f2fs_invalidate_data_page,
.releasepage = f2fs_release_data_page,
diff --git a/trunk/fs/f2fs/debug.c b/trunk/fs/f2fs/debug.c
index 0d6c6aafb235..8d9943786c31 100644
--- a/trunk/fs/f2fs/debug.c
+++ b/trunk/fs/f2fs/debug.c
@@ -175,12 +175,12 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
static int stat_show(struct seq_file *s, void *v)
{
- struct f2fs_stat_info *si;
+ struct f2fs_stat_info *si, *next;
int i = 0;
int j;
mutex_lock(&f2fs_stat_mutex);
- list_for_each_entry(si, &f2fs_stat_list, stat_list) {
+ list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) {
char devname[BDEVNAME_SIZE];
update_general_status(si->sbi);
diff --git a/trunk/fs/f2fs/dir.c b/trunk/fs/f2fs/dir.c
index 9d1cd423450d..1ac6b93036b7 100644
--- a/trunk/fs/f2fs/dir.c
+++ b/trunk/fs/f2fs/dir.c
@@ -13,7 +13,6 @@
#include "f2fs.h"
#include "node.h"
#include "acl.h"
-#include "xattr.h"
static unsigned long dir_blocks(struct inode *inode)
{
@@ -216,9 +215,9 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
{
- struct page *page;
- struct f2fs_dir_entry *de;
- struct f2fs_dentry_block *dentry_blk;
+ struct page *page = NULL;
+ struct f2fs_dir_entry *de = NULL;
+ struct f2fs_dentry_block *dentry_blk = NULL;
page = get_lock_data_page(dir, 0);
if (IS_ERR(page))
@@ -265,10 +264,15 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
f2fs_put_page(page, 1);
}
-static void init_dent_inode(const struct qstr *name, struct page *ipage)
+void init_dent_inode(const struct qstr *name, struct page *ipage)
{
struct f2fs_node *rn;
+ if (IS_ERR(ipage))
+ return;
+
+ wait_on_page_writeback(ipage);
+
/* copy name info. to this inode page */
rn = (struct f2fs_node *)page_address(ipage);
rn->i.i_namelen = cpu_to_le32(name->len);
@@ -276,15 +280,14 @@ static void init_dent_inode(const struct qstr *name, struct page *ipage)
set_page_dirty(ipage);
}
-static int make_empty_dir(struct inode *inode,
- struct inode *parent, struct page *page)
+static int make_empty_dir(struct inode *inode, struct inode *parent)
{
struct page *dentry_page;
struct f2fs_dentry_block *dentry_blk;
struct f2fs_dir_entry *de;
void *kaddr;
- dentry_page = get_new_data_page(inode, page, 0, true);
+ dentry_page = get_new_data_page(inode, 0, true);
if (IS_ERR(dentry_page))
return PTR_ERR(dentry_page);
@@ -314,76 +317,63 @@ static int make_empty_dir(struct inode *inode,
return 0;
}
-static struct page *init_inode_metadata(struct inode *inode,
+static int init_inode_metadata(struct inode *inode,
struct inode *dir, const struct qstr *name)
{
- struct page *page;
- int err;
-
if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
- page = new_inode_page(inode, name);
- if (IS_ERR(page))
- return page;
+ int err;
+ err = new_inode_page(inode, name);
+ if (err)
+ return err;
if (S_ISDIR(inode->i_mode)) {
- err = make_empty_dir(inode, dir, page);
- if (err)
- goto error;
+ err = make_empty_dir(inode, dir);
+ if (err) {
+ remove_inode_page(inode);
+ return err;
+ }
}
err = f2fs_init_acl(inode, dir);
- if (err)
- goto error;
-
- err = f2fs_init_security(inode, dir, name, page);
- if (err)
- goto error;
-
- wait_on_page_writeback(page);
+ if (err) {
+ remove_inode_page(inode);
+ return err;
+ }
} else {
- page = get_node_page(F2FS_SB(dir->i_sb), inode->i_ino);
- if (IS_ERR(page))
- return page;
-
- wait_on_page_writeback(page);
- set_cold_node(inode, page);
+ struct page *ipage;
+ ipage = get_node_page(F2FS_SB(dir->i_sb), inode->i_ino);
+ if (IS_ERR(ipage))
+ return PTR_ERR(ipage);
+ set_cold_node(inode, ipage);
+ init_dent_inode(name, ipage);
+ f2fs_put_page(ipage, 1);
}
-
- init_dent_inode(name, page);
-
- /*
- * This file should be checkpointed during fsync.
- * We lost i_pino from now on.
- */
if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
- file_lost_pino(inode);
inc_nlink(inode);
+ update_inode_page(inode);
}
- return page;
-
-error:
- f2fs_put_page(page, 1);
- remove_inode_page(inode);
- return ERR_PTR(err);
+ return 0;
}
static void update_parent_metadata(struct inode *dir, struct inode *inode,
unsigned int current_depth)
{
+ bool need_dir_update = false;
+
if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
if (S_ISDIR(inode->i_mode)) {
inc_nlink(dir);
- set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
+ need_dir_update = true;
}
clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
}
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
if (F2FS_I(dir)->i_current_depth != current_depth) {
F2FS_I(dir)->i_current_depth = current_depth;
- set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
+ need_dir_update = true;
}
- if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR))
+ if (need_dir_update)
update_inode_page(dir);
else
mark_inode_dirty(dir);
@@ -433,7 +423,6 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *in
struct page *dentry_page = NULL;
struct f2fs_dentry_block *dentry_blk = NULL;
int slots = GET_DENTRY_SLOTS(namelen);
- struct page *page;
int err = 0;
int i;
@@ -459,7 +448,7 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *in
bidx = dir_block_index(level, (le32_to_cpu(dentry_hash) % nbucket));
for (block = bidx; block <= (bidx + nblock - 1); block++) {
- dentry_page = get_new_data_page(dir, NULL, block, true);
+ dentry_page = get_new_data_page(dir, block, true);
if (IS_ERR(dentry_page))
return PTR_ERR(dentry_page);
@@ -476,13 +465,12 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *in
++level;
goto start;
add_dentry:
+ err = init_inode_metadata(inode, dir, name);
+ if (err)
+ goto fail;
+
wait_on_page_writeback(dentry_page);
- page = init_inode_metadata(inode, dir, name);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto fail;
- }
de = &dentry_blk->dentry[bit_pos];
de->hash_code = dentry_hash;
de->name_len = cpu_to_le16(namelen);
@@ -493,14 +481,11 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *in
test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
set_page_dirty(dentry_page);
- /* we don't need to mark_inode_dirty now */
- F2FS_I(inode)->i_pino = dir->i_ino;
- update_inode(inode, page);
- f2fs_put_page(page, 1);
-
update_parent_metadata(dir, inode, current_depth);
+
+ /* update parent inode number before releasing dentry page */
+ F2FS_I(inode)->i_pino = dir->i_ino;
fail:
- clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
kunmap(dentry_page);
f2fs_put_page(dentry_page, 1);
return err;
@@ -606,19 +591,24 @@ bool f2fs_empty_dir(struct inode *dir)
return true;
}
-static int f2fs_readdir(struct file *file, struct dir_context *ctx)
+static int f2fs_readdir(struct file *file, void *dirent, filldir_t filldir)
{
+ unsigned long pos = file->f_pos;
struct inode *inode = file_inode(file);
unsigned long npages = dir_blocks(inode);
+ unsigned char *types = NULL;
unsigned int bit_pos = 0, start_bit_pos = 0;
+ int over = 0;
struct f2fs_dentry_block *dentry_blk = NULL;
struct f2fs_dir_entry *de = NULL;
struct page *dentry_page = NULL;
- unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK);
+ unsigned int n = 0;
unsigned char d_type = DT_UNKNOWN;
int slots;
- bit_pos = ((unsigned long)ctx->pos % NR_DENTRY_IN_BLOCK);
+ types = f2fs_filetype_table;
+ bit_pos = (pos % NR_DENTRY_IN_BLOCK);
+ n = (pos / NR_DENTRY_IN_BLOCK);
for ( ; n < npages; n++) {
dentry_page = get_lock_data_page(inode, n);
@@ -628,28 +618,31 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
start_bit_pos = bit_pos;
dentry_blk = kmap(dentry_page);
while (bit_pos < NR_DENTRY_IN_BLOCK) {
+ d_type = DT_UNKNOWN;
bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
NR_DENTRY_IN_BLOCK,
bit_pos);
if (bit_pos >= NR_DENTRY_IN_BLOCK)
break;
- ctx->pos += bit_pos - start_bit_pos;
de = &dentry_blk->dentry[bit_pos];
- if (de->file_type < F2FS_FT_MAX)
- d_type = f2fs_filetype_table[de->file_type];
- else
- d_type = DT_UNKNOWN;
- if (!dir_emit(ctx,
- dentry_blk->filename[bit_pos],
- le16_to_cpu(de->name_len),
- le32_to_cpu(de->ino), d_type))
+ if (types && de->file_type < F2FS_FT_MAX)
+ d_type = types[de->file_type];
+
+ over = filldir(dirent,
+ dentry_blk->filename[bit_pos],
+ le16_to_cpu(de->name_len),
+ (n * NR_DENTRY_IN_BLOCK) + bit_pos,
+ le32_to_cpu(de->ino), d_type);
+ if (over) {
+ file->f_pos += bit_pos - start_bit_pos;
goto success;
+ }
slots = GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
bit_pos += slots;
}
bit_pos = 0;
- ctx->pos = (n + 1) * NR_DENTRY_IN_BLOCK;
+ file->f_pos = (n + 1) * NR_DENTRY_IN_BLOCK;
kunmap(dentry_page);
f2fs_put_page(dentry_page, 1);
dentry_page = NULL;
@@ -666,7 +659,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
const struct file_operations f2fs_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = f2fs_readdir,
+ .readdir = f2fs_readdir,
.fsync = f2fs_sync_file,
.unlocked_ioctl = f2fs_ioctl,
};
diff --git a/trunk/fs/f2fs/f2fs.h b/trunk/fs/f2fs/f2fs.h
index 467d42d65c48..20aab02f2a42 100644
--- a/trunk/fs/f2fs/f2fs.h
+++ b/trunk/fs/f2fs/f2fs.h
@@ -37,35 +37,21 @@
typecheck(unsigned long long, b) && \
((long long)((a) - (b)) > 0))
-typedef u32 block_t; /*
- * should not change u32, since it is the on-disk block
- * address format, __le32.
- */
+typedef u64 block_t;
typedef u32 nid_t;
struct f2fs_mount_info {
unsigned int opt;
};
-#define CRCPOLY_LE 0xedb88320
-
-static inline __u32 f2fs_crc32(void *buf, size_t len)
+static inline __u32 f2fs_crc32(void *buff, size_t len)
{
- unsigned char *p = (unsigned char *)buf;
- __u32 crc = F2FS_SUPER_MAGIC;
- int i;
-
- while (len--) {
- crc ^= *p++;
- for (i = 0; i < 8; i++)
- crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
- }
- return crc;
+ return crc32_le(F2FS_SUPER_MAGIC, buff, len);
}
-static inline bool f2fs_crc_valid(__u32 blk_crc, void *buf, size_t buf_size)
+static inline bool f2fs_crc_valid(__u32 blk_crc, void *buff, size_t buff_size)
{
- return f2fs_crc32(buf, buf_size) == blk_crc;
+ return f2fs_crc32(buff, buff_size) == blk_crc;
}
/*
@@ -162,7 +148,7 @@ struct extent_info {
* i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
*/
#define FADVISE_COLD_BIT 0x01
-#define FADVISE_LOST_PINO_BIT 0x02
+#define FADVISE_CP_BIT 0x02
struct f2fs_inode_info {
struct inode vfs_inode; /* serve a vfs inode */
@@ -383,6 +369,7 @@ struct f2fs_sb_info {
/* for directory inode management */
struct list_head dir_inode_list; /* dir inode list */
spinlock_t dir_inode_lock; /* for dir inode list lock */
+ unsigned int n_dirty_dirs; /* # of dir inodes */
/* basic file system units */
unsigned int log_sectors_per_block; /* log2 sectors per block */
@@ -419,15 +406,12 @@ struct f2fs_sb_info {
* for stat information.
* one is for the LFS mode, and the other is for the SSR mode.
*/
-#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info *stat_info; /* FS status information */
unsigned int segment_count[2]; /* # of allocated segments */
unsigned int block_count[2]; /* # of allocated blocks */
+ unsigned int last_victim[2]; /* last victim segment # */
int total_hit_ext, read_hit_ext; /* extent cache hit ratio */
int bg_gc; /* background gc calls */
- unsigned int n_dirty_dirs; /* # of dir inodes */
-#endif
- unsigned int last_victim[2]; /* last victim segment # */
spinlock_t stat_lock; /* lock for stat operations */
};
@@ -511,17 +495,9 @@ static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
static inline void mutex_lock_all(struct f2fs_sb_info *sbi)
{
- int i;
-
- for (i = 0; i < NR_GLOBAL_LOCKS; i++) {
- /*
- * This is the only time we take multiple fs_lock[]
- * instances; the order is immaterial since we
- * always hold cp_mutex, which serializes multiple
- * such operations.
- */
- mutex_lock_nest_lock(&sbi->fs_lock[i], &sbi->cp_mutex);
- }
+ int i = 0;
+ for (; i < NR_GLOBAL_LOCKS; i++)
+ mutex_lock(&sbi->fs_lock[i]);
}
static inline void mutex_unlock_all(struct f2fs_sb_info *sbi)
@@ -867,12 +843,9 @@ static inline int f2fs_clear_bit(unsigned int nr, char *addr)
/* used for f2fs_inode_info->flags */
enum {
FI_NEW_INODE, /* indicate newly allocated inode */
- FI_DIRTY_INODE, /* indicate inode is dirty or not */
FI_INC_LINK, /* need to increment i_nlink */
FI_ACL_MODE, /* indicate acl mode */
FI_NO_ALLOC, /* should not allocate any blocks */
- FI_UPDATE_DIR, /* should update inode block for consistency */
- FI_DELAY_IPUT, /* used for the recovery */
};
static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
@@ -905,21 +878,14 @@ static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
return 0;
}
-static inline int f2fs_readonly(struct super_block *sb)
-{
- return sb->s_flags & MS_RDONLY;
-}
-
/*
* file.c
*/
int f2fs_sync_file(struct file *, loff_t, loff_t, int);
void truncate_data_blocks(struct dnode_of_data *);
void f2fs_truncate(struct inode *);
-int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
int f2fs_setattr(struct dentry *, struct iattr *);
int truncate_hole(struct inode *, pgoff_t, pgoff_t);
-int truncate_data_blocks_range(struct dnode_of_data *, int);
long f2fs_ioctl(struct file *, unsigned int, unsigned long);
long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
@@ -947,6 +913,7 @@ struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
struct page *, struct inode *);
+void init_dent_inode(const struct qstr *, struct page *);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
int f2fs_make_empty(struct inode *, struct inode *);
@@ -981,8 +948,8 @@ void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int remove_inode_page(struct inode *);
-struct page *new_inode_page(struct inode *, const struct qstr *);
-struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
+int new_inode_page(struct inode *, const struct qstr *);
+struct page *new_node_page(struct dnode_of_data *, unsigned int);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_node_page_ra(struct page *, int);
@@ -1007,6 +974,7 @@ void destroy_node_manager_caches(void);
*/
void f2fs_balance_fs(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
+void locate_dirty_segment(struct f2fs_sb_info *, unsigned int);
void clear_prefree_segments(struct f2fs_sb_info *);
int npages_for_summary_flush(struct f2fs_sb_info *);
void allocate_new_segments(struct f2fs_sb_info *);
@@ -1043,9 +1011,7 @@ void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
int recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *);
void set_dirty_dir_page(struct inode *, struct page *);
-void add_dirty_dir_inode(struct inode *);
void remove_dirty_dir_inode(struct inode *);
-struct inode *check_dirty_dir_inode(struct f2fs_sb_info *, nid_t);
void sync_dirty_dir_inodes(struct f2fs_sb_info *);
void write_checkpoint(struct f2fs_sb_info *, bool);
void init_orphan_info(struct f2fs_sb_info *);
@@ -1059,7 +1025,7 @@ int reserve_new_block(struct dnode_of_data *);
void update_extent_cache(block_t, struct dnode_of_data *);
struct page *find_data_page(struct inode *, pgoff_t, bool);
struct page *get_lock_data_page(struct inode *, pgoff_t);
-struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
+struct page *get_new_data_page(struct inode *, pgoff_t, bool);
int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int);
int do_write_data_page(struct page *);
diff --git a/trunk/fs/f2fs/file.c b/trunk/fs/f2fs/file.c
index d2d2b7dbdcc1..1cae864f8dfc 100644
--- a/trunk/fs/f2fs/file.c
+++ b/trunk/fs/f2fs/file.c
@@ -63,10 +63,9 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
f2fs_put_dnode(&dn);
mutex_unlock_op(sbi, ilock);
- file_update_time(vma->vm_file);
lock_page(page);
if (page->mapping != inode->i_mapping ||
- page_offset(page) > i_size_read(inode) ||
+ page_offset(page) >= i_size_read(inode) ||
!PageUptodate(page)) {
unlock_page(page);
err = -EFAULT;
@@ -77,7 +76,10 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
* check to see if the page is mapped already (no holes)
*/
if (PageMappedToDisk(page))
- goto mapped;
+ goto out;
+
+ /* fill the page */
+ wait_on_page_writeback(page);
/* page is wholly or partially inside EOF */
if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
@@ -88,9 +90,7 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
set_page_dirty(page);
SetPageUptodate(page);
-mapped:
- /* fill the page */
- wait_on_page_writeback(page);
+ file_update_time(vma->vm_file);
out:
sb_end_pagefault(inode->i_sb);
return block_page_mkwrite_return(err);
@@ -102,24 +102,6 @@ static const struct vm_operations_struct f2fs_file_vm_ops = {
.remap_pages = generic_file_remap_pages,
};
-static int get_parent_ino(struct inode *inode, nid_t *pino)
-{
- struct dentry *dentry;
-
- inode = igrab(inode);
- dentry = d_find_any_alias(inode);
- iput(inode);
- if (!dentry)
- return 0;
-
- inode = igrab(dentry->d_parent->d_inode);
- dput(dentry);
-
- *pino = inode->i_ino;
- iput(inode);
- return 1;
-}
-
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
struct inode *inode = file->f_mapping->host;
@@ -132,7 +114,7 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
.for_reclaim = 0,
};
- if (f2fs_readonly(inode->i_sb))
+ if (inode->i_sb->s_flags & MS_RDONLY)
return 0;
trace_f2fs_sync_file_enter(inode);
@@ -152,7 +134,7 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
need_cp = true;
- else if (file_wrong_pino(inode))
+ else if (is_cp_file(inode))
need_cp = true;
else if (!space_for_roll_forward(sbi))
need_cp = true;
@@ -160,23 +142,11 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
need_cp = true;
if (need_cp) {
- nid_t pino;
-
/* all the dirty node pages should be flushed for POR */
ret = f2fs_sync_fs(inode->i_sb, 1);
- if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
- get_parent_ino(inode, &pino)) {
- F2FS_I(inode)->i_pino = pino;
- file_got_pino(inode);
- mark_inode_dirty_sync(inode);
- ret = f2fs_write_inode(inode, NULL);
- if (ret)
- goto out;
- }
} else {
/* if there is no written node page, write its inode page */
while (!sync_node_pages(sbi, inode->i_ino, &wbc)) {
- mark_inode_dirty_sync(inode);
ret = f2fs_write_inode(inode, NULL);
if (ret)
goto out;
@@ -198,7 +168,7 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+static int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
int nr_free = 0, ofs = dn->ofs_in_node;
struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
@@ -215,10 +185,10 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
update_extent_cache(NULL_ADDR, dn);
invalidate_blocks(sbi, blkaddr);
+ dec_valid_block_count(sbi, dn->inode, 1);
nr_free++;
}
if (nr_free) {
- dec_valid_block_count(sbi, dn->inode, nr_free);
set_page_dirty(dn->node_page);
sync_inode_page(dn);
}
@@ -321,7 +291,7 @@ void f2fs_truncate(struct inode *inode)
}
}
-int f2fs_getattr(struct vfsmount *mnt,
+static int f2fs_getattr(struct vfsmount *mnt,
struct dentry *dentry, struct kstat *stat)
{
struct inode *inode = dentry->d_inode;
@@ -417,7 +387,7 @@ static void fill_zero(struct inode *inode, pgoff_t index,
f2fs_balance_fs(sbi);
ilock = mutex_lock_op(sbi);
- page = get_new_data_page(inode, NULL, index, false);
+ page = get_new_data_page(inode, index, false);
mutex_unlock_op(sbi, ilock);
if (!IS_ERR(page)) {
@@ -605,10 +575,10 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
int ret;
switch (cmd) {
- case F2FS_IOC_GETFLAGS:
+ case FS_IOC_GETFLAGS:
flags = fi->i_flags & FS_FL_USER_VISIBLE;
return put_user(flags, (int __user *) arg);
- case F2FS_IOC_SETFLAGS:
+ case FS_IOC_SETFLAGS:
{
unsigned int oldflags;
diff --git a/trunk/fs/f2fs/gc.c b/trunk/fs/f2fs/gc.c
index 35f9b1a196aa..14961593e93c 100644
--- a/trunk/fs/f2fs/gc.c
+++ b/trunk/fs/f2fs/gc.c
@@ -76,9 +76,7 @@ static int gc_thread_func(void *data)
else
wait_ms = increase_sleep_time(wait_ms);
-#ifdef CONFIG_F2FS_STAT_FS
sbi->bg_gc++;
-#endif
/* if return value is not zero, no victim was selected */
if (f2fs_gc(sbi))
@@ -91,28 +89,23 @@ int start_gc_thread(struct f2fs_sb_info *sbi)
{
struct f2fs_gc_kthread *gc_th;
dev_t dev = sbi->sb->s_bdev->bd_dev;
- int err = 0;
if (!test_opt(sbi, BG_GC))
- goto out;
+ return 0;
gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
- if (!gc_th) {
- err = -ENOMEM;
- goto out;
- }
+ if (!gc_th)
+ return -ENOMEM;
sbi->gc_thread = gc_th;
init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(gc_th->f2fs_gc_task)) {
- err = PTR_ERR(gc_th->f2fs_gc_task);
kfree(gc_th);
sbi->gc_thread = NULL;
+ return -ENOMEM;
}
-
-out:
- return err;
+ return 0;
}
void stop_gc_thread(struct f2fs_sb_info *sbi)
@@ -241,14 +234,14 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct victim_sel_policy p;
- unsigned int secno, max_cost;
+ unsigned int secno;
int nsearched = 0;
p.alloc_mode = alloc_mode;
select_policy(sbi, gc_type, type, &p);
p.min_segno = NULL_SEGNO;
- p.min_cost = max_cost = get_max_cost(sbi, &p);
+ p.min_cost = get_max_cost(sbi, &p);
mutex_lock(&dirty_i->seglist_lock);
@@ -287,7 +280,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
p.min_cost = cost;
}
- if (cost == max_cost)
+ if (cost == get_max_cost(sbi, &p))
continue;
if (nsearched++ >= MAX_VICTIM_SEARCH) {
@@ -295,8 +288,8 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
break;
}
}
- if (p.min_segno != NULL_SEGNO) {
got_it:
+ if (p.min_segno != NULL_SEGNO) {
if (p.alloc_mode == LFS) {
secno = GET_SECNO(sbi, p.min_segno);
if (gc_type == FG_GC)
@@ -321,21 +314,28 @@ static const struct victim_selection default_v_ops = {
static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
{
+ struct list_head *this;
struct inode_entry *ie;
- list_for_each_entry(ie, ilist, list)
+ list_for_each(this, ilist) {
+ ie = list_entry(this, struct inode_entry, list);
if (ie->inode->i_ino == ino)
return ie->inode;
+ }
return NULL;
}
static void add_gc_inode(struct inode *inode, struct list_head *ilist)
{
- struct inode_entry *new_ie;
+ struct list_head *this;
+ struct inode_entry *new_ie, *ie;
- if (inode == find_gc_inode(inode->i_ino, ilist)) {
- iput(inode);
- return;
+ list_for_each(this, ilist) {
+ ie = list_entry(this, struct inode_entry, list);
+ if (ie->inode == inode) {
+ iput(inode);
+ return;
+ }
}
repeat:
new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
diff --git a/trunk/fs/f2fs/inode.c b/trunk/fs/f2fs/inode.c
index 2b2d45d19e3e..91ac7f9d88ee 100644
--- a/trunk/fs/f2fs/inode.c
+++ b/trunk/fs/f2fs/inode.c
@@ -109,6 +109,12 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
ret = do_read_inode(inode);
if (ret)
goto bad_inode;
+
+ if (!sbi->por_doing && inode->i_nlink == 0) {
+ ret = -ENOENT;
+ goto bad_inode;
+ }
+
make_now:
if (ino == F2FS_NODE_INO(sbi)) {
inode->i_mapping->a_ops = &f2fs_node_aops;
@@ -124,7 +130,8 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
inode->i_op = &f2fs_dir_inode_operations;
inode->i_fop = &f2fs_dir_operations;
inode->i_mapping->a_ops = &f2fs_dblock_aops;
- mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
+ mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER_MOVABLE |
+ __GFP_ZERO);
} else if (S_ISLNK(inode->i_mode)) {
inode->i_op = &f2fs_symlink_inode_operations;
inode->i_mapping->a_ops = &f2fs_dblock_aops;
@@ -192,7 +199,6 @@ void update_inode(struct inode *inode, struct page *node_page)
set_cold_node(inode, node_page);
set_page_dirty(node_page);
- clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
}
int update_inode_page(struct inode *inode)
@@ -218,9 +224,6 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
inode->i_ino == F2FS_META_INO(sbi))
return 0;
- if (!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_INODE))
- return 0;
-
if (wbc)
f2fs_balance_fs(sbi);
diff --git a/trunk/fs/f2fs/namei.c b/trunk/fs/f2fs/namei.c
index 64c07169df05..47abc9722b17 100644
--- a/trunk/fs/f2fs/namei.c
+++ b/trunk/fs/f2fs/namei.c
@@ -112,7 +112,7 @@ static inline void set_cold_files(struct f2fs_sb_info *sbi, struct inode *inode,
int count = le32_to_cpu(sbi->raw_super->extension_count);
for (i = 0; i < count; i++) {
if (is_multimedia_file(name, extlist[i])) {
- file_set_cold(inode);
+ set_cold_file(inode);
break;
}
}
@@ -149,7 +149,8 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
alloc_nid_done(sbi, ino);
- d_instantiate(dentry, inode);
+ if (!sbi->por_doing)
+ d_instantiate(dentry, inode);
unlock_new_inode(inode);
return 0;
out:
@@ -172,7 +173,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
f2fs_balance_fs(sbi);
inode->i_ctime = CURRENT_TIME;
- ihold(inode);
+ atomic_inc(&inode->i_count);
set_inode_flag(F2FS_I(inode), FI_INC_LINK);
ilock = mutex_lock_op(sbi);
@@ -181,10 +182,17 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
if (err)
goto out;
+ /*
+ * This file should be checkpointed during fsync.
+ * We lost i_pino from now on.
+ */
+ set_cp_file(inode);
+
d_instantiate(dentry, inode);
return 0;
out:
clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ make_bad_inode(inode);
iput(inode);
return err;
}
@@ -490,7 +498,6 @@ const struct inode_operations f2fs_dir_inode_operations = {
.rmdir = f2fs_rmdir,
.mknod = f2fs_mknod,
.rename = f2fs_rename,
- .getattr = f2fs_getattr,
.setattr = f2fs_setattr,
.get_acl = f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
@@ -505,7 +512,6 @@ const struct inode_operations f2fs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
- .getattr = f2fs_getattr,
.setattr = f2fs_setattr,
#ifdef CONFIG_F2FS_FS_XATTR
.setxattr = generic_setxattr,
@@ -516,7 +522,6 @@ const struct inode_operations f2fs_symlink_inode_operations = {
};
const struct inode_operations f2fs_special_inode_operations = {
- .getattr = f2fs_getattr,
.setattr = f2fs_setattr,
.get_acl = f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
diff --git a/trunk/fs/f2fs/node.c b/trunk/fs/f2fs/node.c
index b418aee09573..3df43b4efd89 100644
--- a/trunk/fs/f2fs/node.c
+++ b/trunk/fs/f2fs/node.c
@@ -408,13 +408,10 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
level = get_node_path(index, offset, noffset);
nids[0] = dn->inode->i_ino;
- npage[0] = dn->inode_page;
+ npage[0] = get_node_page(sbi, nids[0]);
+ if (IS_ERR(npage[0]))
+ return PTR_ERR(npage[0]);
- if (!npage[0]) {
- npage[0] = get_node_page(sbi, nids[0]);
- if (IS_ERR(npage[0]))
- return PTR_ERR(npage[0]);
- }
parent = npage[0];
if (level != 0)
nids[1] = get_nid(parent, offset[0], true);
@@ -433,7 +430,7 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
}
dn->nid = nids[i];
- npage[i] = new_node_page(dn, noffset[i], NULL);
+ npage[i] = new_node_page(dn, noffset[i]);
if (IS_ERR(npage[i])) {
alloc_nid_failed(sbi, nids[i]);
err = PTR_ERR(npage[i]);
@@ -806,19 +803,22 @@ int remove_inode_page(struct inode *inode)
return 0;
}
-struct page *new_inode_page(struct inode *inode, const struct qstr *name)
+int new_inode_page(struct inode *inode, const struct qstr *name)
{
+ struct page *page;
struct dnode_of_data dn;
/* allocate inode page for new inode */
set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
-
- /* caller should f2fs_put_page(page, 1); */
- return new_node_page(&dn, 0, NULL);
+ page = new_node_page(&dn, 0);
+ init_dent_inode(name, page);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+ f2fs_put_page(page, 1);
+ return 0;
}
-struct page *new_node_page(struct dnode_of_data *dn,
- unsigned int ofs, struct page *ipage)
+struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
struct address_space *mapping = sbi->node_inode->i_mapping;
@@ -851,10 +851,7 @@ struct page *new_node_page(struct dnode_of_data *dn,
set_cold_node(dn->inode, page);
dn->node_page = page;
- if (ipage)
- update_inode(dn->inode, ipage);
- else
- sync_inode_page(dn);
+ sync_inode_page(dn);
set_page_dirty(page);
if (ofs == 0)
inc_valid_inode_count(sbi);
@@ -1208,8 +1205,7 @@ static int f2fs_set_node_page_dirty(struct page *page)
return 0;
}
-static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
- unsigned int length)
+static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
struct inode *inode = page->mapping->host;
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
@@ -1496,10 +1492,9 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
new_ni = old_ni;
new_ni.ino = ino;
- if (!inc_valid_node_count(sbi, NULL, 1))
- WARN_ON(1);
set_node_addr(sbi, &new_ni, NEW_ADDR);
inc_valid_inode_count(sbi);
+
f2fs_put_page(ipage, 1);
return 0;
}
diff --git a/trunk/fs/f2fs/node.h b/trunk/fs/f2fs/node.h
index c65fb4f4230f..0a2d72f0024d 100644
--- a/trunk/fs/f2fs/node.h
+++ b/trunk/fs/f2fs/node.h
@@ -275,27 +275,25 @@ static inline nid_t get_nid(struct page *p, int off, bool i)
* - Mark cold node blocks in their node footer
* - Mark cold data pages in page cache
*/
-static inline int is_file(struct inode *inode, int type)
+static inline int is_cold_file(struct inode *inode)
{
- return F2FS_I(inode)->i_advise & type;
+ return F2FS_I(inode)->i_advise & FADVISE_COLD_BIT;
}
-static inline void set_file(struct inode *inode, int type)
+static inline void set_cold_file(struct inode *inode)
{
- F2FS_I(inode)->i_advise |= type;
+ F2FS_I(inode)->i_advise |= FADVISE_COLD_BIT;
}
-static inline void clear_file(struct inode *inode, int type)
+static inline int is_cp_file(struct inode *inode)
{
- F2FS_I(inode)->i_advise &= ~type;
+ return F2FS_I(inode)->i_advise & FADVISE_CP_BIT;
}
-#define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT)
-#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT)
-#define file_set_cold(inode) set_file(inode, FADVISE_COLD_BIT)
-#define file_lost_pino(inode) set_file(inode, FADVISE_LOST_PINO_BIT)
-#define file_clear_cold(inode) clear_file(inode, FADVISE_COLD_BIT)
-#define file_got_pino(inode) clear_file(inode, FADVISE_LOST_PINO_BIT)
+static inline void set_cp_file(struct inode *inode)
+{
+ F2FS_I(inode)->i_advise |= FADVISE_CP_BIT;
+}
static inline int is_cold_data(struct page *page)
{
@@ -312,16 +310,29 @@ static inline void clear_cold_data(struct page *page)
ClearPageChecked(page);
}
-static inline int is_node(struct page *page, int type)
+static inline int is_cold_node(struct page *page)
{
void *kaddr = page_address(page);
struct f2fs_node *rn = (struct f2fs_node *)kaddr;
- return le32_to_cpu(rn->footer.flag) & (1 << type);
+ unsigned int flag = le32_to_cpu(rn->footer.flag);
+ return flag & (0x1 << COLD_BIT_SHIFT);
}
-#define is_cold_node(page) is_node(page, COLD_BIT_SHIFT)
-#define is_fsync_dnode(page) is_node(page, FSYNC_BIT_SHIFT)
-#define is_dent_dnode(page) is_node(page, DENT_BIT_SHIFT)
+static inline unsigned char is_fsync_dnode(struct page *page)
+{
+ void *kaddr = page_address(page);
+ struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ unsigned int flag = le32_to_cpu(rn->footer.flag);
+ return flag & (0x1 << FSYNC_BIT_SHIFT);
+}
+
+static inline unsigned char is_dent_dnode(struct page *page)
+{
+ void *kaddr = page_address(page);
+ struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ unsigned int flag = le32_to_cpu(rn->footer.flag);
+ return flag & (0x1 << DENT_BIT_SHIFT);
+}
static inline void set_cold_node(struct inode *inode, struct page *page)
{
@@ -335,15 +346,26 @@ static inline void set_cold_node(struct inode *inode, struct page *page)
rn->footer.flag = cpu_to_le32(flag);
}
-static inline void set_mark(struct page *page, int mark, int type)
+static inline void set_fsync_mark(struct page *page, int mark)
{
- struct f2fs_node *rn = (struct f2fs_node *)page_address(page);
+ void *kaddr = page_address(page);
+ struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ unsigned int flag = le32_to_cpu(rn->footer.flag);
+ if (mark)
+ flag |= (0x1 << FSYNC_BIT_SHIFT);
+ else
+ flag &= ~(0x1 << FSYNC_BIT_SHIFT);
+ rn->footer.flag = cpu_to_le32(flag);
+}
+
+static inline void set_dentry_mark(struct page *page, int mark)
+{
+ void *kaddr = page_address(page);
+ struct f2fs_node *rn = (struct f2fs_node *)kaddr;
unsigned int flag = le32_to_cpu(rn->footer.flag);
if (mark)
- flag |= (0x1 << type);
+ flag |= (0x1 << DENT_BIT_SHIFT);
else
- flag &= ~(0x1 << type);
+ flag &= ~(0x1 << DENT_BIT_SHIFT);
rn->footer.flag = cpu_to_le32(flag);
}
-#define set_dentry_mark(page, mark) set_mark(page, mark, DENT_BIT_SHIFT)
-#define set_fsync_mark(page, mark) set_mark(page, mark, FSYNC_BIT_SHIFT)
diff --git a/trunk/fs/f2fs/recovery.c b/trunk/fs/f2fs/recovery.c
index d56d951c2253..60c8a5097058 100644
--- a/trunk/fs/f2fs/recovery.c
+++ b/trunk/fs/f2fs/recovery.c
@@ -40,54 +40,36 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
static int recover_dentry(struct page *ipage, struct inode *inode)
{
- void *kaddr = page_address(ipage);
- struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
+ struct f2fs_node *raw_node = (struct f2fs_node *)kmap(ipage);
struct f2fs_inode *raw_inode = &(raw_node->i);
- nid_t pino = le32_to_cpu(raw_inode->i_pino);
- struct f2fs_dir_entry *de;
struct qstr name;
+ struct f2fs_dir_entry *de;
struct page *page;
- struct inode *dir, *einode;
+ struct inode *dir;
int err = 0;
- dir = check_dirty_dir_inode(F2FS_SB(inode->i_sb), pino);
- if (!dir) {
- dir = f2fs_iget(inode->i_sb, pino);
- if (IS_ERR(dir)) {
- err = PTR_ERR(dir);
- goto out;
- }
- set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
- add_dirty_dir_inode(dir);
+ if (!is_dent_dnode(ipage))
+ goto out;
+
+ dir = f2fs_iget(inode->i_sb, le32_to_cpu(raw_inode->i_pino));
+ if (IS_ERR(dir)) {
+ err = PTR_ERR(dir);
+ goto out;
}
name.len = le32_to_cpu(raw_inode->i_namelen);
name.name = raw_inode->i_name;
-retry:
+
de = f2fs_find_entry(dir, &name, &page);
- if (de && inode->i_ino == le32_to_cpu(de->ino)) {
+ if (de) {
kunmap(page);
f2fs_put_page(page, 0);
- goto out;
- }
- if (de) {
- einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
- if (IS_ERR(einode)) {
- WARN_ON(1);
- if (PTR_ERR(einode) == -ENOENT)
- err = -EEXIST;
- goto out;
- }
- f2fs_delete_entry(de, page, einode);
- iput(einode);
- goto retry;
+ } else {
+ err = __f2fs_add_link(dir, &name, inode);
}
- err = __f2fs_add_link(dir, &name, inode);
+ iput(dir);
out:
- f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode and its dentry: "
- "ino = %x, name = %s, dir = %lx, err = %d",
- ino_of_node(ipage), raw_inode->i_name,
- IS_ERR(dir) ? 0 : dir->i_ino, err);
+ kunmap(ipage);
return err;
}
@@ -97,9 +79,6 @@ static int recover_inode(struct inode *inode, struct page *node_page)
struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
struct f2fs_inode *raw_inode = &(raw_node->i);
- if (!IS_INODE(node_page))
- return 0;
-
inode->i_mode = le16_to_cpu(raw_inode->i_mode);
i_size_write(inode, le64_to_cpu(raw_inode->i_size));
inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
@@ -109,12 +88,7 @@ static int recover_inode(struct inode *inode, struct page *node_page)
inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
- if (is_dent_dnode(node_page))
- return recover_dentry(node_page, inode);
-
- f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
- ino_of_node(node_page), raw_inode->i_name);
- return 0;
+ return recover_dentry(node_page, inode);
}
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
@@ -145,13 +119,14 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
lock_page(page);
if (cp_ver != cpver_of_node(page))
- break;
+ goto unlock_out;
if (!is_fsync_dnode(page))
goto next;
entry = get_fsync_inode(head, ino_of_node(page));
if (entry) {
+ entry->blkaddr = blkaddr;
if (IS_INODE(page) && is_dent_dnode(page))
set_inode_flag(F2FS_I(entry->inode),
FI_INC_LINK);
@@ -159,40 +134,48 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
if (IS_INODE(page) && is_dent_dnode(page)) {
err = recover_inode_page(sbi, page);
if (err)
- break;
+ goto unlock_out;
}
/* add this fsync inode to the list */
entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
if (!entry) {
err = -ENOMEM;
- break;
+ goto unlock_out;
}
entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
if (IS_ERR(entry->inode)) {
err = PTR_ERR(entry->inode);
kmem_cache_free(fsync_entry_slab, entry);
- break;
+ goto unlock_out;
}
+
list_add_tail(&entry->list, head);
+ entry->blkaddr = blkaddr;
+ }
+ if (IS_INODE(page)) {
+ err = recover_inode(entry->inode, page);
+ if (err == -ENOENT) {
+ goto next;
+ } else if (err) {
+ err = -EINVAL;
+ goto unlock_out;
+ }
}
- entry->blkaddr = blkaddr;
-
- err = recover_inode(entry->inode, page);
- if (err && err != -ENOENT)
- break;
next:
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
}
+unlock_out:
unlock_page(page);
out:
__free_pages(page, 0);
return err;
}
-static void destroy_fsync_dnodes(struct list_head *head)
+static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,
+ struct list_head *head)
{
struct fsync_inode_entry *entry, *tmp;
@@ -203,15 +186,15 @@ static void destroy_fsync_dnodes(struct list_head *head)
}
}
-static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
- block_t blkaddr, struct dnode_of_data *dn)
+static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
+ block_t blkaddr)
{
struct seg_entry *sentry;
unsigned int segno = GET_SEGNO(sbi, blkaddr);
unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) &
(sbi->blocks_per_seg - 1);
struct f2fs_summary sum;
- nid_t ino, nid;
+ nid_t ino;
void *kaddr;
struct inode *inode;
struct page *node_page;
@@ -220,7 +203,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
sentry = get_seg_entry(sbi, segno);
if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
- return 0;
+ return;
/* Get the previous summary */
for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
@@ -239,39 +222,20 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
f2fs_put_page(sum_page, 1);
}
- /* Use the locked dnode page and inode */
- nid = le32_to_cpu(sum.nid);
- if (dn->inode->i_ino == nid) {
- struct dnode_of_data tdn = *dn;
- tdn.nid = nid;
- tdn.node_page = dn->inode_page;
- tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
- truncate_data_blocks_range(&tdn, 1);
- return 0;
- } else if (dn->nid == nid) {
- struct dnode_of_data tdn = *dn;
- tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
- truncate_data_blocks_range(&tdn, 1);
- return 0;
- }
-
/* Get the node page */
- node_page = get_node_page(sbi, nid);
- if (IS_ERR(node_page))
- return PTR_ERR(node_page);
+ node_page = get_node_page(sbi, le32_to_cpu(sum.nid));
bidx = start_bidx_of_node(ofs_of_node(node_page)) +
- le16_to_cpu(sum.ofs_in_node);
+ le16_to_cpu(sum.ofs_in_node);
ino = ino_of_node(node_page);
f2fs_put_page(node_page, 1);
/* Deallocate previous index in the node page */
inode = f2fs_iget(sbi->sb, ino);
if (IS_ERR(inode))
- return PTR_ERR(inode);
+ return;
truncate_hole(inode, bidx, bidx + 1);
iput(inode);
- return 0;
}
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
@@ -281,7 +245,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
struct dnode_of_data dn;
struct f2fs_summary sum;
struct node_info ni;
- int err = 0, recovered = 0;
+ int err = 0;
int ilock;
start = start_bidx_of_node(ofs_of_node(page));
@@ -319,16 +283,13 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
}
/* Check the previous node page having this index */
- err = check_index_in_prev_nodes(sbi, dest, &dn);
- if (err)
- goto err;
+ check_index_in_prev_nodes(sbi, dest);
set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
/* write dummy data page */
recover_data_page(sbi, NULL, &sum, src, dest);
update_extent_cache(dest, &dn);
- recovered++;
}
dn.ofs_in_node++;
}
@@ -344,14 +305,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
set_page_dirty(dn.node_page);
recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
-err:
f2fs_put_dnode(&dn);
mutex_unlock_op(sbi, ilock);
-
- f2fs_msg(sbi->sb, KERN_NOTICE, "recover_data: ino = %lx, "
- "recovered_data = %d blocks, err = %d",
- inode->i_ino, recovered, err);
- return err;
+ return 0;
}
static int recover_data(struct f2fs_sb_info *sbi,
@@ -384,7 +340,7 @@ static int recover_data(struct f2fs_sb_info *sbi,
lock_page(page);
if (cp_ver != cpver_of_node(page))
- break;
+ goto unlock_out;
entry = get_fsync_inode(head, ino_of_node(page));
if (!entry)
@@ -392,7 +348,7 @@ static int recover_data(struct f2fs_sb_info *sbi,
err = do_recover_data(sbi, entry->inode, page, blkaddr);
if (err)
- break;
+ goto out;
if (entry->blkaddr == blkaddr) {
iput(entry->inode);
@@ -403,6 +359,7 @@ static int recover_data(struct f2fs_sb_info *sbi,
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
}
+unlock_out:
unlock_page(page);
out:
__free_pages(page, 0);
@@ -425,7 +382,6 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
INIT_LIST_HEAD(&inode_list);
/* step #1: find fsynced inode numbers */
- sbi->por_doing = 1;
err = find_fsync_dnodes(sbi, &inode_list);
if (err)
goto out;
@@ -434,13 +390,13 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
goto out;
/* step #2: recover data */
+ sbi->por_doing = 1;
err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
+ sbi->por_doing = 0;
BUG_ON(!list_empty(&inode_list));
out:
- destroy_fsync_dnodes(&inode_list);
+ destroy_fsync_dnodes(sbi, &inode_list);
kmem_cache_destroy(fsync_entry_slab);
- sbi->por_doing = 0;
- if (!err)
- write_checkpoint(sbi, false);
+ write_checkpoint(sbi, false);
return err;
}
diff --git a/trunk/fs/f2fs/segment.c b/trunk/fs/f2fs/segment.c
index a86d125a9885..d8e84e49a5c3 100644
--- a/trunk/fs/f2fs/segment.c
+++ b/trunk/fs/f2fs/segment.c
@@ -94,7 +94,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
* Adding dirty entry into seglist is not critical operation.
* If a given segment is one of current working segments, it won't be added.
*/
-static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
+void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned short valid_blocks;
@@ -126,16 +126,17 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
- unsigned int segno = -1;
+ unsigned int segno, offset = 0;
unsigned int total_segs = TOTAL_SEGS(sbi);
mutex_lock(&dirty_i->seglist_lock);
while (1) {
segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
- segno + 1);
+ offset);
if (segno >= total_segs)
break;
__set_test_and_free(sbi, segno);
+ offset = segno + 1;
}
mutex_unlock(&dirty_i->seglist_lock);
}
@@ -143,16 +144,17 @@ static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
void clear_prefree_segments(struct f2fs_sb_info *sbi)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
- unsigned int segno = -1;
+ unsigned int segno, offset = 0;
unsigned int total_segs = TOTAL_SEGS(sbi);
mutex_lock(&dirty_i->seglist_lock);
while (1) {
segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
- segno + 1);
+ offset);
if (segno >= total_segs)
break;
+ offset = segno + 1;
if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE]))
dirty_i->nr_dirty[PRE]--;
@@ -255,11 +257,11 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
* This function should be resided under the curseg_mutex lock
*/
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
- struct f2fs_summary *sum)
+ struct f2fs_summary *sum, unsigned short offset)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
void *addr = curseg->sum_blk;
- addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
+ addr += offset * sizeof(struct f2fs_summary);
memcpy(addr, sum, sizeof(struct f2fs_summary));
return;
}
@@ -309,14 +311,64 @@ static void write_sum_page(struct f2fs_sb_info *sbi,
f2fs_put_page(page, 1);
}
+static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi, int type)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE];
+ unsigned int segno;
+ unsigned int ofs = 0;
+
+ /*
+ * If there is not enough reserved sections,
+ * we should not reuse prefree segments.
+ */
+ if (has_not_enough_free_secs(sbi, 0))
+ return NULL_SEGNO;
+
+ /*
+ * NODE page should not reuse prefree segment,
+ * since those information is used for SPOR.
+ */
+ if (IS_NODESEG(type))
+ return NULL_SEGNO;
+next:
+ segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs);
+ ofs += sbi->segs_per_sec;
+
+ if (segno < TOTAL_SEGS(sbi)) {
+ int i;
+
+ /* skip intermediate segments in a section */
+ if (segno % sbi->segs_per_sec)
+ goto next;
+
+ /* skip if the section is currently used */
+ if (sec_usage_check(sbi, GET_SECNO(sbi, segno)))
+ goto next;
+
+ /* skip if whole section is not prefree */
+ for (i = 1; i < sbi->segs_per_sec; i++)
+ if (!test_bit(segno + i, prefree_segmap))
+ goto next;
+
+ /* skip if whole section was not free at the last checkpoint */
+ for (i = 0; i < sbi->segs_per_sec; i++)
+ if (get_seg_entry(sbi, segno + i)->ckpt_valid_blocks)
+ goto next;
+
+ return segno;
+ }
+ return NULL_SEGNO;
+}
+
static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
- unsigned int segno = curseg->segno + 1;
+ unsigned int segno = curseg->segno;
struct free_segmap_info *free_i = FREE_I(sbi);
- if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
- return !test_bit(segno, free_i->free_segmap);
+ if (segno + 1 < TOTAL_SEGS(sbi) && (segno + 1) % sbi->segs_per_sec)
+ return !test_bit(segno + 1, free_i->free_segmap);
return 0;
}
@@ -443,7 +495,7 @@ static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
int dir = ALLOC_LEFT;
write_sum_page(sbi, curseg->sum_blk,
- GET_SUM_BLOCK(sbi, segno));
+ GET_SUM_BLOCK(sbi, curseg->segno));
if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
dir = ALLOC_RIGHT;
@@ -547,7 +599,11 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
goto out;
}
- if (type == CURSEG_WARM_NODE)
+ curseg->next_segno = check_prefree_segments(sbi, type);
+
+ if (curseg->next_segno != NULL_SEGNO)
+ change_curseg(sbi, type, false);
+ else if (type == CURSEG_WARM_NODE)
new_curseg(sbi, type, false);
else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
new_curseg(sbi, type, false);
@@ -556,10 +612,7 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
else
new_curseg(sbi, type, false);
out:
-#ifdef CONFIG_F2FS_STAT_FS
sbi->segment_count[curseg->alloc_type]++;
-#endif
- return;
}
void allocate_new_segments(struct f2fs_sb_info *sbi)
@@ -742,7 +795,7 @@ static int __get_segment_type_6(struct page *page, enum page_type p_type)
if (S_ISDIR(inode->i_mode))
return CURSEG_HOT_DATA;
- else if (is_cold_data(page) || file_is_cold(inode))
+ else if (is_cold_data(page) || is_cold_file(inode))
return CURSEG_COLD_DATA;
else
return CURSEG_WARM_DATA;
@@ -791,13 +844,11 @@ static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
* because, this function updates a summary entry in the
* current summary block.
*/
- __add_sum_entry(sbi, type, sum);
+ __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
mutex_lock(&sit_i->sentry_lock);
__refresh_next_blkoff(sbi, curseg);
-#ifdef CONFIG_F2FS_STAT_FS
sbi->block_count[curseg->alloc_type]++;
-#endif
/*
* SIT information should be updated before segment allocation,
@@ -892,7 +943,7 @@ void recover_data_page(struct f2fs_sb_info *sbi,
curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
(sbi->blocks_per_seg - 1);
- __add_sum_entry(sbi, type, sum);
+ __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
@@ -929,7 +980,7 @@ void rewrite_node_page(struct f2fs_sb_info *sbi,
}
curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
(sbi->blocks_per_seg - 1);
- __add_sum_entry(sbi, type, sum);
+ __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
/* change the current log to the next block addr in advance */
if (next_segno != segno) {
@@ -1528,13 +1579,13 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct free_segmap_info *free_i = FREE_I(sbi);
- unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi);
+ unsigned int segno = 0, offset = 0;
unsigned short valid_blocks;
- while (1) {
+ while (segno < TOTAL_SEGS(sbi)) {
/* find dirty segment based on free segmap */
- segno = find_next_inuse(free_i, total_segs, offset);
- if (segno >= total_segs)
+ segno = find_next_inuse(free_i, TOTAL_SEGS(sbi), offset);
+ if (segno >= TOTAL_SEGS(sbi))
break;
offset = segno + 1;
valid_blocks = get_valid_blocks(sbi, segno, 0);
diff --git a/trunk/fs/f2fs/super.c b/trunk/fs/f2fs/super.c
index 75c7dc363e92..8555f7df82c7 100644
--- a/trunk/fs/f2fs/super.c
+++ b/trunk/fs/f2fs/super.c
@@ -34,7 +34,7 @@
static struct kmem_cache *f2fs_inode_cachep;
enum {
- Opt_gc_background,
+ Opt_gc_background_off,
Opt_disable_roll_forward,
Opt_discard,
Opt_noheap,
@@ -46,7 +46,7 @@ enum {
};
static match_table_t f2fs_tokens = {
- {Opt_gc_background, "background_gc=%s"},
+ {Opt_gc_background_off, "background_gc_off"},
{Opt_disable_roll_forward, "disable_roll_forward"},
{Opt_discard, "discard"},
{Opt_noheap, "no_heap"},
@@ -76,91 +76,6 @@ static void init_once(void *foo)
inode_init_once(&fi->vfs_inode);
}
-static int parse_options(struct super_block *sb, char *options)
-{
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
- substring_t args[MAX_OPT_ARGS];
- char *p, *name;
- int arg = 0;
-
- if (!options)
- return 0;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
- if (!*p)
- continue;
- /*
- * Initialize args struct so we know whether arg was
- * found; some options take optional arguments.
- */
- args[0].to = args[0].from = NULL;
- token = match_token(p, f2fs_tokens, args);
-
- switch (token) {
- case Opt_gc_background:
- name = match_strdup(&args[0]);
-
- if (!name)
- return -ENOMEM;
- if (!strncmp(name, "on", 2))
- set_opt(sbi, BG_GC);
- else if (!strncmp(name, "off", 3))
- clear_opt(sbi, BG_GC);
- else {
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
- break;
- case Opt_disable_roll_forward:
- set_opt(sbi, DISABLE_ROLL_FORWARD);
- break;
- case Opt_discard:
- set_opt(sbi, DISCARD);
- break;
- case Opt_noheap:
- set_opt(sbi, NOHEAP);
- break;
-#ifdef CONFIG_F2FS_FS_XATTR
- case Opt_nouser_xattr:
- clear_opt(sbi, XATTR_USER);
- break;
-#else
- case Opt_nouser_xattr:
- f2fs_msg(sb, KERN_INFO,
- "nouser_xattr options not supported");
- break;
-#endif
-#ifdef CONFIG_F2FS_FS_POSIX_ACL
- case Opt_noacl:
- clear_opt(sbi, POSIX_ACL);
- break;
-#else
- case Opt_noacl:
- f2fs_msg(sb, KERN_INFO, "noacl options not supported");
- break;
-#endif
- case Opt_active_logs:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
- return -EINVAL;
- sbi->active_logs = arg;
- break;
- case Opt_disable_ext_identify:
- set_opt(sbi, DISABLE_EXT_IDENTIFY);
- break;
- default:
- f2fs_msg(sb, KERN_ERR,
- "Unrecognized mount option \"%s\" or missing value",
- p);
- return -EINVAL;
- }
- }
- return 0;
-}
-
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
struct f2fs_inode_info *fi;
@@ -197,17 +112,6 @@ static int f2fs_drop_inode(struct inode *inode)
return generic_drop_inode(inode);
}
-/*
- * f2fs_dirty_inode() is called from __mark_inode_dirty()
- *
- * We should call set_dirty_inode to write the dirty inode through write_inode.
- */
-static void f2fs_dirty_inode(struct inode *inode, int flags)
-{
- set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
- return;
-}
-
static void f2fs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
@@ -266,7 +170,7 @@ static int f2fs_freeze(struct super_block *sb)
{
int err;
- if (f2fs_readonly(sb))
+ if (sb->s_flags & MS_RDONLY)
return 0;
err = f2fs_sync_fs(sb, 1);
@@ -310,10 +214,10 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
- if (!(root->d_sb->s_flags & MS_RDONLY) && test_opt(sbi, BG_GC))
- seq_printf(seq, ",background_gc=%s", "on");
+ if (test_opt(sbi, BG_GC))
+ seq_puts(seq, ",background_gc_on");
else
- seq_printf(seq, ",background_gc=%s", "off");
+ seq_puts(seq, ",background_gc_off");
if (test_opt(sbi, DISABLE_ROLL_FORWARD))
seq_puts(seq, ",disable_roll_forward");
if (test_opt(sbi, DISCARD))
@@ -340,64 +244,11 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
return 0;
}
-static int f2fs_remount(struct super_block *sb, int *flags, char *data)
-{
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
- struct f2fs_mount_info org_mount_opt;
- int err, active_logs;
-
- /*
- * Save the old mount options in case we
- * need to restore them.
- */
- org_mount_opt = sbi->mount_opt;
- active_logs = sbi->active_logs;
-
- /* parse mount options */
- err = parse_options(sb, data);
- if (err)
- goto restore_opts;
-
- /*
- * Previous and new state of filesystem is RO,
- * so no point in checking GC conditions.
- */
- if ((sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY))
- goto skip;
-
- /*
- * We stop the GC thread if FS is mounted as RO
- * or if background_gc = off is passed in mount
- * option. Also sync the filesystem.
- */
- if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
- if (sbi->gc_thread) {
- stop_gc_thread(sbi);
- f2fs_sync_fs(sb, 1);
- }
- } else if (test_opt(sbi, BG_GC) && !sbi->gc_thread) {
- err = start_gc_thread(sbi);
- if (err)
- goto restore_opts;
- }
-skip:
- /* Update the POSIXACL Flag */
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
- return 0;
-
-restore_opts:
- sbi->mount_opt = org_mount_opt;
- sbi->active_logs = active_logs;
- return err;
-}
-
static struct super_operations f2fs_sops = {
.alloc_inode = f2fs_alloc_inode,
.drop_inode = f2fs_drop_inode,
.destroy_inode = f2fs_destroy_inode,
.write_inode = f2fs_write_inode,
- .dirty_inode = f2fs_dirty_inode,
.show_options = f2fs_show_options,
.evict_inode = f2fs_evict_inode,
.put_super = f2fs_put_super,
@@ -405,7 +256,6 @@ static struct super_operations f2fs_sops = {
.freeze_fs = f2fs_freeze,
.unfreeze_fs = f2fs_unfreeze,
.statfs = f2fs_statfs,
- .remount_fs = f2fs_remount,
};
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
@@ -453,6 +303,79 @@ static const struct export_operations f2fs_export_ops = {
.get_parent = f2fs_get_parent,
};
+static int parse_options(struct super_block *sb, struct f2fs_sb_info *sbi,
+ char *options)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *p;
+ int arg = 0;
+
+ if (!options)
+ return 0;
+
+ while ((p = strsep(&options, ",")) != NULL) {
+ int token;
+ if (!*p)
+ continue;
+ /*
+ * Initialize args struct so we know whether arg was
+ * found; some options take optional arguments.
+ */
+ args[0].to = args[0].from = NULL;
+ token = match_token(p, f2fs_tokens, args);
+
+ switch (token) {
+ case Opt_gc_background_off:
+ clear_opt(sbi, BG_GC);
+ break;
+ case Opt_disable_roll_forward:
+ set_opt(sbi, DISABLE_ROLL_FORWARD);
+ break;
+ case Opt_discard:
+ set_opt(sbi, DISCARD);
+ break;
+ case Opt_noheap:
+ set_opt(sbi, NOHEAP);
+ break;
+#ifdef CONFIG_F2FS_FS_XATTR
+ case Opt_nouser_xattr:
+ clear_opt(sbi, XATTR_USER);
+ break;
+#else
+ case Opt_nouser_xattr:
+ f2fs_msg(sb, KERN_INFO,
+ "nouser_xattr options not supported");
+ break;
+#endif
+#ifdef CONFIG_F2FS_FS_POSIX_ACL
+ case Opt_noacl:
+ clear_opt(sbi, POSIX_ACL);
+ break;
+#else
+ case Opt_noacl:
+ f2fs_msg(sb, KERN_INFO, "noacl options not supported");
+ break;
+#endif
+ case Opt_active_logs:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
+ return -EINVAL;
+ sbi->active_logs = arg;
+ break;
+ case Opt_disable_ext_identify:
+ set_opt(sbi, DISABLE_EXT_IDENTIFY);
+ break;
+ default:
+ f2fs_msg(sb, KERN_ERR,
+ "Unrecognized mount option \"%s\" or missing value",
+ p);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
static loff_t max_file_size(unsigned bits)
{
loff_t result = ADDRS_PER_INODE;
@@ -618,7 +541,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
if (err)
goto free_sb_buf;
}
- sb->s_fs_info = sbi;
/* init some FS parameters */
sbi->active_logs = NR_CURSEG_TYPE;
@@ -631,7 +553,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
set_opt(sbi, POSIX_ACL);
#endif
/* parse mount options */
- err = parse_options(sb, (char *)data);
+ err = parse_options(sb, sbi, (char *)data);
if (err)
goto free_sb_buf;
@@ -643,6 +565,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_xattr = f2fs_xattr_handlers;
sb->s_export_op = &f2fs_export_ops;
sb->s_magic = F2FS_SUPER_MAGIC;
+ sb->s_fs_info = sbi;
sb->s_time_gran = 1;
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
@@ -751,16 +674,10 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
"Cannot recover all fsync data errno=%ld", err);
}
- /*
- * If filesystem is not mounted as read-only then
- * do start the gc_thread.
- */
- if (!(sb->s_flags & MS_RDONLY)) {
- /* After POR, we can run background GC thread.*/
- err = start_gc_thread(sbi);
- if (err)
- goto fail;
- }
+ /* After POR, we can run background GC thread */
+ err = start_gc_thread(sbi);
+ if (err)
+ goto fail;
err = f2fs_build_stats(sbi);
if (err)
diff --git a/trunk/fs/f2fs/xattr.c b/trunk/fs/f2fs/xattr.c
index 3ab07ecd86ca..0b02dce31356 100644
--- a/trunk/fs/f2fs/xattr.c
+++ b/trunk/fs/f2fs/xattr.c
@@ -20,7 +20,6 @@
*/
#include
#include
-#include
#include "f2fs.h"
#include "xattr.h"
@@ -44,10 +43,6 @@ static size_t f2fs_xattr_generic_list(struct dentry *dentry, char *list,
prefix = XATTR_TRUSTED_PREFIX;
prefix_len = XATTR_TRUSTED_PREFIX_LEN;
break;
- case F2FS_XATTR_INDEX_SECURITY:
- prefix = XATTR_SECURITY_PREFIX;
- prefix_len = XATTR_SECURITY_PREFIX_LEN;
- break;
default:
return -EINVAL;
}
@@ -55,7 +50,7 @@ static size_t f2fs_xattr_generic_list(struct dentry *dentry, char *list,
total_len = prefix_len + name_len + 1;
if (list && total_len <= list_size) {
memcpy(list, prefix, prefix_len);
- memcpy(list + prefix_len, name, name_len);
+ memcpy(list+prefix_len, name, name_len);
list[prefix_len + name_len] = '\0';
}
return total_len;
@@ -75,14 +70,13 @@ static int f2fs_xattr_generic_get(struct dentry *dentry, const char *name,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
break;
- case F2FS_XATTR_INDEX_SECURITY:
- break;
default:
return -EINVAL;
}
if (strcmp(name, "") == 0)
return -EINVAL;
- return f2fs_getxattr(dentry->d_inode, type, name, buffer, size);
+ return f2fs_getxattr(dentry->d_inode, type, name,
+ buffer, size);
}
static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name,
@@ -99,15 +93,13 @@ static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
break;
- case F2FS_XATTR_INDEX_SECURITY:
- break;
default:
return -EINVAL;
}
if (strcmp(name, "") == 0)
return -EINVAL;
- return f2fs_setxattr(dentry->d_inode, type, name, value, size, NULL);
+ return f2fs_setxattr(dentry->d_inode, type, name, value, size);
}
static size_t f2fs_xattr_advise_list(struct dentry *dentry, char *list,
@@ -153,31 +145,6 @@ static int f2fs_xattr_advise_set(struct dentry *dentry, const char *name,
return 0;
}
-#ifdef CONFIG_F2FS_FS_SECURITY
-static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
- void *page)
-{
- const struct xattr *xattr;
- int err = 0;
-
- for (xattr = xattr_array; xattr->name != NULL; xattr++) {
- err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
- xattr->name, xattr->value,
- xattr->value_len, (struct page *)page);
- if (err < 0)
- break;
- }
- return err;
-}
-
-int f2fs_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, struct page *ipage)
-{
- return security_inode_init_security(inode, dir, qstr,
- &f2fs_initxattrs, ipage);
-}
-#endif
-
const struct xattr_handler f2fs_xattr_user_handler = {
.prefix = XATTR_USER_PREFIX,
.flags = F2FS_XATTR_INDEX_USER,
@@ -202,14 +169,6 @@ const struct xattr_handler f2fs_xattr_advise_handler = {
.set = f2fs_xattr_advise_set,
};
-const struct xattr_handler f2fs_xattr_security_handler = {
- .prefix = XATTR_SECURITY_PREFIX,
- .flags = F2FS_XATTR_INDEX_SECURITY,
- .list = f2fs_xattr_generic_list,
- .get = f2fs_xattr_generic_get,
- .set = f2fs_xattr_generic_set,
-};
-
static const struct xattr_handler *f2fs_xattr_handler_map[] = {
[F2FS_XATTR_INDEX_USER] = &f2fs_xattr_user_handler,
#ifdef CONFIG_F2FS_FS_POSIX_ACL
@@ -217,9 +176,6 @@ static const struct xattr_handler *f2fs_xattr_handler_map[] = {
[F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT] = &f2fs_xattr_acl_default_handler,
#endif
[F2FS_XATTR_INDEX_TRUSTED] = &f2fs_xattr_trusted_handler,
-#ifdef CONFIG_F2FS_FS_SECURITY
- [F2FS_XATTR_INDEX_SECURITY] = &f2fs_xattr_security_handler,
-#endif
[F2FS_XATTR_INDEX_ADVISE] = &f2fs_xattr_advise_handler,
};
@@ -230,9 +186,6 @@ const struct xattr_handler *f2fs_xattr_handlers[] = {
&f2fs_xattr_acl_default_handler,
#endif
&f2fs_xattr_trusted_handler,
-#ifdef CONFIG_F2FS_FS_SECURITY
- &f2fs_xattr_security_handler,
-#endif
&f2fs_xattr_advise_handler,
NULL,
};
@@ -265,8 +218,6 @@ int f2fs_getxattr(struct inode *inode, int name_index, const char *name,
return -ENODATA;
page = get_node_page(sbi, fi->i_xattr_nid);
- if (IS_ERR(page))
- return PTR_ERR(page);
base_addr = page_address(page);
list_for_each_xattr(entry, base_addr) {
@@ -317,8 +268,6 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
return 0;
page = get_node_page(sbi, fi->i_xattr_nid);
- if (IS_ERR(page))
- return PTR_ERR(page);
base_addr = page_address(page);
list_for_each_xattr(entry, base_addr) {
@@ -347,7 +296,7 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
}
int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
- const void *value, size_t value_len, struct page *ipage)
+ const void *value, size_t value_len)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -386,7 +335,7 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
set_new_dnode(&dn, inode, NULL, NULL, fi->i_xattr_nid);
mark_inode_dirty(inode);
- page = new_node_page(&dn, XATTR_NODE_OFFSET, ipage);
+ page = new_node_page(&dn, XATTR_NODE_OFFSET);
if (IS_ERR(page)) {
alloc_nid_failed(sbi, fi->i_xattr_nid);
fi->i_xattr_nid = 0;
@@ -486,10 +435,7 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
inode->i_ctime = CURRENT_TIME;
clear_inode_flag(fi, FI_ACL_MODE);
}
- if (ipage)
- update_inode(inode, ipage);
- else
- update_inode_page(inode);
+ update_inode_page(inode);
mutex_unlock_op(sbi, ilock);
return 0;
diff --git a/trunk/fs/f2fs/xattr.h b/trunk/fs/f2fs/xattr.h
index 3c0817bef25d..49c9558305e3 100644
--- a/trunk/fs/f2fs/xattr.h
+++ b/trunk/fs/f2fs/xattr.h
@@ -112,19 +112,21 @@ extern const struct xattr_handler f2fs_xattr_trusted_handler;
extern const struct xattr_handler f2fs_xattr_acl_access_handler;
extern const struct xattr_handler f2fs_xattr_acl_default_handler;
extern const struct xattr_handler f2fs_xattr_advise_handler;
-extern const struct xattr_handler f2fs_xattr_security_handler;
extern const struct xattr_handler *f2fs_xattr_handlers[];
-extern int f2fs_setxattr(struct inode *, int, const char *,
- const void *, size_t, struct page *);
-extern int f2fs_getxattr(struct inode *, int, const char *, void *, size_t);
-extern ssize_t f2fs_listxattr(struct dentry *, char *, size_t);
+extern int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
+ const void *value, size_t value_len);
+extern int f2fs_getxattr(struct inode *inode, int name_index, const char *name,
+ void *buffer, size_t buffer_size);
+extern ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer,
+ size_t buffer_size);
+
#else
#define f2fs_xattr_handlers NULL
static inline int f2fs_setxattr(struct inode *inode, int name_index,
- const char *name, const void *value, size_t value_len)
+ const char *name, const void *value, size_t value_len)
{
return -EOPNOTSUPP;
}
@@ -140,14 +142,4 @@ static inline ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer,
}
#endif
-#ifdef CONFIG_F2FS_FS_SECURITY
-extern int f2fs_init_security(struct inode *, struct inode *,
- const struct qstr *, struct page *);
-#else
-static inline int f2fs_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, struct page *ipage)
-{
- return 0;
-}
-#endif
#endif /* __F2FS_XATTR_H__ */
diff --git a/trunk/fs/fat/dir.c b/trunk/fs/fat/dir.c
index 3963ede84eb0..7a6f02caf286 100644
--- a/trunk/fs/fat/dir.c
+++ b/trunk/fs/fat/dir.c
@@ -543,7 +543,6 @@ int fat_search_long(struct inode *inode, const unsigned char *name,
EXPORT_SYMBOL_GPL(fat_search_long);
struct fat_ioctl_filldir_callback {
- struct dir_context ctx;
void __user *dirent;
int result;
/* for dir ioctl */
@@ -553,9 +552,8 @@ struct fat_ioctl_filldir_callback {
int short_len;
};
-static int __fat_readdir(struct inode *inode, struct file *file,
- struct dir_context *ctx, int short_only,
- struct fat_ioctl_filldir_callback *both)
+static int __fat_readdir(struct inode *inode, struct file *filp, void *dirent,
+ filldir_t filldir, int short_only, int both)
{
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
@@ -566,20 +564,27 @@ static int __fat_readdir(struct inode *inode, struct file *file,
unsigned char bufname[FAT_MAX_SHORT_SIZE];
int isvfat = sbi->options.isvfat;
const char *fill_name = NULL;
- int fake_offset = 0;
+ unsigned long inum;
+ unsigned long lpos, dummy, *furrfu = &lpos;
loff_t cpos;
int short_len = 0, fill_len = 0;
int ret = 0;
mutex_lock(&sbi->s_lock);
- cpos = ctx->pos;
+ cpos = filp->f_pos;
/* Fake . and .. for the root directory. */
if (inode->i_ino == MSDOS_ROOT_INO) {
- if (!dir_emit_dots(file, ctx))
- goto out;
- if (ctx->pos == 2) {
- fake_offset = 1;
+ while (cpos < 2) {
+ if (filldir(dirent, "..", cpos+1, cpos,
+ MSDOS_ROOT_INO, DT_DIR) < 0)
+ goto out;
+ cpos++;
+ filp->f_pos++;
+ }
+ if (cpos == 2) {
+ dummy = 2;
+ furrfu = &dummy;
cpos = 0;
}
}
@@ -614,7 +619,7 @@ static int __fat_readdir(struct inode *inode, struct file *file,
int status = fat_parse_long(inode, &cpos, &bh, &de,
&unicode, &nr_slots);
if (status < 0) {
- ctx->pos = cpos;
+ filp->f_pos = cpos;
ret = status;
goto out;
} else if (status == PARSE_INVALID)
@@ -634,19 +639,6 @@ static int __fat_readdir(struct inode *inode, struct file *file,
/* !both && !short_only, so we don't need shortname. */
if (!both)
goto start_filldir;
-
- short_len = fat_parse_short(sb, de, bufname,
- sbi->options.dotsOK);
- if (short_len == 0)
- goto record_end;
- /* hack for fat_ioctl_filldir() */
- both->longname = fill_name;
- both->long_len = fill_len;
- both->shortname = bufname;
- both->short_len = short_len;
- fill_name = NULL;
- fill_len = 0;
- goto start_filldir;
}
}
@@ -654,21 +646,28 @@ static int __fat_readdir(struct inode *inode, struct file *file,
if (short_len == 0)
goto record_end;
- fill_name = bufname;
- fill_len = short_len;
+ if (nr_slots) {
+ /* hack for fat_ioctl_filldir() */
+ struct fat_ioctl_filldir_callback *p = dirent;
+
+ p->longname = fill_name;
+ p->long_len = fill_len;
+ p->shortname = bufname;
+ p->short_len = short_len;
+ fill_name = NULL;
+ fill_len = 0;
+ } else {
+ fill_name = bufname;
+ fill_len = short_len;
+ }
start_filldir:
- if (!fake_offset)
- ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
-
- if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) {
- if (!dir_emit_dot(file, ctx))
- goto fill_failed;
- } else if (!memcmp(de->name, MSDOS_DOTDOT, MSDOS_NAME)) {
- if (!dir_emit_dotdot(file, ctx))
- goto fill_failed;
+ lpos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
+ if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME))
+ inum = inode->i_ino;
+ else if (!memcmp(de->name, MSDOS_DOTDOT, MSDOS_NAME)) {
+ inum = parent_ino(filp->f_path.dentry);
} else {
- unsigned long inum;
loff_t i_pos = fat_make_i_pos(sb, bh, de);
struct inode *tmp = fat_iget(sb, i_pos);
if (tmp) {
@@ -676,17 +675,18 @@ static int __fat_readdir(struct inode *inode, struct file *file,
iput(tmp);
} else
inum = iunique(sb, MSDOS_ROOT_INO);
- if (!dir_emit(ctx, fill_name, fill_len, inum,
- (de->attr & ATTR_DIR) ? DT_DIR : DT_REG))
- goto fill_failed;
}
+ if (filldir(dirent, fill_name, fill_len, *furrfu, inum,
+ (de->attr & ATTR_DIR) ? DT_DIR : DT_REG) < 0)
+ goto fill_failed;
+
record_end:
- fake_offset = 0;
- ctx->pos = cpos;
+ furrfu = &lpos;
+ filp->f_pos = cpos;
goto get_new;
end_of_dir:
- ctx->pos = cpos;
+ filp->f_pos = cpos;
fill_failed:
brelse(bh);
if (unicode)
@@ -696,9 +696,10 @@ static int __fat_readdir(struct inode *inode, struct file *file,
return ret;
}
-static int fat_readdir(struct file *file, struct dir_context *ctx)
+static int fat_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- return __fat_readdir(file_inode(file), file, ctx, 0, NULL);
+ struct inode *inode = file_inode(filp);
+ return __fat_readdir(inode, filp, dirent, filldir, 0, 0);
}
#define FAT_IOCTL_FILLDIR_FUNC(func, dirent_type) \
@@ -754,25 +755,20 @@ efault: \
FAT_IOCTL_FILLDIR_FUNC(fat_ioctl_filldir, __fat_dirent)
-static int fat_ioctl_readdir(struct inode *inode, struct file *file,
+static int fat_ioctl_readdir(struct inode *inode, struct file *filp,
void __user *dirent, filldir_t filldir,
int short_only, int both)
{
- struct fat_ioctl_filldir_callback buf = {
- .ctx.actor = filldir,
- .dirent = dirent
- };
+ struct fat_ioctl_filldir_callback buf;
int ret;
buf.dirent = dirent;
buf.result = 0;
mutex_lock(&inode->i_mutex);
- buf.ctx.pos = file->f_pos;
ret = -ENOENT;
if (!IS_DEADDIR(inode)) {
- ret = __fat_readdir(inode, file, &buf.ctx,
- short_only, both ? &buf : NULL);
- file->f_pos = buf.ctx.pos;
+ ret = __fat_readdir(inode, filp, &buf, filldir,
+ short_only, both);
}
mutex_unlock(&inode->i_mutex);
if (ret >= 0)
@@ -858,7 +854,7 @@ static long fat_compat_dir_ioctl(struct file *filp, unsigned cmd,
const struct file_operations fat_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = fat_readdir,
+ .readdir = fat_readdir,
.unlocked_ioctl = fat_dir_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = fat_compat_dir_ioctl,
diff --git a/trunk/fs/file_table.c b/trunk/fs/file_table.c
index 485dc0eddd67..cd4d87a82951 100644
--- a/trunk/fs/file_table.c
+++ b/trunk/fs/file_table.c
@@ -306,18 +306,17 @@ void fput(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count)) {
struct task_struct *task = current;
- unsigned long flags;
-
file_sb_list_del(file);
- if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
- init_task_work(&file->f_u.fu_rcuhead, ____fput);
- if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
- return;
+ if (unlikely(in_interrupt() || task->flags & PF_KTHREAD)) {
+ unsigned long flags;
+ spin_lock_irqsave(&delayed_fput_lock, flags);
+ list_add(&file->f_u.fu_list, &delayed_fput_list);
+ schedule_work(&delayed_fput_work);
+ spin_unlock_irqrestore(&delayed_fput_lock, flags);
+ return;
}
- spin_lock_irqsave(&delayed_fput_lock, flags);
- list_add(&file->f_u.fu_list, &delayed_fput_list);
- schedule_work(&delayed_fput_work);
- spin_unlock_irqrestore(&delayed_fput_lock, flags);
+ init_task_work(&file->f_u.fu_rcuhead, ____fput);
+ task_work_add(task, &file->f_u.fu_rcuhead, true);
}
}
diff --git a/trunk/fs/freevxfs/vxfs_lookup.c b/trunk/fs/freevxfs/vxfs_lookup.c
index 25d4099a4aea..664b07a53870 100644
--- a/trunk/fs/freevxfs/vxfs_lookup.c
+++ b/trunk/fs/freevxfs/vxfs_lookup.c
@@ -49,7 +49,7 @@
static struct dentry * vxfs_lookup(struct inode *, struct dentry *, unsigned int);
-static int vxfs_readdir(struct file *, struct dir_context *);
+static int vxfs_readdir(struct file *, void *, filldir_t);
const struct inode_operations vxfs_dir_inode_ops = {
.lookup = vxfs_lookup,
@@ -58,7 +58,7 @@ const struct inode_operations vxfs_dir_inode_ops = {
const struct file_operations vxfs_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = vxfs_readdir,
+ .readdir = vxfs_readdir,
};
@@ -235,7 +235,7 @@ vxfs_lookup(struct inode *dip, struct dentry *dp, unsigned int flags)
* Zero.
*/
static int
-vxfs_readdir(struct file *fp, struct dir_context *ctx)
+vxfs_readdir(struct file *fp, void *retp, filldir_t filler)
{
struct inode *ip = file_inode(fp);
struct super_block *sbp = ip->i_sb;
@@ -243,17 +243,20 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
u_long page, npages, block, pblocks, nblocks, offset;
loff_t pos;
- if (ctx->pos == 0) {
- if (!dir_emit_dot(fp, ctx))
- return 0;
- ctx->pos = 1;
+ switch ((long)fp->f_pos) {
+ case 0:
+ if (filler(retp, ".", 1, fp->f_pos, ip->i_ino, DT_DIR) < 0)
+ goto out;
+ fp->f_pos++;
+ /* fallthrough */
+ case 1:
+ if (filler(retp, "..", 2, fp->f_pos, VXFS_INO(ip)->vii_dotdot, DT_DIR) < 0)
+ goto out;
+ fp->f_pos++;
+ /* fallthrough */
}
- if (ctx->pos == 1) {
- if (!dir_emit(ctx, "..", 2, VXFS_INO(ip)->vii_dotdot, DT_DIR))
- return 0;
- ctx->pos = 2;
- }
- pos = ctx->pos - 2;
+
+ pos = fp->f_pos - 2;
if (pos > VXFS_DIRROUND(ip->i_size))
return 0;
@@ -267,16 +270,16 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks;
for (; page < npages; page++, block = 0) {
- char *kaddr;
+ caddr_t kaddr;
struct page *pp;
pp = vxfs_get_page(ip->i_mapping, page);
if (IS_ERR(pp))
continue;
- kaddr = (char *)page_address(pp);
+ kaddr = (caddr_t)page_address(pp);
for (; block <= nblocks && block <= pblocks; block++) {
- char *baddr, *limit;
+ caddr_t baddr, limit;
struct vxfs_dirblk *dbp;
struct vxfs_direct *de;
@@ -289,18 +292,21 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
(kaddr + offset) :
(baddr + VXFS_DIRBLKOV(dbp)));
- for (; (char *)de <= limit; de = vxfs_next_entry(de)) {
+ for (; (caddr_t)de <= limit; de = vxfs_next_entry(de)) {
+ int over;
+
if (!de->d_reclen)
break;
if (!de->d_ino)
continue;
- offset = (char *)de - kaddr;
- ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
- if (!dir_emit(ctx, de->d_name, de->d_namelen,
- de->d_ino, DT_UNKNOWN)) {
+ offset = (caddr_t)de - kaddr;
+ over = filler(retp, de->d_name, de->d_namelen,
+ ((page << PAGE_CACHE_SHIFT) | offset) + 2,
+ de->d_ino, DT_UNKNOWN);
+ if (over) {
vxfs_put_page(pp);
- return 0;
+ goto done;
}
}
offset = 0;
@@ -308,6 +314,9 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
vxfs_put_page(pp);
offset = 0;
}
- ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
+
+done:
+ fp->f_pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
+out:
return 0;
}
diff --git a/trunk/fs/fs-writeback.c b/trunk/fs/fs-writeback.c
index a85ac4e33436..3be57189efd5 100644
--- a/trunk/fs/fs-writeback.c
+++ b/trunk/fs/fs-writeback.c
@@ -45,7 +45,6 @@ struct wb_writeback_work {
unsigned int for_kupdate:1;
unsigned int range_cyclic:1;
unsigned int for_background:1;
- unsigned int for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
enum wb_reason reason; /* why was writeback initiated? */
struct list_head list; /* pending work list */
@@ -444,11 +443,9 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
/*
* Make sure to wait on the data before writing out the metadata.
* This is important for filesystems that modify metadata on data
- * I/O completion. We don't do it for sync(2) writeback because it has a
- * separate, external IO completion path and ->sync_fs for guaranteeing
- * inode metadata is written back correctly.
+ * I/O completion.
*/
- if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
+ if (wbc->sync_mode == WB_SYNC_ALL) {
int err = filemap_fdatawait(mapping);
if (ret == 0)
ret = err;
@@ -581,7 +578,6 @@ static long writeback_sb_inodes(struct super_block *sb,
.tagged_writepages = work->tagged_writepages,
.for_kupdate = work->for_kupdate,
.for_background = work->for_background,
- .for_sync = work->for_sync,
.range_cyclic = work->range_cyclic,
.range_start = 0,
.range_end = LLONG_MAX,
@@ -1366,7 +1362,6 @@ void sync_inodes_sb(struct super_block *sb)
.range_cyclic = 0,
.done = &done,
.reason = WB_REASON_SYNC,
- .for_sync = 1,
};
/* Nothing to do? */
diff --git a/trunk/fs/fuse/dir.c b/trunk/fs/fuse/dir.c
index 0eda52738ec4..f3f783dc4f75 100644
--- a/trunk/fs/fuse/dir.c
+++ b/trunk/fs/fuse/dir.c
@@ -14,7 +14,7 @@
#include
#include
-static bool fuse_use_readdirplus(struct inode *dir, struct dir_context *ctx)
+static bool fuse_use_readdirplus(struct inode *dir, struct file *filp)
{
struct fuse_conn *fc = get_fuse_conn(dir);
struct fuse_inode *fi = get_fuse_inode(dir);
@@ -25,7 +25,7 @@ static bool fuse_use_readdirplus(struct inode *dir, struct dir_context *ctx)
return true;
if (test_and_clear_bit(FUSE_I_ADVISE_RDPLUS, &fi->state))
return true;
- if (ctx->pos == 0)
+ if (filp->f_pos == 0)
return true;
return false;
}
@@ -1165,23 +1165,25 @@ static int fuse_permission(struct inode *inode, int mask)
}
static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
- struct dir_context *ctx)
+ void *dstbuf, filldir_t filldir)
{
while (nbytes >= FUSE_NAME_OFFSET) {
struct fuse_dirent *dirent = (struct fuse_dirent *) buf;
size_t reclen = FUSE_DIRENT_SIZE(dirent);
+ int over;
if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
return -EIO;
if (reclen > nbytes)
break;
- if (!dir_emit(ctx, dirent->name, dirent->namelen,
- dirent->ino, dirent->type))
+ over = filldir(dstbuf, dirent->name, dirent->namelen,
+ file->f_pos, dirent->ino, dirent->type);
+ if (over)
break;
buf += reclen;
nbytes -= reclen;
- ctx->pos = dirent->off;
+ file->f_pos = dirent->off;
}
return 0;
@@ -1282,7 +1284,7 @@ static int fuse_direntplus_link(struct file *file,
}
static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
- struct dir_context *ctx, u64 attr_version)
+ void *dstbuf, filldir_t filldir, u64 attr_version)
{
struct fuse_direntplus *direntplus;
struct fuse_dirent *dirent;
@@ -1307,9 +1309,10 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
we need to send a FORGET for each of those
which we did not link.
*/
- over = !dir_emit(ctx, dirent->name, dirent->namelen,
- dirent->ino, dirent->type);
- ctx->pos = dirent->off;
+ over = filldir(dstbuf, dirent->name, dirent->namelen,
+ file->f_pos, dirent->ino,
+ dirent->type);
+ file->f_pos = dirent->off;
}
buf += reclen;
@@ -1323,7 +1326,7 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
return 0;
}
-static int fuse_readdir(struct file *file, struct dir_context *ctx)
+static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
{
int plus, err;
size_t nbytes;
@@ -1346,17 +1349,17 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
return -ENOMEM;
}
- plus = fuse_use_readdirplus(inode, ctx);
+ plus = fuse_use_readdirplus(inode, file);
req->out.argpages = 1;
req->num_pages = 1;
req->pages[0] = page;
req->page_descs[0].length = PAGE_SIZE;
if (plus) {
attr_version = fuse_get_attr_version(fc);
- fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
+ fuse_read_fill(req, file, file->f_pos, PAGE_SIZE,
FUSE_READDIRPLUS);
} else {
- fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
+ fuse_read_fill(req, file, file->f_pos, PAGE_SIZE,
FUSE_READDIR);
}
fuse_request_send(fc, req);
@@ -1366,11 +1369,11 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
if (!err) {
if (plus) {
err = parse_dirplusfile(page_address(page), nbytes,
- file, ctx,
+ file, dstbuf, filldir,
attr_version);
} else {
err = parse_dirfile(page_address(page), nbytes, file,
- ctx);
+ dstbuf, filldir);
}
}
@@ -1883,7 +1886,7 @@ static const struct inode_operations fuse_dir_inode_operations = {
static const struct file_operations fuse_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = fuse_readdir,
+ .readdir = fuse_readdir,
.open = fuse_dir_open,
.release = fuse_dir_release,
.fsync = fuse_dir_fsync,
diff --git a/trunk/fs/fuse/file.c b/trunk/fs/fuse/file.c
index 35f281033142..e570081f9f76 100644
--- a/trunk/fs/fuse/file.c
+++ b/trunk/fs/fuse/file.c
@@ -2470,16 +2470,13 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
.mode = mode
};
int err;
- bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
- (mode & FALLOC_FL_PUNCH_HOLE);
if (fc->no_fallocate)
return -EOPNOTSUPP;
- if (lock_inode) {
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
mutex_lock(&inode->i_mutex);
- if (mode & FALLOC_FL_PUNCH_HOLE)
- fuse_set_nowrite(inode);
+ fuse_set_nowrite(inode);
}
req = fuse_get_req_nopages(fc);
@@ -2514,9 +2511,8 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
fuse_invalidate_attr(inode);
out:
- if (lock_inode) {
- if (mode & FALLOC_FL_PUNCH_HOLE)
- fuse_release_nowrite(inode);
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ fuse_release_nowrite(inode);
mutex_unlock(&inode->i_mutex);
}
diff --git a/trunk/fs/gfs2/aops.c b/trunk/fs/gfs2/aops.c
index ee48ad37d9c0..0bad69ed6336 100644
--- a/trunk/fs/gfs2/aops.c
+++ b/trunk/fs/gfs2/aops.c
@@ -110,7 +110,7 @@ static int gfs2_writepage_common(struct page *page,
/* Is the page fully outside i_size? (truncate in progress) */
offset = i_size & (PAGE_CACHE_SIZE-1);
if (page->index > end_index || (page->index == end_index && !offset)) {
- page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ page->mapping->a_ops->invalidatepage(page, 0);
goto out;
}
return 1;
@@ -299,8 +299,7 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
/* Is the page fully outside i_size? (truncate in progress) */
if (page->index > end_index || (page->index == end_index && !offset)) {
- page->mapping->a_ops->invalidatepage(page, 0,
- PAGE_CACHE_SIZE);
+ page->mapping->a_ops->invalidatepage(page, 0);
unlock_page(page);
continue;
}
@@ -944,33 +943,27 @@ static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
unlock_buffer(bh);
}
-static void gfs2_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
- unsigned int stop = offset + length;
- int partial_page = (offset || length < PAGE_CACHE_SIZE);
struct buffer_head *bh, *head;
unsigned long pos = 0;
BUG_ON(!PageLocked(page));
- if (!partial_page)
+ if (offset == 0)
ClearPageChecked(page);
if (!page_has_buffers(page))
goto out;
bh = head = page_buffers(page);
do {
- if (pos + bh->b_size > stop)
- return;
-
if (offset <= pos)
gfs2_discard(sdp, bh);
pos += bh->b_size;
bh = bh->b_this_page;
} while (bh != head);
out:
- if (!partial_page)
+ if (offset == 0)
try_to_release_page(page, 0);
}
diff --git a/trunk/fs/gfs2/bmap.c b/trunk/fs/gfs2/bmap.c
index 5e2f56fccf6b..93b5809c20bb 100644
--- a/trunk/fs/gfs2/bmap.c
+++ b/trunk/fs/gfs2/bmap.c
@@ -1232,9 +1232,7 @@ static int do_grow(struct inode *inode, u64 size)
unstuff = 1;
}
- error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
- (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
- 0 : RES_QUOTA), 0);
+ error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT, 0);
if (error)
goto do_grow_release;
diff --git a/trunk/fs/gfs2/dir.c b/trunk/fs/gfs2/dir.c
index 0cb4c1557f20..b631c9043460 100644
--- a/trunk/fs/gfs2/dir.c
+++ b/trunk/fs/gfs2/dir.c
@@ -1125,14 +1125,13 @@ static int dir_double_exhash(struct gfs2_inode *dip)
if (IS_ERR(hc))
return PTR_ERR(hc);
- hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS | __GFP_NOWARN);
+ h = hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS | __GFP_NOWARN);
if (hc2 == NULL)
hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS, PAGE_KERNEL);
if (!hc2)
return -ENOMEM;
- h = hc2;
error = gfs2_meta_inode_buffer(dip, &dibh);
if (error)
goto out_kfree;
@@ -1213,7 +1212,9 @@ static int compare_dents(const void *a, const void *b)
/**
* do_filldir_main - read out directory entries
* @dip: The GFS2 inode
- * @ctx: what to feed the entries to
+ * @offset: The offset in the file to read from
+ * @opaque: opaque data to pass to filldir
+ * @filldir: The function to pass entries to
* @darr: an array of struct gfs2_dirent pointers to read
* @entries: the number of entries in darr
* @copied: pointer to int that's non-zero if a entry has been copied out
@@ -1223,10 +1224,11 @@ static int compare_dents(const void *a, const void *b)
* the possibility that they will fall into different readdir buffers or
* that someone will want to seek to that location.
*
- * Returns: errno, >0 if the actor tells you to stop
+ * Returns: errno, >0 on exception from filldir
*/
-static int do_filldir_main(struct gfs2_inode *dip, struct dir_context *ctx,
+static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
+ void *opaque, filldir_t filldir,
const struct gfs2_dirent **darr, u32 entries,
int *copied)
{
@@ -1234,6 +1236,7 @@ static int do_filldir_main(struct gfs2_inode *dip, struct dir_context *ctx,
u64 off, off_next;
unsigned int x, y;
int run = 0;
+ int error = 0;
sort(darr, entries, sizeof(struct gfs2_dirent *), compare_dents, NULL);
@@ -1250,9 +1253,9 @@ static int do_filldir_main(struct gfs2_inode *dip, struct dir_context *ctx,
off_next = be32_to_cpu(dent_next->de_hash);
off_next = gfs2_disk_hash2offset(off_next);
- if (off < ctx->pos)
+ if (off < *offset)
continue;
- ctx->pos = off;
+ *offset = off;
if (off_next == off) {
if (*copied && !run)
@@ -1261,25 +1264,26 @@ static int do_filldir_main(struct gfs2_inode *dip, struct dir_context *ctx,
} else
run = 0;
} else {
- if (off < ctx->pos)
+ if (off < *offset)
continue;
- ctx->pos = off;
+ *offset = off;
}
- if (!dir_emit(ctx, (const char *)(dent + 1),
+ error = filldir(opaque, (const char *)(dent + 1),
be16_to_cpu(dent->de_name_len),
- be64_to_cpu(dent->de_inum.no_addr),
- be16_to_cpu(dent->de_type)))
+ off, be64_to_cpu(dent->de_inum.no_addr),
+ be16_to_cpu(dent->de_type));
+ if (error)
return 1;
*copied = 1;
}
- /* Increment the ctx->pos by one, so the next time we come into the
+ /* Increment the *offset by one, so the next time we come into the
do_filldir fxn, we get the next entry instead of the last one in the
current leaf */
- ctx->pos++;
+ (*offset)++;
return 0;
}
@@ -1303,8 +1307,8 @@ static void gfs2_free_sort_buffer(void *ptr)
kfree(ptr);
}
-static int gfs2_dir_read_leaf(struct inode *inode, struct dir_context *ctx,
- int *copied, unsigned *depth,
+static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
+ filldir_t filldir, int *copied, unsigned *depth,
u64 leaf_no)
{
struct gfs2_inode *ip = GFS2_I(inode);
@@ -1382,7 +1386,8 @@ static int gfs2_dir_read_leaf(struct inode *inode, struct dir_context *ctx,
} while(lfn);
BUG_ON(entries2 != entries);
- error = do_filldir_main(ip, ctx, darr, entries, copied);
+ error = do_filldir_main(ip, offset, opaque, filldir, darr,
+ entries, copied);
out_free:
for(i = 0; i < leaf; i++)
brelse(larr[i]);
@@ -1441,13 +1446,15 @@ static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
/**
* dir_e_read - Reads the entries from a directory into a filldir buffer
* @dip: dinode pointer
- * @ctx: actor to feed the entries to
+ * @offset: the hash of the last entry read shifted to the right once
+ * @opaque: buffer for the filldir function to fill
+ * @filldir: points to the filldir function to use
*
* Returns: errno
*/
-static int dir_e_read(struct inode *inode, struct dir_context *ctx,
- struct file_ra_state *f_ra)
+static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
+ filldir_t filldir, struct file_ra_state *f_ra)
{
struct gfs2_inode *dip = GFS2_I(inode);
u32 hsize, len = 0;
@@ -1458,7 +1465,7 @@ static int dir_e_read(struct inode *inode, struct dir_context *ctx,
unsigned depth = 0;
hsize = 1 << dip->i_depth;
- hash = gfs2_dir_offset2hash(ctx->pos);
+ hash = gfs2_dir_offset2hash(*offset);
index = hash >> (32 - dip->i_depth);
if (dip->i_hash_cache == NULL)
@@ -1470,7 +1477,7 @@ static int dir_e_read(struct inode *inode, struct dir_context *ctx,
gfs2_dir_readahead(inode, hsize, index, f_ra);
while (index < hsize) {
- error = gfs2_dir_read_leaf(inode, ctx,
+ error = gfs2_dir_read_leaf(inode, offset, opaque, filldir,
&copied, &depth,
be64_to_cpu(lp[index]));
if (error)
@@ -1485,8 +1492,8 @@ static int dir_e_read(struct inode *inode, struct dir_context *ctx,
return error;
}
-int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
- struct file_ra_state *f_ra)
+int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
+ filldir_t filldir, struct file_ra_state *f_ra)
{
struct gfs2_inode *dip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -1500,7 +1507,7 @@ int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
return 0;
if (dip->i_diskflags & GFS2_DIF_EXHASH)
- return dir_e_read(inode, ctx, f_ra);
+ return dir_e_read(inode, offset, opaque, filldir, f_ra);
if (!gfs2_is_stuffed(dip)) {
gfs2_consist_inode(dip);
@@ -1532,7 +1539,7 @@ int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
error = -EIO;
goto out;
}
- error = do_filldir_main(dip, ctx, darr,
+ error = do_filldir_main(dip, offset, opaque, filldir, darr,
dip->i_entries, &copied);
out:
kfree(darr);
@@ -1548,9 +1555,9 @@ int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
/**
* gfs2_dir_search - Search a directory
- * @dip: The GFS2 dir inode
- * @name: The name we are looking up
- * @fail_on_exist: Fail if the name exists rather than looking it up
+ * @dip: The GFS2 inode
+ * @filename:
+ * @inode:
*
* This routine searches a directory for a file or another directory.
* Assumes a glock is held on dip.
@@ -1558,25 +1565,22 @@ int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
* Returns: errno
*/
-struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name,
- bool fail_on_exist)
+struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
{
struct buffer_head *bh;
struct gfs2_dirent *dent;
- u64 addr, formal_ino;
- u16 dtype;
+ struct inode *inode;
dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
if (dent) {
if (IS_ERR(dent))
return ERR_CAST(dent);
- dtype = be16_to_cpu(dent->de_type);
- addr = be64_to_cpu(dent->de_inum.no_addr);
- formal_ino = be64_to_cpu(dent->de_inum.no_formal_ino);
+ inode = gfs2_inode_lookup(dir->i_sb,
+ be16_to_cpu(dent->de_type),
+ be64_to_cpu(dent->de_inum.no_addr),
+ be64_to_cpu(dent->de_inum.no_formal_ino), 0);
brelse(bh);
- if (fail_on_exist)
- return ERR_PTR(-EEXIST);
- return gfs2_inode_lookup(dir->i_sb, dtype, addr, formal_ino, 0);
+ return inode;
}
return ERR_PTR(-ENOENT);
}
diff --git a/trunk/fs/gfs2/dir.h b/trunk/fs/gfs2/dir.h
index 4f03bbd1873f..98c960beab35 100644
--- a/trunk/fs/gfs2/dir.h
+++ b/trunk/fs/gfs2/dir.h
@@ -18,15 +18,14 @@ struct gfs2_inode;
struct gfs2_inum;
extern struct inode *gfs2_dir_search(struct inode *dir,
- const struct qstr *filename,
- bool fail_on_exist);
+ const struct qstr *filename);
extern int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
const struct gfs2_inode *ip);
extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
const struct gfs2_inode *ip);
extern int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
-extern int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
- struct file_ra_state *f_ra);
+extern int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
+ filldir_t filldir, struct file_ra_state *f_ra);
extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
const struct gfs2_inode *nip, unsigned int new_type);
diff --git a/trunk/fs/gfs2/export.c b/trunk/fs/gfs2/export.c
index 8b9b3775e2e7..9973df4ff565 100644
--- a/trunk/fs/gfs2/export.c
+++ b/trunk/fs/gfs2/export.c
@@ -64,7 +64,6 @@ static int gfs2_encode_fh(struct inode *inode, __u32 *p, int *len,
}
struct get_name_filldir {
- struct dir_context ctx;
struct gfs2_inum_host inum;
char *name;
};
@@ -89,11 +88,9 @@ static int gfs2_get_name(struct dentry *parent, char *name,
struct inode *dir = parent->d_inode;
struct inode *inode = child->d_inode;
struct gfs2_inode *dip, *ip;
- struct get_name_filldir gnfd = {
- .ctx.actor = get_name_filldir,
- .name = name
- };
+ struct get_name_filldir gnfd;
struct gfs2_holder gh;
+ u64 offset = 0;
int error;
struct file_ra_state f_ra = { .start = 0 };
@@ -109,12 +106,13 @@ static int gfs2_get_name(struct dentry *parent, char *name,
*name = 0;
gnfd.inum.no_addr = ip->i_no_addr;
gnfd.inum.no_formal_ino = ip->i_no_formal_ino;
+ gnfd.name = name;
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &gh);
if (error)
return error;
- error = gfs2_dir_read(dir, &gnfd.ctx, &f_ra);
+ error = gfs2_dir_read(dir, &offset, &gnfd, get_name_filldir, &f_ra);
gfs2_glock_dq_uninit(&gh);
diff --git a/trunk/fs/gfs2/file.c b/trunk/fs/gfs2/file.c
index f99f9e8a325f..ad0dc38d87ab 100644
--- a/trunk/fs/gfs2/file.c
+++ b/trunk/fs/gfs2/file.c
@@ -82,28 +82,35 @@ static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
}
/**
- * gfs2_readdir - Iterator for a directory
+ * gfs2_readdir - Read directory entries from a directory
* @file: The directory to read from
- * @ctx: What to feed directory entries to
+ * @dirent: Buffer for dirents
+ * @filldir: Function used to do the copying
*
* Returns: errno
*/
-static int gfs2_readdir(struct file *file, struct dir_context *ctx)
+static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
struct inode *dir = file->f_mapping->host;
struct gfs2_inode *dip = GFS2_I(dir);
struct gfs2_holder d_gh;
+ u64 offset = file->f_pos;
int error;
- error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
- if (error)
+ gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
+ error = gfs2_glock_nq(&d_gh);
+ if (error) {
+ gfs2_holder_uninit(&d_gh);
return error;
+ }
- error = gfs2_dir_read(dir, ctx, &file->f_ra);
+ error = gfs2_dir_read(dir, &offset, dirent, filldir, &file->f_ra);
gfs2_glock_dq_uninit(&d_gh);
+ file->f_pos = offset;
+
return error;
}
@@ -531,30 +538,21 @@ static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
}
/**
- * gfs2_open_common - This is common to open and atomic_open
- * @inode: The inode being opened
- * @file: The file being opened
- *
- * This maybe called under a glock or not depending upon how it has
- * been called. We must always be called under a glock for regular
- * files, however. For other file types, it does not matter whether
- * we hold the glock or not.
+ * gfs2_open - open a file
+ * @inode: the inode to open
+ * @file: the struct file for this opening
*
- * Returns: Error code or 0 for success
+ * Returns: errno
*/
-int gfs2_open_common(struct inode *inode, struct file *file)
+static int gfs2_open(struct inode *inode, struct file *file)
{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_holder i_gh;
struct gfs2_file *fp;
- int ret;
-
- if (S_ISREG(inode->i_mode)) {
- ret = generic_file_open(inode, file);
- if (ret)
- return ret;
- }
+ int error;
- fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
+ fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
if (!fp)
return -ENOMEM;
@@ -562,43 +560,29 @@ int gfs2_open_common(struct inode *inode, struct file *file)
gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
file->private_data = fp;
- return 0;
-}
-
-/**
- * gfs2_open - open a file
- * @inode: the inode to open
- * @file: the struct file for this opening
- *
- * After atomic_open, this function is only used for opening files
- * which are already cached. We must still get the glock for regular
- * files to ensure that we have the file size uptodate for the large
- * file check which is in the common code. That is only an issue for
- * regular files though.
- *
- * Returns: errno
- */
-
-static int gfs2_open(struct inode *inode, struct file *file)
-{
- struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_holder i_gh;
- int error;
- bool need_unlock = false;
if (S_ISREG(ip->i_inode.i_mode)) {
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
&i_gh);
if (error)
- return error;
- need_unlock = true;
- }
+ goto fail;
- error = gfs2_open_common(inode, file);
+ if (!(file->f_flags & O_LARGEFILE) &&
+ i_size_read(inode) > MAX_NON_LFS) {
+ error = -EOVERFLOW;
+ goto fail_gunlock;
+ }
- if (need_unlock)
gfs2_glock_dq_uninit(&i_gh);
+ }
+ return 0;
+
+fail_gunlock:
+ gfs2_glock_dq_uninit(&i_gh);
+fail:
+ file->private_data = NULL;
+ kfree(fp);
return error;
}
@@ -1064,7 +1048,7 @@ const struct file_operations gfs2_file_fops = {
};
const struct file_operations gfs2_dir_fops = {
- .iterate = gfs2_readdir,
+ .readdir = gfs2_readdir,
.unlocked_ioctl = gfs2_ioctl,
.open = gfs2_open,
.release = gfs2_release,
@@ -1094,7 +1078,7 @@ const struct file_operations gfs2_file_fops_nolock = {
};
const struct file_operations gfs2_dir_fops_nolock = {
- .iterate = gfs2_readdir,
+ .readdir = gfs2_readdir,
.unlocked_ioctl = gfs2_ioctl,
.open = gfs2_open,
.release = gfs2_release,
diff --git a/trunk/fs/gfs2/glops.c b/trunk/fs/gfs2/glops.c
index 5f2e5224c51c..c66e99c97571 100644
--- a/trunk/fs/gfs2/glops.c
+++ b/trunk/fs/gfs2/glops.c
@@ -54,6 +54,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
struct gfs2_bufdata *bd, *tmp;
struct buffer_head *bh;
const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);
+ sector_t blocknr;
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_ail_lock);
@@ -64,6 +65,13 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
continue;
gfs2_ail_error(gl, bh);
}
+ blocknr = bh->b_blocknr;
+ bh->b_private = NULL;
+ gfs2_remove_from_ail(bd); /* drops ref on bh */
+
+ bd->bd_bh = NULL;
+ bd->bd_blkno = blocknr;
+
gfs2_trans_add_revoke(sdp, bd);
}
GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
diff --git a/trunk/fs/gfs2/inode.c b/trunk/fs/gfs2/inode.c
index bbb2715171cd..62b484e4a9e4 100644
--- a/trunk/fs/gfs2/inode.c
+++ b/trunk/fs/gfs2/inode.c
@@ -313,7 +313,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
goto out;
}
- inode = gfs2_dir_search(dir, name, false);
+ inode = gfs2_dir_search(dir, name);
if (IS_ERR(inode))
error = PTR_ERR(inode);
out:
@@ -346,6 +346,17 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
if (!dip->i_inode.i_nlink)
return -ENOENT;
+ error = gfs2_dir_check(&dip->i_inode, name, NULL);
+ switch (error) {
+ case -ENOENT:
+ error = 0;
+ break;
+ case 0:
+ return -EEXIST;
+ default:
+ return error;
+ }
+
if (dip->i_entries == (u32)-1)
return -EFBIG;
if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
@@ -535,7 +546,6 @@ static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip,
* gfs2_create_inode - Create a new inode
* @dir: The parent directory
* @dentry: The new dentry
- * @file: If non-NULL, the file which is being opened
* @mode: The permissions on the new inode
* @dev: For device nodes, this is the device number
* @symname: For symlinks, this is the link destination
@@ -545,9 +555,8 @@ static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip,
*/
static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
- struct file *file,
umode_t mode, dev_t dev, const char *symname,
- unsigned int size, int excl, int *opened)
+ unsigned int size, int excl)
{
const struct qstr *name = &dentry->d_name;
struct gfs2_holder ghs[2];
@@ -555,7 +564,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
struct gfs2_inode *dip = GFS2_I(dir), *ip;
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct gfs2_glock *io_gl;
- struct dentry *d;
int error;
u32 aflags = 0;
int arq;
@@ -576,29 +584,14 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
goto fail;
error = create_ok(dip, name, mode);
- if (error)
- goto fail_gunlock;
-
- inode = gfs2_dir_search(dir, &dentry->d_name, !S_ISREG(mode) || excl);
- error = PTR_ERR(inode);
- if (!IS_ERR(inode)) {
- d = d_splice_alias(inode, dentry);
- error = 0;
- if (file && !IS_ERR(d)) {
- if (d == NULL)
- d = dentry;
- if (S_ISREG(inode->i_mode))
- error = finish_open(file, d, gfs2_open_common, opened);
- else
- error = finish_no_open(file, d);
- }
+ if ((error == -EEXIST) && S_ISREG(mode) && !excl) {
+ inode = gfs2_lookupi(dir, &dentry->d_name, 0);
gfs2_glock_dq_uninit(ghs);
- if (IS_ERR(d))
- return PTR_RET(d);
- return error;
- } else if (error != -ENOENT) {
- goto fail_gunlock;
+ d_instantiate(dentry, inode);
+ return IS_ERR(inode) ? PTR_ERR(inode) : 0;
}
+ if (error)
+ goto fail_gunlock;
arq = error = gfs2_diradd_alloc_required(dir, name);
if (error < 0)
@@ -693,12 +686,10 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
goto fail_gunlock3;
mark_inode_dirty(inode);
- d_instantiate(dentry, inode);
- if (file)
- error = finish_open(file, dentry, gfs2_open_common, opened);
gfs2_glock_dq_uninit(ghs);
gfs2_glock_dq_uninit(ghs + 1);
- return error;
+ d_instantiate(dentry, inode);
+ return 0;
fail_gunlock3:
gfs2_glock_dq_uninit(ghs + 1);
@@ -738,56 +729,36 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
static int gfs2_create(struct inode *dir, struct dentry *dentry,
umode_t mode, bool excl)
{
- return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl, NULL);
+ return gfs2_create_inode(dir, dentry, S_IFREG | mode, 0, NULL, 0, excl);
}
/**
- * __gfs2_lookup - Look up a filename in a directory and return its inode
+ * gfs2_lookup - Look up a filename in a directory and return its inode
* @dir: The directory inode
* @dentry: The dentry of the new inode
- * @file: File to be opened
- * @opened: atomic_open flags
+ * @nd: passed from Linux VFS, ignored by us
*
+ * Called by the VFS layer. Lock dir and call gfs2_lookupi()
*
* Returns: errno
*/
-static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
- struct file *file, int *opened)
-{
- struct inode *inode;
- struct dentry *d;
- struct gfs2_holder gh;
- struct gfs2_glock *gl;
- int error;
-
- inode = gfs2_lookupi(dir, &dentry->d_name, 0);
- if (!inode)
- return NULL;
- if (IS_ERR(inode))
- return ERR_CAST(inode);
-
- gl = GFS2_I(inode)->i_gl;
- error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
- if (error) {
- iput(inode);
- return ERR_PTR(error);
- }
-
- d = d_splice_alias(inode, dentry);
- if (file && S_ISREG(inode->i_mode))
- error = finish_open(file, dentry, gfs2_open_common, opened);
-
- gfs2_glock_dq_uninit(&gh);
- if (error)
- return ERR_PTR(error);
- return d;
-}
-
static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry,
- unsigned flags)
+ unsigned int flags)
{
- return __gfs2_lookup(dir, dentry, NULL, NULL);
+ struct inode *inode = gfs2_lookupi(dir, &dentry->d_name, 0);
+ if (inode && !IS_ERR(inode)) {
+ struct gfs2_glock *gl = GFS2_I(inode)->i_gl;
+ struct gfs2_holder gh;
+ int error;
+ error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
+ if (error) {
+ iput(inode);
+ return ERR_PTR(error);
+ }
+ gfs2_glock_dq_uninit(&gh);
+ }
+ return d_splice_alias(inode, dentry);
}
/**
@@ -1105,7 +1076,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode) - 1)
return -ENAMETOOLONG;
- return gfs2_create_inode(dir, dentry, NULL, S_IFLNK | S_IRWXUGO, 0, symname, size, 0, NULL);
+ return gfs2_create_inode(dir, dentry, S_IFLNK | S_IRWXUGO, 0, symname, size, 0);
}
/**
@@ -1121,7 +1092,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct gfs2_sbd *sdp = GFS2_SB(dir);
unsigned dsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
- return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0, NULL);
+ return gfs2_create_inode(dir, dentry, S_IFDIR | mode, 0, NULL, dsize, 0);
}
/**
@@ -1136,43 +1107,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
static int gfs2_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t dev)
{
- return gfs2_create_inode(dir, dentry, NULL, mode, dev, NULL, 0, 0, NULL);
-}
-
-/**
- * gfs2_atomic_open - Atomically open a file
- * @dir: The directory
- * @dentry: The proposed new entry
- * @file: The proposed new struct file
- * @flags: open flags
- * @mode: File mode
- * @opened: Flag to say whether the file has been opened or not
- *
- * Returns: error code or 0 for success
- */
-
-static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned flags,
- umode_t mode, int *opened)
-{
- struct dentry *d;
- bool excl = !!(flags & O_EXCL);
-
- d = __gfs2_lookup(dir, dentry, file, opened);
- if (IS_ERR(d))
- return PTR_ERR(d);
- if (d == NULL)
- d = dentry;
- if (d->d_inode) {
- if (!(*opened & FILE_OPENED))
- return finish_no_open(file, d);
- return 0;
- }
-
- if (!(flags & O_CREAT))
- return -ENOENT;
-
- return gfs2_create_inode(dir, dentry, file, S_IFREG | mode, 0, NULL, 0, excl, opened);
+ return gfs2_create_inode(dir, dentry, mode, dev, NULL, 0, 0);
}
/*
@@ -1852,7 +1787,6 @@ const struct inode_operations gfs2_dir_iops = {
.removexattr = gfs2_removexattr,
.fiemap = gfs2_fiemap,
.get_acl = gfs2_get_acl,
- .atomic_open = gfs2_atomic_open,
};
const struct inode_operations gfs2_symlink_iops = {
diff --git a/trunk/fs/gfs2/inode.h b/trunk/fs/gfs2/inode.h
index ba4d9492d422..c53c7477f6da 100644
--- a/trunk/fs/gfs2/inode.h
+++ b/trunk/fs/gfs2/inode.h
@@ -109,7 +109,6 @@ extern int gfs2_permission(struct inode *inode, int mask);
extern int gfs2_setattr_simple(struct inode *inode, struct iattr *attr);
extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
-extern int gfs2_open_common(struct inode *inode, struct file *file);
extern const struct inode_operations gfs2_file_iops;
extern const struct inode_operations gfs2_dir_iops;
diff --git a/trunk/fs/gfs2/log.c b/trunk/fs/gfs2/log.c
index 610613fb65b5..b404f4853034 100644
--- a/trunk/fs/gfs2/log.c
+++ b/trunk/fs/gfs2/log.c
@@ -211,16 +211,15 @@ static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
struct gfs2_trans *tr, *s;
- int oldest_tr = 1;
int ret;
spin_lock(&sdp->sd_ail_lock);
list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
gfs2_ail1_empty_one(sdp, tr);
- if (list_empty(&tr->tr_ail1_list) && oldest_tr)
+ if (list_empty(&tr->tr_ail1_list))
list_move(&tr->tr_list, &sdp->sd_ail2_list);
else
- oldest_tr = 0;
+ break;
}
ret = list_empty(&sdp->sd_ail1_list);
spin_unlock(&sdp->sd_ail_lock);
@@ -318,7 +317,7 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
- unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
+ unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);
unsigned wanted = blks + reserved_blks;
DEFINE_WAIT(wait);
int did_wait = 0;
@@ -546,76 +545,6 @@ void gfs2_ordered_del_inode(struct gfs2_inode *ip)
spin_unlock(&sdp->sd_ordered_lock);
}
-void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
-{
- struct buffer_head *bh = bd->bd_bh;
- struct gfs2_glock *gl = bd->bd_gl;
-
- gfs2_remove_from_ail(bd);
- bd->bd_bh = NULL;
- bh->b_private = NULL;
- bd->bd_blkno = bh->b_blocknr;
- bd->bd_ops = &gfs2_revoke_lops;
- sdp->sd_log_num_revoke++;
- atomic_inc(&gl->gl_revokes);
- set_bit(GLF_LFLUSH, &gl->gl_flags);
- list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
-}
-
-void gfs2_write_revokes(struct gfs2_sbd *sdp)
-{
- struct gfs2_trans *tr;
- struct gfs2_bufdata *bd, *tmp;
- int have_revokes = 0;
- int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
-
- gfs2_ail1_empty(sdp);
- spin_lock(&sdp->sd_ail_lock);
- list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
- list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
- if (list_empty(&bd->bd_list)) {
- have_revokes = 1;
- goto done;
- }
- }
- }
-done:
- spin_unlock(&sdp->sd_ail_lock);
- if (have_revokes == 0)
- return;
- while (sdp->sd_log_num_revoke > max_revokes)
- max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
- max_revokes -= sdp->sd_log_num_revoke;
- if (!sdp->sd_log_num_revoke) {
- atomic_dec(&sdp->sd_log_blks_free);
- /* If no blocks have been reserved, we need to also
- * reserve a block for the header */
- if (!sdp->sd_log_blks_reserved)
- atomic_dec(&sdp->sd_log_blks_free);
- }
- gfs2_log_lock(sdp);
- spin_lock(&sdp->sd_ail_lock);
- list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
- list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
- if (max_revokes == 0)
- goto out_of_blocks;
- if (!list_empty(&bd->bd_list))
- continue;
- gfs2_add_revoke(sdp, bd);
- max_revokes--;
- }
- }
-out_of_blocks:
- spin_unlock(&sdp->sd_ail_lock);
- gfs2_log_unlock(sdp);
-
- if (!sdp->sd_log_num_revoke) {
- atomic_inc(&sdp->sd_log_blks_free);
- if (!sdp->sd_log_blks_reserved)
- atomic_inc(&sdp->sd_log_blks_free);
- }
-}
-
/**
* log_write_header - Get and initialize a journal header buffer
* @sdp: The GFS2 superblock
@@ -633,6 +562,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
lh = page_address(page);
clear_page(lh);
+ gfs2_ail1_empty(sdp);
tail = current_tail(sdp);
lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
diff --git a/trunk/fs/gfs2/log.h b/trunk/fs/gfs2/log.h
index 37216634f0aa..3566f35915e0 100644
--- a/trunk/fs/gfs2/log.h
+++ b/trunk/fs/gfs2/log.h
@@ -72,7 +72,5 @@ extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
extern void gfs2_log_shutdown(struct gfs2_sbd *sdp);
extern void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
extern int gfs2_logd(void *data);
-extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
-extern void gfs2_write_revokes(struct gfs2_sbd *sdp);
#endif /* __LOG_DOT_H__ */
diff --git a/trunk/fs/gfs2/lops.c b/trunk/fs/gfs2/lops.c
index 17c5b5d7dc88..6c33d7b6e0c4 100644
--- a/trunk/fs/gfs2/lops.c
+++ b/trunk/fs/gfs2/lops.c
@@ -16,7 +16,6 @@
#include
#include
#include
-#include
#include "gfs2.h"
#include "incore.h"
@@ -402,20 +401,6 @@ static void gfs2_check_magic(struct buffer_head *bh)
kunmap_atomic(kaddr);
}
-static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
-{
- struct gfs2_bufdata *bda, *bdb;
-
- bda = list_entry(a, struct gfs2_bufdata, bd_list);
- bdb = list_entry(b, struct gfs2_bufdata, bd_list);
-
- if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
- return -1;
- if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
- return 1;
- return 0;
-}
-
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
unsigned int total, struct list_head *blist,
bool is_databuf)
@@ -428,7 +413,6 @@ static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
__be64 *ptr;
gfs2_log_lock(sdp);
- list_sort(NULL, blist, blocknr_cmp);
bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
while(total) {
num = total;
@@ -606,7 +590,6 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
struct page *page;
unsigned int length;
- gfs2_write_revokes(sdp);
if (!sdp->sd_log_num_revoke)
return;
@@ -853,6 +836,10 @@ const struct gfs2_log_operations gfs2_revoke_lops = {
.lo_name = "revoke",
};
+const struct gfs2_log_operations gfs2_rg_lops = {
+ .lo_name = "rg",
+};
+
const struct gfs2_log_operations gfs2_databuf_lops = {
.lo_before_commit = databuf_lo_before_commit,
.lo_after_commit = databuf_lo_after_commit,
@@ -864,6 +851,7 @@ const struct gfs2_log_operations gfs2_databuf_lops = {
const struct gfs2_log_operations *gfs2_log_ops[] = {
&gfs2_databuf_lops,
&gfs2_buf_lops,
+ &gfs2_rg_lops,
&gfs2_revoke_lops,
NULL,
};
diff --git a/trunk/fs/gfs2/lops.h b/trunk/fs/gfs2/lops.h
index 9ca2e6438419..87e062e05c92 100644
--- a/trunk/fs/gfs2/lops.h
+++ b/trunk/fs/gfs2/lops.h
@@ -23,6 +23,7 @@
extern const struct gfs2_log_operations gfs2_glock_lops;
extern const struct gfs2_log_operations gfs2_buf_lops;
extern const struct gfs2_log_operations gfs2_revoke_lops;
+extern const struct gfs2_log_operations gfs2_rg_lops;
extern const struct gfs2_log_operations gfs2_databuf_lops;
extern const struct gfs2_log_operations *gfs2_log_ops[];
diff --git a/trunk/fs/gfs2/meta_io.c b/trunk/fs/gfs2/meta_io.c
index 0da390686c08..1a89afb68472 100644
--- a/trunk/fs/gfs2/meta_io.c
+++ b/trunk/fs/gfs2/meta_io.c
@@ -296,6 +296,10 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
if (bd) {
spin_lock(&sdp->sd_ail_lock);
if (bd->bd_tr) {
+ gfs2_remove_from_ail(bd);
+ bh->b_private = NULL;
+ bd->bd_bh = NULL;
+ bd->bd_blkno = bh->b_blocknr;
gfs2_trans_add_revoke(sdp, bd);
}
spin_unlock(&sdp->sd_ail_lock);
diff --git a/trunk/fs/gfs2/ops_fstype.c b/trunk/fs/gfs2/ops_fstype.c
index 0262c190b6f9..60ede2a0f43f 100644
--- a/trunk/fs/gfs2/ops_fstype.c
+++ b/trunk/fs/gfs2/ops_fstype.c
@@ -916,16 +916,16 @@ static int init_threads(struct gfs2_sbd *sdp, int undo)
goto fail_quotad;
p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
- if (IS_ERR(p)) {
- error = PTR_ERR(p);
+ error = IS_ERR(p);
+ if (error) {
fs_err(sdp, "can't start logd thread: %d\n", error);
return error;
}
sdp->sd_logd_process = p;
p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
- if (IS_ERR(p)) {
- error = PTR_ERR(p);
+ error = IS_ERR(p);
+ if (error) {
fs_err(sdp, "can't start quotad thread: %d\n", error);
goto fail;
}
diff --git a/trunk/fs/gfs2/quota.c b/trunk/fs/gfs2/quota.c
index 3768c2f40e43..c253b13722e8 100644
--- a/trunk/fs/gfs2/quota.c
+++ b/trunk/fs/gfs2/quota.c
@@ -1154,6 +1154,11 @@ int gfs2_quota_sync(struct super_block *sb, int type)
return error;
}
+static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
+{
+ return gfs2_quota_sync(sb, type);
+}
+
int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
struct gfs2_quota_data *qd;
@@ -1409,7 +1414,7 @@ int gfs2_quotad(void *data)
&tune->gt_statfs_quantum);
/* Update quota file */
- quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
+ quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t,
"ad_timeo, &tune->gt_quota_quantum);
/* Check for & recover partially truncated inodes */
diff --git a/trunk/fs/gfs2/rgrp.c b/trunk/fs/gfs2/rgrp.c
index 69317435faa7..9809156e3d04 100644
--- a/trunk/fs/gfs2/rgrp.c
+++ b/trunk/fs/gfs2/rgrp.c
@@ -1288,15 +1288,13 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
minlen = max_t(u64, r.minlen,
q->limits.discard_granularity) >> bs_shift;
- if (end <= start || minlen > sdp->sd_max_rg_data)
- return -EINVAL;
-
rgd = gfs2_blk2rgrpd(sdp, start, 0);
- rgd_end = gfs2_blk2rgrpd(sdp, end, 0);
+ rgd_end = gfs2_blk2rgrpd(sdp, end - 1, 0);
- if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
- && (start > rgd_end->rd_data0 + rgd_end->rd_data))
- return -EINVAL; /* start is beyond the end of the fs */
+ if (end <= start ||
+ minlen > sdp->sd_max_rg_data ||
+ start > rgd_end->rd_data0 + rgd_end->rd_data)
+ return -EINVAL;
while (1) {
@@ -1338,7 +1336,7 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
}
out:
- r.len = trimmed << bs_shift;
+ r.len = trimmed << 9;
if (copy_to_user(argp, &r, sizeof(r)))
return -EFAULT;
diff --git a/trunk/fs/gfs2/trans.c b/trunk/fs/gfs2/trans.c
index 2b20d7046bf3..7374907742a8 100644
--- a/trunk/fs/gfs2/trans.c
+++ b/trunk/fs/gfs2/trans.c
@@ -270,12 +270,19 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
+ struct gfs2_glock *gl = bd->bd_gl;
struct gfs2_trans *tr = current->journal_info;
BUG_ON(!list_empty(&bd->bd_list));
- gfs2_add_revoke(sdp, bd);
+ BUG_ON(!list_empty(&bd->bd_ail_st_list));
+ BUG_ON(!list_empty(&bd->bd_ail_gl_list));
+ bd->bd_ops = &gfs2_revoke_lops;
tr->tr_touched = 1;
tr->tr_num_revoke++;
+ sdp->sd_log_num_revoke++;
+ atomic_inc(&gl->gl_revokes);
+ set_bit(GLF_LFLUSH, &gl->gl_flags);
+ list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
}
void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
diff --git a/trunk/fs/hfs/dir.c b/trunk/fs/hfs/dir.c
index 145566851e7a..e0101b6fb0d7 100644
--- a/trunk/fs/hfs/dir.c
+++ b/trunk/fs/hfs/dir.c
@@ -51,9 +51,9 @@ static struct dentry *hfs_lookup(struct inode *dir, struct dentry *dentry,
/*
* hfs_readdir
*/
-static int hfs_readdir(struct file *file, struct dir_context *ctx)
+static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
int len, err;
char strbuf[HFS_MAX_NAMELEN];
@@ -62,7 +62,7 @@ static int hfs_readdir(struct file *file, struct dir_context *ctx)
struct hfs_readdir_data *rd;
u16 type;
- if (ctx->pos >= inode->i_size)
+ if (filp->f_pos >= inode->i_size)
return 0;
err = hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
@@ -73,13 +73,14 @@ static int hfs_readdir(struct file *file, struct dir_context *ctx)
if (err)
goto out;
- if (ctx->pos == 0) {
+ switch ((u32)filp->f_pos) {
+ case 0:
/* This is completely artificial... */
- if (!dir_emit_dot(file, ctx))
+ if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR))
goto out;
- ctx->pos = 1;
- }
- if (ctx->pos == 1) {
+ filp->f_pos++;
+ /* fall through */
+ case 1:
if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
err = -EIO;
goto out;
@@ -96,16 +97,18 @@ static int hfs_readdir(struct file *file, struct dir_context *ctx)
// err = -EIO;
// goto out;
//}
- if (!dir_emit(ctx, "..", 2,
+ if (filldir(dirent, "..", 2, 1,
be32_to_cpu(entry.thread.ParID), DT_DIR))
goto out;
- ctx->pos = 2;
+ filp->f_pos++;
+ /* fall through */
+ default:
+ if (filp->f_pos >= inode->i_size)
+ goto out;
+ err = hfs_brec_goto(&fd, filp->f_pos - 1);
+ if (err)
+ goto out;
}
- if (ctx->pos >= inode->i_size)
- goto out;
- err = hfs_brec_goto(&fd, ctx->pos - 1);
- if (err)
- goto out;
for (;;) {
if (be32_to_cpu(fd.key->cat.ParID) != inode->i_ino) {
@@ -128,7 +131,7 @@ static int hfs_readdir(struct file *file, struct dir_context *ctx)
err = -EIO;
goto out;
}
- if (!dir_emit(ctx, strbuf, len,
+ if (filldir(dirent, strbuf, len, filp->f_pos,
be32_to_cpu(entry.dir.DirID), DT_DIR))
break;
} else if (type == HFS_CDR_FIL) {
@@ -137,7 +140,7 @@ static int hfs_readdir(struct file *file, struct dir_context *ctx)
err = -EIO;
goto out;
}
- if (!dir_emit(ctx, strbuf, len,
+ if (filldir(dirent, strbuf, len, filp->f_pos,
be32_to_cpu(entry.file.FlNum), DT_REG))
break;
} else {
@@ -145,22 +148,22 @@ static int hfs_readdir(struct file *file, struct dir_context *ctx)
err = -EIO;
goto out;
}
- ctx->pos++;
- if (ctx->pos >= inode->i_size)
+ filp->f_pos++;
+ if (filp->f_pos >= inode->i_size)
goto out;
err = hfs_brec_goto(&fd, 1);
if (err)
goto out;
}
- rd = file->private_data;
+ rd = filp->private_data;
if (!rd) {
rd = kmalloc(sizeof(struct hfs_readdir_data), GFP_KERNEL);
if (!rd) {
err = -ENOMEM;
goto out;
}
- file->private_data = rd;
- rd->file = file;
+ filp->private_data = rd;
+ rd->file = filp;
list_add(&rd->list, &HFS_I(inode)->open_dir_list);
}
memcpy(&rd->key, &fd.key, sizeof(struct hfs_cat_key));
@@ -303,7 +306,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
const struct file_operations hfs_dir_operations = {
.read = generic_read_dir,
- .iterate = hfs_readdir,
+ .readdir = hfs_readdir,
.llseek = generic_file_llseek,
.release = hfs_dir_release,
};
diff --git a/trunk/fs/hfsplus/dir.c b/trunk/fs/hfsplus/dir.c
index d8ce4bd17fc5..a37ac934732f 100644
--- a/trunk/fs/hfsplus/dir.c
+++ b/trunk/fs/hfsplus/dir.c
@@ -121,9 +121,9 @@ static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry,
return ERR_PTR(err);
}
-static int hfsplus_readdir(struct file *file, struct dir_context *ctx)
+static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
int len, err;
char strbuf[HFSPLUS_MAX_STRLEN + 1];
@@ -132,7 +132,7 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx)
struct hfsplus_readdir_data *rd;
u16 type;
- if (file->f_pos >= inode->i_size)
+ if (filp->f_pos >= inode->i_size)
return 0;
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
@@ -143,13 +143,14 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx)
if (err)
goto out;
- if (ctx->pos == 0) {
+ switch ((u32)filp->f_pos) {
+ case 0:
/* This is completely artificial... */
- if (!dir_emit_dot(file, ctx))
+ if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR))
goto out;
- ctx->pos = 1;
- }
- if (ctx->pos == 1) {
+ filp->f_pos++;
+ /* fall through */
+ case 1:
if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
err = -EIO;
goto out;
@@ -167,16 +168,19 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx)
err = -EIO;
goto out;
}
- if (!dir_emit(ctx, "..", 2,
+ if (filldir(dirent, "..", 2, 1,
be32_to_cpu(entry.thread.parentID), DT_DIR))
goto out;
- ctx->pos = 2;
+ filp->f_pos++;
+ /* fall through */
+ default:
+ if (filp->f_pos >= inode->i_size)
+ goto out;
+ err = hfs_brec_goto(&fd, filp->f_pos - 1);
+ if (err)
+ goto out;
}
- if (ctx->pos >= inode->i_size)
- goto out;
- err = hfs_brec_goto(&fd, ctx->pos - 1);
- if (err)
- goto out;
+
for (;;) {
if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) {
pr_err("walked past end of dir\n");
@@ -207,7 +211,7 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx)
HFSPLUS_SB(sb)->hidden_dir->i_ino ==
be32_to_cpu(entry.folder.id))
goto next;
- if (!dir_emit(ctx, strbuf, len,
+ if (filldir(dirent, strbuf, len, filp->f_pos,
be32_to_cpu(entry.folder.id), DT_DIR))
break;
} else if (type == HFSPLUS_FILE) {
@@ -216,7 +220,7 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx)
err = -EIO;
goto out;
}
- if (!dir_emit(ctx, strbuf, len,
+ if (filldir(dirent, strbuf, len, filp->f_pos,
be32_to_cpu(entry.file.id), DT_REG))
break;
} else {
@@ -225,22 +229,22 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx)
goto out;
}
next:
- ctx->pos++;
- if (ctx->pos >= inode->i_size)
+ filp->f_pos++;
+ if (filp->f_pos >= inode->i_size)
goto out;
err = hfs_brec_goto(&fd, 1);
if (err)
goto out;
}
- rd = file->private_data;
+ rd = filp->private_data;
if (!rd) {
rd = kmalloc(sizeof(struct hfsplus_readdir_data), GFP_KERNEL);
if (!rd) {
err = -ENOMEM;
goto out;
}
- file->private_data = rd;
- rd->file = file;
+ filp->private_data = rd;
+ rd->file = filp;
list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list);
}
memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key));
@@ -534,7 +538,7 @@ const struct inode_operations hfsplus_dir_inode_operations = {
const struct file_operations hfsplus_dir_operations = {
.fsync = hfsplus_file_fsync,
.read = generic_read_dir,
- .iterate = hfsplus_readdir,
+ .readdir = hfsplus_readdir,
.unlocked_ioctl = hfsplus_ioctl,
.llseek = generic_file_llseek,
.release = hfsplus_dir_release,
diff --git a/trunk/fs/hostfs/hostfs_kern.c b/trunk/fs/hostfs/hostfs_kern.c
index cddb05217512..32f35f187989 100644
--- a/trunk/fs/hostfs/hostfs_kern.c
+++ b/trunk/fs/hostfs/hostfs_kern.c
@@ -277,7 +277,7 @@ static const struct super_operations hostfs_sbops = {
.show_options = hostfs_show_options,
};
-int hostfs_readdir(struct file *file, struct dir_context *ctx)
+int hostfs_readdir(struct file *file, void *ent, filldir_t filldir)
{
void *dir;
char *name;
@@ -292,11 +292,12 @@ int hostfs_readdir(struct file *file, struct dir_context *ctx)
__putname(name);
if (dir == NULL)
return -error;
- next = ctx->pos;
+ next = file->f_pos;
while ((name = read_dir(dir, &next, &ino, &len, &type)) != NULL) {
- if (!dir_emit(ctx, name, len, ino, type))
- break;
- ctx->pos = next;
+ error = (*filldir)(ent, name, len, file->f_pos,
+ ino, type);
+ if (error) break;
+ file->f_pos = next;
}
close_dir(dir);
return 0;
@@ -392,7 +393,7 @@ static const struct file_operations hostfs_file_fops = {
static const struct file_operations hostfs_dir_fops = {
.llseek = generic_file_llseek,
- .iterate = hostfs_readdir,
+ .readdir = hostfs_readdir,
.read = generic_read_dir,
};
diff --git a/trunk/fs/hpfs/dir.c b/trunk/fs/hpfs/dir.c
index 292b1acb9b81..834ac13c04b7 100644
--- a/trunk/fs/hpfs/dir.c
+++ b/trunk/fs/hpfs/dir.c
@@ -57,14 +57,14 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
return -ESPIPE;
}
-static int hpfs_readdir(struct file *file, struct dir_context *ctx)
+static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
struct quad_buffer_head qbh;
struct hpfs_dirent *de;
int lc;
- loff_t next_pos;
+ long old_pos;
unsigned char *tempname;
int c1, c2 = 0;
int ret = 0;
@@ -105,11 +105,11 @@ static int hpfs_readdir(struct file *file, struct dir_context *ctx)
}
}
lc = hpfs_sb(inode->i_sb)->sb_lowercase;
- if (ctx->pos == 12) { /* diff -r requires this (note, that diff -r */
- ctx->pos = 13; /* also fails on msdos filesystem in 2.0) */
+ if (filp->f_pos == 12) { /* diff -r requires this (note, that diff -r */
+ filp->f_pos = 13; /* also fails on msdos filesystem in 2.0) */
goto out;
}
- if (ctx->pos == 13) {
+ if (filp->f_pos == 13) {
ret = -ENOENT;
goto out;
}
@@ -120,34 +120,33 @@ static int hpfs_readdir(struct file *file, struct dir_context *ctx)
accepted by filldir, but what can I do?
maybe killall -9 ls helps */
if (hpfs_sb(inode->i_sb)->sb_chk)
- if (hpfs_stop_cycles(inode->i_sb, ctx->pos, &c1, &c2, "hpfs_readdir")) {
+ if (hpfs_stop_cycles(inode->i_sb, filp->f_pos, &c1, &c2, "hpfs_readdir")) {
ret = -EFSERROR;
goto out;
}
- if (ctx->pos == 12)
+ if (filp->f_pos == 12)
goto out;
- if (ctx->pos == 3 || ctx->pos == 4 || ctx->pos == 5) {
- printk("HPFS: warning: pos==%d\n",(int)ctx->pos);
+ if (filp->f_pos == 3 || filp->f_pos == 4 || filp->f_pos == 5) {
+ printk("HPFS: warning: pos==%d\n",(int)filp->f_pos);
goto out;
}
- if (ctx->pos == 0) {
- if (!dir_emit_dot(file, ctx))
+ if (filp->f_pos == 0) {
+ if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0)
goto out;
- ctx->pos = 11;
+ filp->f_pos = 11;
}
- if (ctx->pos == 11) {
- if (!dir_emit(ctx, "..", 2, hpfs_inode->i_parent_dir, DT_DIR))
+ if (filp->f_pos == 11) {
+ if (filldir(dirent, "..", 2, filp->f_pos, hpfs_inode->i_parent_dir, DT_DIR) < 0)
goto out;
- ctx->pos = 1;
+ filp->f_pos = 1;
}
- if (ctx->pos == 1) {
- ctx->pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1;
- hpfs_add_pos(inode, &file->f_pos);
- file->f_version = inode->i_version;
+ if (filp->f_pos == 1) {
+ filp->f_pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1;
+ hpfs_add_pos(inode, &filp->f_pos);
+ filp->f_version = inode->i_version;
}
- next_pos = ctx->pos;
- if (!(de = map_pos_dirent(inode, &next_pos, &qbh))) {
- ctx->pos = next_pos;
+ old_pos = filp->f_pos;
+ if (!(de = map_pos_dirent(inode, &filp->f_pos, &qbh))) {
ret = -EIOERROR;
goto out;
}
@@ -155,21 +154,20 @@ static int hpfs_readdir(struct file *file, struct dir_context *ctx)
if (hpfs_sb(inode->i_sb)->sb_chk) {
if (de->first && !de->last && (de->namelen != 2
|| de ->name[0] != 1 || de->name[1] != 1))
- hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08lx", (unsigned long)ctx->pos);
+ hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08lx", old_pos);
if (de->last && (de->namelen != 1 || de ->name[0] != 255))
- hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", (unsigned long)ctx->pos);
+ hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", old_pos);
}
hpfs_brelse4(&qbh);
- ctx->pos = next_pos;
goto again;
}
tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3);
- if (!dir_emit(ctx, tempname, de->namelen, le32_to_cpu(de->fnode), DT_UNKNOWN)) {
+ if (filldir(dirent, tempname, de->namelen, old_pos, le32_to_cpu(de->fnode), DT_UNKNOWN) < 0) {
+ filp->f_pos = old_pos;
if (tempname != de->name) kfree(tempname);
hpfs_brelse4(&qbh);
goto out;
}
- ctx->pos = next_pos;
if (tempname != de->name) kfree(tempname);
hpfs_brelse4(&qbh);
}
@@ -324,7 +322,7 @@ const struct file_operations hpfs_dir_ops =
{
.llseek = hpfs_dir_lseek,
.read = generic_read_dir,
- .iterate = hpfs_readdir,
+ .readdir = hpfs_readdir,
.release = hpfs_dir_release,
.fsync = hpfs_file_fsync,
};
diff --git a/trunk/fs/hppfs/hppfs.c b/trunk/fs/hppfs/hppfs.c
index fc90ab11c340..cd3e38972c86 100644
--- a/trunk/fs/hppfs/hppfs.c
+++ b/trunk/fs/hppfs/hppfs.c
@@ -542,8 +542,8 @@ static const struct file_operations hppfs_file_fops = {
};
struct hppfs_dirent {
- struct dir_context ctx;
- struct dir_context *caller;
+ void *vfs_dirent;
+ filldir_t filldir;
struct dentry *dentry;
};
@@ -555,29 +555,34 @@ static int hppfs_filldir(void *d, const char *name, int size,
if (file_removed(dirent->dentry, name))
return 0;
- dirent->caller->pos = dirent->ctx.pos;
- return !dir_emit(dirent->caller, name, size, inode, type);
+ return (*dirent->filldir)(dirent->vfs_dirent, name, size, offset,
+ inode, type);
}
-static int hppfs_readdir(struct file *file, struct dir_context *ctx)
+static int hppfs_readdir(struct file *file, void *ent, filldir_t filldir)
{
struct hppfs_private *data = file->private_data;
struct file *proc_file = data->proc_file;
- struct hppfs_dirent d = {
- .ctx.actor = hppfs_filldir,
- .caller = ctx,
- .dentry = file->f_path.dentry
- };
+ int (*readdir)(struct file *, void *, filldir_t);
+ struct hppfs_dirent dirent = ((struct hppfs_dirent)
+ { .vfs_dirent = ent,
+ .filldir = filldir,
+ .dentry = file->f_path.dentry
+ });
int err;
- proc_file->f_pos = ctx->pos;
- err = iterate_dir(proc_file, &d.ctx);
- ctx->pos = d.ctx.pos;
+
+ readdir = file_inode(proc_file)->i_fop->readdir;
+
+ proc_file->f_pos = file->f_pos;
+ err = (*readdir)(proc_file, &dirent, hppfs_filldir);
+ file->f_pos = proc_file->f_pos;
+
return err;
}
static const struct file_operations hppfs_dir_fops = {
.owner = NULL,
- .iterate = hppfs_readdir,
+ .readdir = hppfs_readdir,
.open = hppfs_dir_open,
.llseek = default_llseek,
.release = hppfs_release,
diff --git a/trunk/fs/internal.h b/trunk/fs/internal.h
index 68121584ae37..eaa75f75b625 100644
--- a/trunk/fs/internal.h
+++ b/trunk/fs/internal.h
@@ -131,12 +131,6 @@ extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
*/
extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
-/*
- * splice.c
- */
-extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
- loff_t *opos, size_t len, unsigned int flags);
-
/*
* pipe.c
*/
diff --git a/trunk/fs/isofs/dir.c b/trunk/fs/isofs/dir.c
index b943cbd963bb..a7d5c3c3d4e6 100644
--- a/trunk/fs/isofs/dir.c
+++ b/trunk/fs/isofs/dir.c
@@ -78,8 +78,8 @@ int get_acorn_filename(struct iso_directory_record *de,
/*
* This should _really_ be cleaned up some day..
*/
-static int do_isofs_readdir(struct inode *inode, struct file *file,
- struct dir_context *ctx,
+static int do_isofs_readdir(struct inode *inode, struct file *filp,
+ void *dirent, filldir_t filldir,
char *tmpname, struct iso_directory_record *tmpde)
{
unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
@@ -94,10 +94,10 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
struct iso_directory_record *de;
struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb);
- offset = ctx->pos & (bufsize - 1);
- block = ctx->pos >> bufbits;
+ offset = filp->f_pos & (bufsize - 1);
+ block = filp->f_pos >> bufbits;
- while (ctx->pos < inode->i_size) {
+ while (filp->f_pos < inode->i_size) {
int de_len;
if (!bh) {
@@ -108,7 +108,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
de = (struct iso_directory_record *) (bh->b_data + offset);
- de_len = *(unsigned char *)de;
+ de_len = *(unsigned char *) de;
/*
* If the length byte is zero, we should move on to the next
@@ -119,8 +119,8 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
if (de_len == 0) {
brelse(bh);
bh = NULL;
- ctx->pos = (ctx->pos + ISOFS_BLOCK_SIZE) & ~(ISOFS_BLOCK_SIZE - 1);
- block = ctx->pos >> bufbits;
+ filp->f_pos = (filp->f_pos + ISOFS_BLOCK_SIZE) & ~(ISOFS_BLOCK_SIZE - 1);
+ block = filp->f_pos >> bufbits;
offset = 0;
continue;
}
@@ -164,16 +164,16 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
if (de->flags[-sbi->s_high_sierra] & 0x80) {
first_de = 0;
- ctx->pos += de_len;
+ filp->f_pos += de_len;
continue;
}
first_de = 1;
/* Handle the case of the '.' directory */
if (de->name_len[0] == 1 && de->name[0] == 0) {
- if (!dir_emit_dot(file, ctx))
+ if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0)
break;
- ctx->pos += de_len;
+ filp->f_pos += de_len;
continue;
}
@@ -181,9 +181,10 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
/* Handle the case of the '..' directory */
if (de->name_len[0] == 1 && de->name[0] == 1) {
- if (!dir_emit_dotdot(file, ctx))
+ inode_number = parent_ino(filp->f_path.dentry);
+ if (filldir(dirent, "..", 2, filp->f_pos, inode_number, DT_DIR) < 0)
break;
- ctx->pos += de_len;
+ filp->f_pos += de_len;
continue;
}
@@ -197,7 +198,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
if ((sbi->s_hide && (de->flags[-sbi->s_high_sierra] & 1)) ||
(!sbi->s_showassoc &&
(de->flags[-sbi->s_high_sierra] & 4))) {
- ctx->pos += de_len;
+ filp->f_pos += de_len;
continue;
}
@@ -229,10 +230,10 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
}
}
if (len > 0) {
- if (!dir_emit(ctx, p, len, inode_number, DT_UNKNOWN))
+ if (filldir(dirent, p, len, filp->f_pos, inode_number, DT_UNKNOWN) < 0)
break;
}
- ctx->pos += de_len;
+ filp->f_pos += de_len;
continue;
}
@@ -246,12 +247,13 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
* handling split directory entries.. The real work is done by
* "do_isofs_readdir()".
*/
-static int isofs_readdir(struct file *file, struct dir_context *ctx)
+static int isofs_readdir(struct file *filp,
+ void *dirent, filldir_t filldir)
{
int result;
char *tmpname;
struct iso_directory_record *tmpde;
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
tmpname = (char *)__get_free_page(GFP_KERNEL);
if (tmpname == NULL)
@@ -259,7 +261,7 @@ static int isofs_readdir(struct file *file, struct dir_context *ctx)
tmpde = (struct iso_directory_record *) (tmpname+1024);
- result = do_isofs_readdir(inode, file, ctx, tmpname, tmpde);
+ result = do_isofs_readdir(inode, filp, dirent, filldir, tmpname, tmpde);
free_page((unsigned long) tmpname);
return result;
@@ -269,7 +271,7 @@ const struct file_operations isofs_dir_operations =
{
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = isofs_readdir,
+ .readdir = isofs_readdir,
};
/*
diff --git a/trunk/fs/jbd/transaction.c b/trunk/fs/jbd/transaction.c
index be0c39b66fe0..e3e255c0a509 100644
--- a/trunk/fs/jbd/transaction.c
+++ b/trunk/fs/jbd/transaction.c
@@ -2019,20 +2019,16 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
* void journal_invalidatepage() - invalidate a journal page
* @journal: journal to use for flush
* @page: page to flush
- * @offset: offset of the range to invalidate
- * @length: length of the range to invalidate
+ * @offset: length of page to invalidate.
*
- * Reap page buffers containing data in specified range in page.
+ * Reap page buffers containing data after offset in page.
*/
void journal_invalidatepage(journal_t *journal,
struct page *page,
- unsigned int offset,
- unsigned int length)
+ unsigned long offset)
{
struct buffer_head *head, *bh, *next;
- unsigned int stop = offset + length;
unsigned int curr_off = 0;
- int partial_page = (offset || length < PAGE_CACHE_SIZE);
int may_free = 1;
if (!PageLocked(page))
@@ -2040,8 +2036,6 @@ void journal_invalidatepage(journal_t *journal,
if (!page_has_buffers(page))
return;
- BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
-
/* We will potentially be playing with lists other than just the
* data lists (especially for journaled data mode), so be
* cautious in our locking. */
@@ -2051,14 +2045,11 @@ void journal_invalidatepage(journal_t *journal,
unsigned int next_off = curr_off + bh->b_size;
next = bh->b_this_page;
- if (next_off > stop)
- return;
-
if (offset <= curr_off) {
/* This block is wholly outside the truncation point */
lock_buffer(bh);
may_free &= journal_unmap_buffer(journal, bh,
- partial_page);
+ offset > 0);
unlock_buffer(bh);
}
curr_off = next_off;
@@ -2066,7 +2057,7 @@ void journal_invalidatepage(journal_t *journal,
} while (bh != head);
- if (!partial_page) {
+ if (!offset) {
if (may_free && try_to_free_buffers(page))
J_ASSERT(!page_has_buffers(page));
}
diff --git a/trunk/fs/jbd2/Kconfig b/trunk/fs/jbd2/Kconfig
index 5a9f5534d57b..69a48c2944da 100644
--- a/trunk/fs/jbd2/Kconfig
+++ b/trunk/fs/jbd2/Kconfig
@@ -20,7 +20,7 @@ config JBD2
config JBD2_DEBUG
bool "JBD2 (ext4) debugging support"
- depends on JBD2
+ depends on JBD2 && DEBUG_FS
help
If you are using the ext4 journaled file system (or
potentially any other filesystem/device using JBD2), this option
@@ -29,7 +29,7 @@ config JBD2_DEBUG
By default, the debugging output will be turned off.
If you select Y here, then you will be able to turn on debugging
- with "echo N > /sys/module/jbd2/parameters/jbd2_debug", where N is a
+ with "echo N > /sys/kernel/debug/jbd2/jbd2-debug", where N is a
number between 1 and 5. The higher the number, the more debugging
output is generated. To turn debugging off again, do
- "echo 0 > /sys/module/jbd2/parameters/jbd2_debug".
+ "echo 0 > /sys/kernel/debug/jbd2/jbd2-debug".
diff --git a/trunk/fs/jbd2/checkpoint.c b/trunk/fs/jbd2/checkpoint.c
index 7f34f4716165..c78841ee81cf 100644
--- a/trunk/fs/jbd2/checkpoint.c
+++ b/trunk/fs/jbd2/checkpoint.c
@@ -120,8 +120,8 @@ void __jbd2_log_wait_for_space(journal_t *journal)
int nblocks, space_left;
/* assert_spin_locked(&journal->j_state_lock); */
- nblocks = jbd2_space_needed(journal);
- while (jbd2_log_space_left(journal) < nblocks) {
+ nblocks = jbd_space_needed(journal);
+ while (__jbd2_log_space_left(journal) < nblocks) {
if (journal->j_flags & JBD2_ABORT)
return;
write_unlock(&journal->j_state_lock);
@@ -140,8 +140,8 @@ void __jbd2_log_wait_for_space(journal_t *journal)
*/
write_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
- nblocks = jbd2_space_needed(journal);
- space_left = jbd2_log_space_left(journal);
+ nblocks = jbd_space_needed(journal);
+ space_left = __jbd2_log_space_left(journal);
if (space_left < nblocks) {
int chkpt = journal->j_checkpoint_transactions != NULL;
tid_t tid = 0;
@@ -156,15 +156,7 @@ void __jbd2_log_wait_for_space(journal_t *journal)
/* We were able to recover space; yay! */
;
} else if (tid) {
- /*
- * jbd2_journal_commit_transaction() may want
- * to take the checkpoint_mutex if JBD2_FLUSHED
- * is set. So we need to temporarily drop it.
- */
- mutex_unlock(&journal->j_checkpoint_mutex);
jbd2_log_wait_commit(journal, tid);
- write_lock(&journal->j_state_lock);
- continue;
} else {
printk(KERN_ERR "%s: needed %d blocks and "
"only had %d space available\n",
@@ -633,6 +625,10 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
__jbd2_journal_drop_transaction(journal, transaction);
jbd2_journal_free_transaction(transaction);
+
+ /* Just in case anybody was waiting for more transactions to be
+ checkpointed... */
+ wake_up(&journal->j_wait_logspace);
ret = 1;
out:
return ret;
@@ -694,7 +690,9 @@ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transact
J_ASSERT(transaction->t_state == T_FINISHED);
J_ASSERT(transaction->t_buffers == NULL);
J_ASSERT(transaction->t_forget == NULL);
+ J_ASSERT(transaction->t_iobuf_list == NULL);
J_ASSERT(transaction->t_shadow_list == NULL);
+ J_ASSERT(transaction->t_log_list == NULL);
J_ASSERT(transaction->t_checkpoint_list == NULL);
J_ASSERT(transaction->t_checkpoint_io_list == NULL);
J_ASSERT(atomic_read(&transaction->t_updates) == 0);
diff --git a/trunk/fs/jbd2/commit.c b/trunk/fs/jbd2/commit.c
index 559bec1a37b4..0f53946f13c1 100644
--- a/trunk/fs/jbd2/commit.c
+++ b/trunk/fs/jbd2/commit.c
@@ -30,22 +30,15 @@
#include
/*
- * IO end handler for temporary buffer_heads handling writes to the journal.
+ * Default IO end handler for temporary BJ_IO buffer_heads.
*/
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
- struct buffer_head *orig_bh = bh->b_private;
-
BUFFER_TRACE(bh, "");
if (uptodate)
set_buffer_uptodate(bh);
else
clear_buffer_uptodate(bh);
- if (orig_bh) {
- clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
- smp_mb__after_clear_bit();
- wake_up_bit(&orig_bh->b_state, BH_Shadow);
- }
unlock_buffer(bh);
}
@@ -92,7 +85,8 @@ static void release_buffer_page(struct buffer_head *bh)
__brelse(bh);
}
-static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
+static void jbd2_commit_block_csum_set(journal_t *j,
+ struct journal_head *descriptor)
{
struct commit_header *h;
__u32 csum;
@@ -100,11 +94,12 @@ static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
return;
- h = (struct commit_header *)(bh->b_data);
+ h = (struct commit_header *)(jh2bh(descriptor)->b_data);
h->h_chksum_type = 0;
h->h_chksum_size = 0;
h->h_chksum[0] = 0;
- csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
+ csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
+ j->j_blocksize);
h->h_chksum[0] = cpu_to_be32(csum);
}
@@ -121,6 +116,7 @@ static int journal_submit_commit_record(journal_t *journal,
struct buffer_head **cbh,
__u32 crc32_sum)
{
+ struct journal_head *descriptor;
struct commit_header *tmp;
struct buffer_head *bh;
int ret;
@@ -131,10 +127,12 @@ static int journal_submit_commit_record(journal_t *journal,
if (is_journal_aborted(journal))
return 0;
- bh = jbd2_journal_get_descriptor_buffer(journal);
- if (!bh)
+ descriptor = jbd2_journal_get_descriptor_buffer(journal);
+ if (!descriptor)
return 1;
+ bh = jh2bh(descriptor);
+
tmp = (struct commit_header *)bh->b_data;
tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
@@ -148,9 +146,9 @@ static int journal_submit_commit_record(journal_t *journal,
tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
}
- jbd2_commit_block_csum_set(journal, bh);
+ jbd2_commit_block_csum_set(journal, descriptor);
- BUFFER_TRACE(bh, "submit commit block");
+ JBUFFER_TRACE(descriptor, "submit commit block");
lock_buffer(bh);
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
@@ -182,6 +180,7 @@ static int journal_wait_on_commit_record(journal_t *journal,
if (unlikely(!buffer_uptodate(bh)))
ret = -EIO;
put_bh(bh); /* One for getblk() */
+ jbd2_journal_put_journal_head(bh2jh(bh));
return ret;
}
@@ -322,7 +321,7 @@ static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
}
static void jbd2_descr_block_csum_set(journal_t *j,
- struct buffer_head *bh)
+ struct journal_head *descriptor)
{
struct jbd2_journal_block_tail *tail;
__u32 csum;
@@ -330,10 +329,12 @@ static void jbd2_descr_block_csum_set(journal_t *j,
if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
return;
- tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize -
+ tail = (struct jbd2_journal_block_tail *)
+ (jh2bh(descriptor)->b_data + j->j_blocksize -
sizeof(struct jbd2_journal_block_tail));
tail->t_checksum = 0;
- csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
+ csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
+ j->j_blocksize);
tail->t_checksum = cpu_to_be32(csum);
}
@@ -342,21 +343,20 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
{
struct page *page = bh->b_page;
__u8 *addr;
- __u32 csum32;
+ __u32 csum;
if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
return;
sequence = cpu_to_be32(sequence);
addr = kmap_atomic(page);
- csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
- sizeof(sequence));
- csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
- bh->b_size);
+ csum = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
+ sizeof(sequence));
+ csum = jbd2_chksum(j, csum, addr + offset_in_page(bh->b_data),
+ bh->b_size);
kunmap_atomic(addr);
- /* We only have space to store the lower 16 bits of the crc32c. */
- tag->t_checksum = cpu_to_be16(csum32);
+ tag->t_checksum = cpu_to_be32(csum);
}
/*
* jbd2_journal_commit_transaction
@@ -368,8 +368,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
{
struct transaction_stats_s stats;
transaction_t *commit_transaction;
- struct journal_head *jh;
- struct buffer_head *descriptor;
+ struct journal_head *jh, *new_jh, *descriptor;
struct buffer_head **wbuf = journal->j_wbuf;
int bufs;
int flags;
@@ -393,8 +392,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
tid_t first_tid;
int update_tail;
int csum_size = 0;
- LIST_HEAD(io_bufs);
- LIST_HEAD(log_bufs);
if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
csum_size = sizeof(struct jbd2_journal_block_tail);
@@ -427,13 +424,13 @@ void jbd2_journal_commit_transaction(journal_t *journal)
J_ASSERT(journal->j_committing_transaction == NULL);
commit_transaction = journal->j_running_transaction;
+ J_ASSERT(commit_transaction->t_state == T_RUNNING);
trace_jbd2_start_commit(journal, commit_transaction);
jbd_debug(1, "JBD2: starting commit of transaction %d\n",
commit_transaction->t_tid);
write_lock(&journal->j_state_lock);
- J_ASSERT(commit_transaction->t_state == T_RUNNING);
commit_transaction->t_state = T_LOCKED;
trace_jbd2_commit_locking(journal, commit_transaction);
@@ -523,12 +520,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
*/
jbd2_journal_switch_revoke_table(journal);
- /*
- * Reserved credits cannot be claimed anymore, free them
- */
- atomic_sub(atomic_read(&journal->j_reserved_credits),
- &commit_transaction->t_outstanding_credits);
-
trace_jbd2_commit_flushing(journal, commit_transaction);
stats.run.rs_flushing = jiffies;
stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
@@ -542,7 +533,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
wake_up(&journal->j_wait_transaction_locked);
write_unlock(&journal->j_state_lock);
- jbd_debug(3, "JBD2: commit phase 2a\n");
+ jbd_debug(3, "JBD2: commit phase 2\n");
/*
* Now start flushing things to disk, in the order they appear
@@ -554,10 +545,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
blk_start_plug(&plug);
jbd2_journal_write_revoke_records(journal, commit_transaction,
- &log_bufs, WRITE_SYNC);
+ WRITE_SYNC);
blk_finish_plug(&plug);
- jbd_debug(3, "JBD2: commit phase 2b\n");
+ jbd_debug(3, "JBD2: commit phase 2\n");
/*
* Way to go: we have now written out all of the data for a
@@ -580,8 +571,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
atomic_read(&commit_transaction->t_outstanding_credits));
err = 0;
- bufs = 0;
descriptor = NULL;
+ bufs = 0;
blk_start_plug(&plug);
while (commit_transaction->t_buffers) {
@@ -613,6 +604,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
record the metadata buffer. */
if (!descriptor) {
+ struct buffer_head *bh;
+
J_ASSERT (bufs == 0);
jbd_debug(4, "JBD2: get descriptor\n");
@@ -623,26 +616,26 @@ void jbd2_journal_commit_transaction(journal_t *journal)
continue;
}
+ bh = jh2bh(descriptor);
jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
- (unsigned long long)descriptor->b_blocknr,
- descriptor->b_data);
- header = (journal_header_t *)descriptor->b_data;
+ (unsigned long long)bh->b_blocknr, bh->b_data);
+ header = (journal_header_t *)&bh->b_data[0];
header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
header->h_sequence = cpu_to_be32(commit_transaction->t_tid);
- tagp = &descriptor->b_data[sizeof(journal_header_t)];
- space_left = descriptor->b_size -
- sizeof(journal_header_t);
+ tagp = &bh->b_data[sizeof(journal_header_t)];
+ space_left = bh->b_size - sizeof(journal_header_t);
first_tag = 1;
- set_buffer_jwrite(descriptor);
- set_buffer_dirty(descriptor);
- wbuf[bufs++] = descriptor;
+ set_buffer_jwrite(bh);
+ set_buffer_dirty(bh);
+ wbuf[bufs++] = bh;
/* Record it so that we can wait for IO
completion later */
- BUFFER_TRACE(descriptor, "ph3: file as descriptor");
- jbd2_file_log_bh(&log_bufs, descriptor);
+ BUFFER_TRACE(bh, "ph3: file as descriptor");
+ jbd2_journal_file_buffer(descriptor, commit_transaction,
+ BJ_LogCtl);
}
/* Where is the buffer to be written? */
@@ -665,22 +658,29 @@ void jbd2_journal_commit_transaction(journal_t *journal)
/* Bump b_count to prevent truncate from stumbling over
the shadowed buffer! @@@ This can go if we ever get
- rid of the shadow pairing of buffers. */
+ rid of the BJ_IO/BJ_Shadow pairing of buffers. */
atomic_inc(&jh2bh(jh)->b_count);
+ /* Make a temporary IO buffer with which to write it out
+ (this will requeue both the metadata buffer and the
+ temporary IO buffer). new_bh goes on BJ_IO*/
+
+ set_bit(BH_JWrite, &jh2bh(jh)->b_state);
/*
- * Make a temporary IO buffer with which to write it out
- * (this will requeue the metadata buffer to BJ_Shadow).
+ * akpm: jbd2_journal_write_metadata_buffer() sets
+ * new_bh->b_transaction to commit_transaction.
+ * We need to clean this up before we release new_bh
+ * (which is of type BJ_IO)
*/
- set_bit(BH_JWrite, &jh2bh(jh)->b_state);
JBUFFER_TRACE(jh, "ph3: write metadata");
flags = jbd2_journal_write_metadata_buffer(commit_transaction,
- jh, &wbuf[bufs], blocknr);
+ jh, &new_jh, blocknr);
if (flags < 0) {
jbd2_journal_abort(journal, flags);
continue;
}
- jbd2_file_log_bh(&io_bufs, wbuf[bufs]);
+ set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
+ wbuf[bufs++] = jh2bh(new_jh);
/* Record the new block's tag in the current descriptor
buffer */
@@ -694,11 +694,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
tag = (journal_block_tag_t *) tagp;
write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
tag->t_flags = cpu_to_be16(tag_flag);
- jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
+ jbd2_block_tag_csum_set(journal, tag, jh2bh(new_jh),
commit_transaction->t_tid);
tagp += tag_bytes;
space_left -= tag_bytes;
- bufs++;
if (first_tag) {
memcpy (tagp, journal->j_uuid, 16);
@@ -810,7 +809,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
the log. Before we can commit it, wait for the IO so far to
complete. Control buffers being written are on the
transaction's t_log_list queue, and metadata buffers are on
- the io_bufs list.
+ the t_iobuf_list queue.
Wait for the buffers in reverse order. That way we are
less likely to be woken up until all IOs have completed, and
@@ -819,33 +818,47 @@ void jbd2_journal_commit_transaction(journal_t *journal)
jbd_debug(3, "JBD2: commit phase 3\n");
- while (!list_empty(&io_bufs)) {
- struct buffer_head *bh = list_entry(io_bufs.prev,
- struct buffer_head,
- b_assoc_buffers);
+ /*
+ * akpm: these are BJ_IO, and j_list_lock is not needed.
+ * See __journal_try_to_free_buffer.
+ */
+wait_for_iobuf:
+ while (commit_transaction->t_iobuf_list != NULL) {
+ struct buffer_head *bh;
- wait_on_buffer(bh);
- cond_resched();
+ jh = commit_transaction->t_iobuf_list->b_tprev;
+ bh = jh2bh(jh);
+ if (buffer_locked(bh)) {
+ wait_on_buffer(bh);
+ goto wait_for_iobuf;
+ }
+ if (cond_resched())
+ goto wait_for_iobuf;
if (unlikely(!buffer_uptodate(bh)))
err = -EIO;
- jbd2_unfile_log_bh(bh);
+
+ clear_buffer_jwrite(bh);
+
+ JBUFFER_TRACE(jh, "ph4: unfile after journal write");
+ jbd2_journal_unfile_buffer(journal, jh);
/*
- * The list contains temporary buffer heads created by
- * jbd2_journal_write_metadata_buffer().
+ * ->t_iobuf_list should contain only dummy buffer_heads
+ * which were created by jbd2_journal_write_metadata_buffer().
*/
BUFFER_TRACE(bh, "dumping temporary bh");
+ jbd2_journal_put_journal_head(jh);
__brelse(bh);
J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
free_buffer_head(bh);
- /* We also have to refile the corresponding shadowed buffer */
+ /* We also have to unlock and free the corresponding
+ shadowed buffer */
jh = commit_transaction->t_shadow_list->b_tprev;
bh = jh2bh(jh);
- clear_buffer_jwrite(bh);
+ clear_bit(BH_JWrite, &bh->b_state);
J_ASSERT_BH(bh, buffer_jbddirty(bh));
- J_ASSERT_BH(bh, !buffer_shadow(bh));
/* The metadata is now released for reuse, but we need
to remember it against this transaction so that when
@@ -853,6 +866,14 @@ void jbd2_journal_commit_transaction(journal_t *journal)
required. */
JBUFFER_TRACE(jh, "file as BJ_Forget");
jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
+ /*
+ * Wake up any transactions which were waiting for this IO to
+ * complete. The barrier must be here so that changes by
+ * jbd2_journal_file_buffer() take effect before wake_up_bit()
+ * does the waitqueue check.
+ */
+ smp_mb();
+ wake_up_bit(&bh->b_state, BH_Unshadow);
JBUFFER_TRACE(jh, "brelse shadowed buffer");
__brelse(bh);
}
@@ -862,19 +883,26 @@ void jbd2_journal_commit_transaction(journal_t *journal)
jbd_debug(3, "JBD2: commit phase 4\n");
/* Here we wait for the revoke record and descriptor record buffers */
- while (!list_empty(&log_bufs)) {
+ wait_for_ctlbuf:
+ while (commit_transaction->t_log_list != NULL) {
struct buffer_head *bh;
- bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
- wait_on_buffer(bh);
- cond_resched();
+ jh = commit_transaction->t_log_list->b_tprev;
+ bh = jh2bh(jh);
+ if (buffer_locked(bh)) {
+ wait_on_buffer(bh);
+ goto wait_for_ctlbuf;
+ }
+ if (cond_resched())
+ goto wait_for_ctlbuf;
if (unlikely(!buffer_uptodate(bh)))
err = -EIO;
BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
clear_buffer_jwrite(bh);
- jbd2_unfile_log_bh(bh);
+ jbd2_journal_unfile_buffer(journal, jh);
+ jbd2_journal_put_journal_head(jh);
__brelse(bh); /* One for getblk */
/* AKPM: bforget here */
}
@@ -924,7 +952,9 @@ void jbd2_journal_commit_transaction(journal_t *journal)
J_ASSERT(list_empty(&commit_transaction->t_inode_list));
J_ASSERT(commit_transaction->t_buffers == NULL);
J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
+ J_ASSERT(commit_transaction->t_iobuf_list == NULL);
J_ASSERT(commit_transaction->t_shadow_list == NULL);
+ J_ASSERT(commit_transaction->t_log_list == NULL);
restart_loop:
/*
diff --git a/trunk/fs/jbd2/journal.c b/trunk/fs/jbd2/journal.c
index 02c7ad9d7a41..95457576e434 100644
--- a/trunk/fs/jbd2/journal.c
+++ b/trunk/fs/jbd2/journal.c
@@ -103,24 +103,6 @@ EXPORT_SYMBOL(jbd2_inode_cache);
static void __journal_abort_soft (journal_t *journal, int errno);
static int jbd2_journal_create_slab(size_t slab_size);
-#ifdef CONFIG_JBD2_DEBUG
-void __jbd2_debug(int level, const char *file, const char *func,
- unsigned int line, const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- if (level > jbd2_journal_enable_debug)
- return;
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
- printk(KERN_DEBUG "%s: (%s, %u): %pV\n", file, func, line, &vaf);
- va_end(args);
-}
-EXPORT_SYMBOL(__jbd2_debug);
-#endif
-
/* Checksumming functions */
int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
{
@@ -328,12 +310,14 @@ static void journal_kill_thread(journal_t *journal)
*
* If the source buffer has already been modified by a new transaction
* since we took the last commit snapshot, we use the frozen copy of
- * that data for IO. If we end up using the existing buffer_head's data
- * for the write, then we have to make sure nobody modifies it while the
- * IO is in progress. do_get_write_access() handles this.
+ * that data for IO. If we end up using the existing buffer_head's data
+ * for the write, then we *have* to lock the buffer to prevent anyone
+ * else from using and possibly modifying it while the IO is in
+ * progress.
*
- * The function returns a pointer to the buffer_head to be used for IO.
- *
+ * The function returns a pointer to the buffer_heads to be used for IO.
+ *
+ * We assume that the journal has already been locked in this function.
*
* Return value:
* <0: Error
@@ -346,14 +330,15 @@ static void journal_kill_thread(journal_t *journal)
int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
struct journal_head *jh_in,
- struct buffer_head **bh_out,
- sector_t blocknr)
+ struct journal_head **jh_out,
+ unsigned long long blocknr)
{
int need_copy_out = 0;
int done_copy_out = 0;
int do_escape = 0;
char *mapped_data;
struct buffer_head *new_bh;
+ struct journal_head *new_jh;
struct page *new_page;
unsigned int new_offset;
struct buffer_head *bh_in = jh2bh(jh_in);
@@ -383,13 +368,14 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
/* keep subsequent assertions sane */
atomic_set(&new_bh->b_count, 1);
+ new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */
- jbd_lock_bh_state(bh_in);
-repeat:
/*
* If a new transaction has already done a buffer copy-out, then
* we use that version of the data for the commit.
*/
+ jbd_lock_bh_state(bh_in);
+repeat:
if (jh_in->b_frozen_data) {
done_copy_out = 1;
new_page = virt_to_page(jh_in->b_frozen_data);
@@ -429,7 +415,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
jbd_unlock_bh_state(bh_in);
tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
if (!tmp) {
- brelse(new_bh);
+ jbd2_journal_put_journal_head(new_jh);
return -ENOMEM;
}
jbd_lock_bh_state(bh_in);
@@ -440,7 +426,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
jh_in->b_frozen_data = tmp;
mapped_data = kmap_atomic(new_page);
- memcpy(tmp, mapped_data + new_offset, bh_in->b_size);
+ memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
kunmap_atomic(mapped_data);
new_page = virt_to_page(tmp);
@@ -466,14 +452,14 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
}
set_bh_page(new_bh, new_page, new_offset);
- new_bh->b_size = bh_in->b_size;
- new_bh->b_bdev = journal->j_dev;
+ new_jh->b_transaction = NULL;
+ new_bh->b_size = jh2bh(jh_in)->b_size;
+ new_bh->b_bdev = transaction->t_journal->j_dev;
new_bh->b_blocknr = blocknr;
- new_bh->b_private = bh_in;
set_buffer_mapped(new_bh);
set_buffer_dirty(new_bh);
- *bh_out = new_bh;
+ *jh_out = new_jh;
/*
* The to-be-written buffer needs to get moved to the io queue,
@@ -484,9 +470,11 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
spin_lock(&journal->j_list_lock);
__jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
spin_unlock(&journal->j_list_lock);
- set_buffer_shadow(bh_in);
jbd_unlock_bh_state(bh_in);
+ JBUFFER_TRACE(new_jh, "file as BJ_IO");
+ jbd2_journal_file_buffer(new_jh, transaction, BJ_IO);
+
return do_escape | (done_copy_out << 1);
}
@@ -495,6 +483,35 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
* journal, so that we can begin checkpointing when appropriate.
*/
+/*
+ * __jbd2_log_space_left: Return the number of free blocks left in the journal.
+ *
+ * Called with the journal already locked.
+ *
+ * Called under j_state_lock
+ */
+
+int __jbd2_log_space_left(journal_t *journal)
+{
+ int left = journal->j_free;
+
+ /* assert_spin_locked(&journal->j_state_lock); */
+
+ /*
+ * Be pessimistic here about the number of those free blocks which
+ * might be required for log descriptor control blocks.
+ */
+
+#define MIN_LOG_RESERVED_BLOCKS 32 /* Allow for rounding errors */
+
+ left -= MIN_LOG_RESERVED_BLOCKS;
+
+ if (left <= 0)
+ return 0;
+ left -= (left >> 3);
+ return left;
+}
+
/*
* Called with j_state_lock locked for writing.
* Returns true if a transaction commit was started.
@@ -547,17 +564,20 @@ int jbd2_log_start_commit(journal_t *journal, tid_t tid)
}
/*
- * Force and wait any uncommitted transactions. We can only force the running
- * transaction if we don't have an active handle, otherwise, we will deadlock.
- * Returns: <0 in case of error,
- * 0 if nothing to commit,
- * 1 if transaction was successfully committed.
+ * Force and wait upon a commit if the calling process is not within
+ * transaction. This is used for forcing out undo-protected data which contains
+ * bitmaps, when the fs is running out of space.
+ *
+ * We can only force the running transaction if we don't have an active handle;
+ * otherwise, we will deadlock.
+ *
+ * Returns true if a transaction was started.
*/
-static int __jbd2_journal_force_commit(journal_t *journal)
+int jbd2_journal_force_commit_nested(journal_t *journal)
{
transaction_t *transaction = NULL;
tid_t tid;
- int need_to_start = 0, ret = 0;
+ int need_to_start = 0;
read_lock(&journal->j_state_lock);
if (journal->j_running_transaction && !current->journal_info) {
@@ -568,53 +588,16 @@ static int __jbd2_journal_force_commit(journal_t *journal)
transaction = journal->j_committing_transaction;
if (!transaction) {
- /* Nothing to commit */
read_unlock(&journal->j_state_lock);
- return 0;
+ return 0; /* Nothing to retry */
}
+
tid = transaction->t_tid;
read_unlock(&journal->j_state_lock);
if (need_to_start)
jbd2_log_start_commit(journal, tid);
- ret = jbd2_log_wait_commit(journal, tid);
- if (!ret)
- ret = 1;
-
- return ret;
-}
-
-/**
- * Force and wait upon a commit if the calling process is not within
- * transaction. This is used for forcing out undo-protected data which contains
- * bitmaps, when the fs is running out of space.
- *
- * @journal: journal to force
- * Returns true if progress was made.
- */
-int jbd2_journal_force_commit_nested(journal_t *journal)
-{
- int ret;
-
- ret = __jbd2_journal_force_commit(journal);
- return ret > 0;
-}
-
-/**
- * int journal_force_commit() - force any uncommitted transactions
- * @journal: journal to force
- *
- * Caller want unconditional commit. We can only force the running transaction
- * if we don't have an active handle, otherwise, we will deadlock.
- */
-int jbd2_journal_force_commit(journal_t *journal)
-{
- int ret;
-
- J_ASSERT(!current->journal_info);
- ret = __jbd2_journal_force_commit(journal);
- if (ret > 0)
- ret = 0;
- return ret;
+ jbd2_log_wait_commit(journal, tid);
+ return 1;
}
/*
@@ -815,7 +798,7 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
* But we don't bother doing that, so there will be coherency problems with
* mmaps of blockdevs which hold live JBD-controlled filesystems.
*/
-struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal)
+struct journal_head *jbd2_journal_get_descriptor_buffer(journal_t *journal)
{
struct buffer_head *bh;
unsigned long long blocknr;
@@ -834,7 +817,7 @@ struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal)
set_buffer_uptodate(bh);
unlock_buffer(bh);
BUFFER_TRACE(bh, "return this buffer");
- return bh;
+ return jbd2_journal_add_journal_head(bh);
}
/*
@@ -1079,10 +1062,11 @@ static journal_t * journal_init_common (void)
return NULL;
init_waitqueue_head(&journal->j_wait_transaction_locked);
+ init_waitqueue_head(&journal->j_wait_logspace);
init_waitqueue_head(&journal->j_wait_done_commit);
+ init_waitqueue_head(&journal->j_wait_checkpoint);
init_waitqueue_head(&journal->j_wait_commit);
init_waitqueue_head(&journal->j_wait_updates);
- init_waitqueue_head(&journal->j_wait_reserved);
mutex_init(&journal->j_barrier);
mutex_init(&journal->j_checkpoint_mutex);
spin_lock_init(&journal->j_revoke_lock);
@@ -1092,7 +1076,6 @@ static journal_t * journal_init_common (void)
journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE);
journal->j_min_batch_time = 0;
journal->j_max_batch_time = 15000; /* 15ms */
- atomic_set(&journal->j_reserved_credits, 0);
/* The journal is marked for error until we succeed with recovery! */
journal->j_flags = JBD2_ABORT;
@@ -1335,7 +1318,6 @@ static int journal_reset(journal_t *journal)
static void jbd2_write_superblock(journal_t *journal, int write_op)
{
struct buffer_head *bh = journal->j_sb_buffer;
- journal_superblock_t *sb = journal->j_superblock;
int ret;
trace_jbd2_write_superblock(journal, write_op);
@@ -1357,7 +1339,6 @@ static void jbd2_write_superblock(journal_t *journal, int write_op)
clear_buffer_write_io_error(bh);
set_buffer_uptodate(bh);
}
- jbd2_superblock_csum_set(journal, sb);
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
ret = submit_bh(write_op, bh);
@@ -1454,6 +1435,7 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
jbd_debug(1, "JBD2: updating superblock error (errno %d)\n",
journal->j_errno);
sb->s_errno = cpu_to_be32(journal->j_errno);
+ jbd2_superblock_csum_set(journal, sb);
read_unlock(&journal->j_state_lock);
jbd2_write_superblock(journal, WRITE_SYNC);
@@ -2343,13 +2325,13 @@ static struct journal_head *journal_alloc_journal_head(void)
#ifdef CONFIG_JBD2_DEBUG
atomic_inc(&nr_journal_heads);
#endif
- ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS);
+ ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
if (!ret) {
jbd_debug(1, "out of memory for journal_head\n");
pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
while (!ret) {
yield();
- ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS);
+ ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
}
}
return ret;
@@ -2411,8 +2393,10 @@ struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh)
struct journal_head *new_jh = NULL;
repeat:
- if (!buffer_jbd(bh))
+ if (!buffer_jbd(bh)) {
new_jh = journal_alloc_journal_head();
+ memset(new_jh, 0, sizeof(*new_jh));
+ }
jbd_lock_bh_journal_head(bh);
if (buffer_jbd(bh)) {
diff --git a/trunk/fs/jbd2/recovery.c b/trunk/fs/jbd2/recovery.c
index d4851464b57e..626846bac32f 100644
--- a/trunk/fs/jbd2/recovery.c
+++ b/trunk/fs/jbd2/recovery.c
@@ -399,17 +399,18 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
void *buf, __u32 sequence)
{
- __u32 csum32;
+ __u32 provided, calculated;
if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
return 1;
sequence = cpu_to_be32(sequence);
- csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
- sizeof(sequence));
- csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize);
+ calculated = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
+ sizeof(sequence));
+ calculated = jbd2_chksum(j, calculated, buf, j->j_blocksize);
+ provided = be32_to_cpu(tag->t_checksum);
- return tag->t_checksum == cpu_to_be16(csum32);
+ return provided == cpu_to_be32(calculated);
}
static int do_one_pass(journal_t *journal,
diff --git a/trunk/fs/jbd2/revoke.c b/trunk/fs/jbd2/revoke.c
index 198c9c10276d..f30b80b4ce8b 100644
--- a/trunk/fs/jbd2/revoke.c
+++ b/trunk/fs/jbd2/revoke.c
@@ -122,10 +122,9 @@ struct jbd2_revoke_table_s
#ifdef __KERNEL__
static void write_one_revoke_record(journal_t *, transaction_t *,
- struct list_head *,
- struct buffer_head **, int *,
+ struct journal_head **, int *,
struct jbd2_revoke_record_s *, int);
-static void flush_descriptor(journal_t *, struct buffer_head *, int, int);
+static void flush_descriptor(journal_t *, struct journal_head *, int, int);
#endif
/* Utility functions to maintain the revoke table */
@@ -532,10 +531,9 @@ void jbd2_journal_switch_revoke_table(journal_t *journal)
*/
void jbd2_journal_write_revoke_records(journal_t *journal,
transaction_t *transaction,
- struct list_head *log_bufs,
int write_op)
{
- struct buffer_head *descriptor;
+ struct journal_head *descriptor;
struct jbd2_revoke_record_s *record;
struct jbd2_revoke_table_s *revoke;
struct list_head *hash_list;
@@ -555,7 +553,7 @@ void jbd2_journal_write_revoke_records(journal_t *journal,
while (!list_empty(hash_list)) {
record = (struct jbd2_revoke_record_s *)
hash_list->next;
- write_one_revoke_record(journal, transaction, log_bufs,
+ write_one_revoke_record(journal, transaction,
&descriptor, &offset,
record, write_op);
count++;
@@ -575,14 +573,13 @@ void jbd2_journal_write_revoke_records(journal_t *journal,
static void write_one_revoke_record(journal_t *journal,
transaction_t *transaction,
- struct list_head *log_bufs,
- struct buffer_head **descriptorp,
+ struct journal_head **descriptorp,
int *offsetp,
struct jbd2_revoke_record_s *record,
int write_op)
{
int csum_size = 0;
- struct buffer_head *descriptor;
+ struct journal_head *descriptor;
int offset;
journal_header_t *header;
@@ -612,26 +609,26 @@ static void write_one_revoke_record(journal_t *journal,
descriptor = jbd2_journal_get_descriptor_buffer(journal);
if (!descriptor)
return;
- header = (journal_header_t *)descriptor->b_data;
+ header = (journal_header_t *) &jh2bh(descriptor)->b_data[0];
header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
header->h_blocktype = cpu_to_be32(JBD2_REVOKE_BLOCK);
header->h_sequence = cpu_to_be32(transaction->t_tid);
/* Record it so that we can wait for IO completion later */
- BUFFER_TRACE(descriptor, "file in log_bufs");
- jbd2_file_log_bh(log_bufs, descriptor);
+ JBUFFER_TRACE(descriptor, "file as BJ_LogCtl");
+ jbd2_journal_file_buffer(descriptor, transaction, BJ_LogCtl);
offset = sizeof(jbd2_journal_revoke_header_t);
*descriptorp = descriptor;
}
if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) {
- * ((__be64 *)(&descriptor->b_data[offset])) =
+ * ((__be64 *)(&jh2bh(descriptor)->b_data[offset])) =
cpu_to_be64(record->blocknr);
offset += 8;
} else {
- * ((__be32 *)(&descriptor->b_data[offset])) =
+ * ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) =
cpu_to_be32(record->blocknr);
offset += 4;
}
@@ -639,7 +636,8 @@ static void write_one_revoke_record(journal_t *journal,
*offsetp = offset;
}
-static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh)
+static void jbd2_revoke_csum_set(journal_t *j,
+ struct journal_head *descriptor)
{
struct jbd2_journal_revoke_tail *tail;
__u32 csum;
@@ -647,10 +645,12 @@ static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh)
if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
return;
- tail = (struct jbd2_journal_revoke_tail *)(bh->b_data + j->j_blocksize -
+ tail = (struct jbd2_journal_revoke_tail *)
+ (jh2bh(descriptor)->b_data + j->j_blocksize -
sizeof(struct jbd2_journal_revoke_tail));
tail->r_checksum = 0;
- csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
+ csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
+ j->j_blocksize);
tail->r_checksum = cpu_to_be32(csum);
}
@@ -662,24 +662,25 @@ static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh)
*/
static void flush_descriptor(journal_t *journal,
- struct buffer_head *descriptor,
+ struct journal_head *descriptor,
int offset, int write_op)
{
jbd2_journal_revoke_header_t *header;
+ struct buffer_head *bh = jh2bh(descriptor);
if (is_journal_aborted(journal)) {
- put_bh(descriptor);
+ put_bh(bh);
return;
}
- header = (jbd2_journal_revoke_header_t *)descriptor->b_data;
+ header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data;
header->r_count = cpu_to_be32(offset);
jbd2_revoke_csum_set(journal, descriptor);
- set_buffer_jwrite(descriptor);
- BUFFER_TRACE(descriptor, "write");
- set_buffer_dirty(descriptor);
- write_dirty_buffer(descriptor, write_op);
+ set_buffer_jwrite(bh);
+ BUFFER_TRACE(bh, "write");
+ set_buffer_dirty(bh);
+ write_dirty_buffer(bh, write_op);
}
#endif
diff --git a/trunk/fs/jbd2/transaction.c b/trunk/fs/jbd2/transaction.c
index 7aa9a32573bb..10f524c59ea8 100644
--- a/trunk/fs/jbd2/transaction.c
+++ b/trunk/fs/jbd2/transaction.c
@@ -89,8 +89,7 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
transaction->t_expires = jiffies + journal->j_commit_interval;
spin_lock_init(&transaction->t_handle_lock);
atomic_set(&transaction->t_updates, 0);
- atomic_set(&transaction->t_outstanding_credits,
- atomic_read(&journal->j_reserved_credits));
+ atomic_set(&transaction->t_outstanding_credits, 0);
atomic_set(&transaction->t_handle_count, 0);
INIT_LIST_HEAD(&transaction->t_inode_list);
INIT_LIST_HEAD(&transaction->t_private_list);
@@ -141,112 +140,6 @@ static inline void update_t_max_wait(transaction_t *transaction,
#endif
}
-/*
- * Wait until running transaction passes T_LOCKED state. Also starts the commit
- * if needed. The function expects running transaction to exist and releases
- * j_state_lock.
- */
-static void wait_transaction_locked(journal_t *journal)
- __releases(journal->j_state_lock)
-{
- DEFINE_WAIT(wait);
- int need_to_start;
- tid_t tid = journal->j_running_transaction->t_tid;
-
- prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
- TASK_UNINTERRUPTIBLE);
- need_to_start = !tid_geq(journal->j_commit_request, tid);
- read_unlock(&journal->j_state_lock);
- if (need_to_start)
- jbd2_log_start_commit(journal, tid);
- schedule();
- finish_wait(&journal->j_wait_transaction_locked, &wait);
-}
-
-static void sub_reserved_credits(journal_t *journal, int blocks)
-{
- atomic_sub(blocks, &journal->j_reserved_credits);
- wake_up(&journal->j_wait_reserved);
-}
-
-/*
- * Wait until we can add credits for handle to the running transaction. Called
- * with j_state_lock held for reading. Returns 0 if handle joined the running
- * transaction. Returns 1 if we had to wait, j_state_lock is dropped, and
- * caller must retry.
- */
-static int add_transaction_credits(journal_t *journal, int blocks,
- int rsv_blocks)
-{
- transaction_t *t = journal->j_running_transaction;
- int needed;
- int total = blocks + rsv_blocks;
-
- /*
- * If the current transaction is locked down for commit, wait
- * for the lock to be released.
- */
- if (t->t_state == T_LOCKED) {
- wait_transaction_locked(journal);
- return 1;
- }
-
- /*
- * If there is not enough space left in the log to write all
- * potential buffers requested by this operation, we need to
- * stall pending a log checkpoint to free some more log space.
- */
- needed = atomic_add_return(total, &t->t_outstanding_credits);
- if (needed > journal->j_max_transaction_buffers) {
- /*
- * If the current transaction is already too large,
- * then start to commit it: we can then go back and
- * attach this handle to a new transaction.
- */
- atomic_sub(total, &t->t_outstanding_credits);
- wait_transaction_locked(journal);
- return 1;
- }
-
- /*
- * The commit code assumes that it can get enough log space
- * without forcing a checkpoint. This is *critical* for
- * correctness: a checkpoint of a buffer which is also
- * associated with a committing transaction creates a deadlock,
- * so commit simply cannot force through checkpoints.
- *
- * We must therefore ensure the necessary space in the journal
- * *before* starting to dirty potentially checkpointed buffers
- * in the new transaction.
- */
- if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) {
- atomic_sub(total, &t->t_outstanding_credits);
- read_unlock(&journal->j_state_lock);
- write_lock(&journal->j_state_lock);
- if (jbd2_log_space_left(journal) < jbd2_space_needed(journal))
- __jbd2_log_wait_for_space(journal);
- write_unlock(&journal->j_state_lock);
- return 1;
- }
-
- /* No reservation? We are done... */
- if (!rsv_blocks)
- return 0;
-
- needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits);
- /* We allow at most half of a transaction to be reserved */
- if (needed > journal->j_max_transaction_buffers / 2) {
- sub_reserved_credits(journal, rsv_blocks);
- atomic_sub(total, &t->t_outstanding_credits);
- read_unlock(&journal->j_state_lock);
- wait_event(journal->j_wait_reserved,
- atomic_read(&journal->j_reserved_credits) + rsv_blocks
- <= journal->j_max_transaction_buffers / 2);
- return 1;
- }
- return 0;
-}
-
/*
* start_this_handle: Given a handle, deal with any locking or stalling
* needed to make sure that there is enough journal space for the handle
@@ -258,24 +151,18 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
gfp_t gfp_mask)
{
transaction_t *transaction, *new_transaction = NULL;
- int blocks = handle->h_buffer_credits;
- int rsv_blocks = 0;
+ tid_t tid;
+ int needed, need_to_start;
+ int nblocks = handle->h_buffer_credits;
unsigned long ts = jiffies;
- /*
- * 1/2 of transaction can be reserved so we can practically handle
- * only 1/2 of maximum transaction size per operation
- */
- if (WARN_ON(blocks > journal->j_max_transaction_buffers / 2)) {
+ if (nblocks > journal->j_max_transaction_buffers) {
printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n",
- current->comm, blocks,
- journal->j_max_transaction_buffers / 2);
+ current->comm, nblocks,
+ journal->j_max_transaction_buffers);
return -ENOSPC;
}
- if (handle->h_rsv_handle)
- rsv_blocks = handle->h_rsv_handle->h_buffer_credits;
-
alloc_transaction:
if (!journal->j_running_transaction) {
new_transaction = kmem_cache_zalloc(transaction_cache,
@@ -312,12 +199,8 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
return -EROFS;
}
- /*
- * Wait on the journal's transaction barrier if necessary. Specifically
- * we allow reserved handles to proceed because otherwise commit could
- * deadlock on page writeback not being able to complete.
- */
- if (!handle->h_reserved && journal->j_barrier_count) {
+ /* Wait on the journal's transaction barrier if necessary */
+ if (journal->j_barrier_count) {
read_unlock(&journal->j_state_lock);
wait_event(journal->j_wait_transaction_locked,
journal->j_barrier_count == 0);
@@ -330,7 +213,7 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
goto alloc_transaction;
write_lock(&journal->j_state_lock);
if (!journal->j_running_transaction &&
- (handle->h_reserved || !journal->j_barrier_count)) {
+ !journal->j_barrier_count) {
jbd2_get_transaction(journal, new_transaction);
new_transaction = NULL;
}
@@ -340,18 +223,85 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
transaction = journal->j_running_transaction;
- if (!handle->h_reserved) {
- /* We may have dropped j_state_lock - restart in that case */
- if (add_transaction_credits(journal, blocks, rsv_blocks))
- goto repeat;
- } else {
+ /*
+ * If the current transaction is locked down for commit, wait for the
+ * lock to be released.
+ */
+ if (transaction->t_state == T_LOCKED) {
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&journal->j_wait_transaction_locked,
+ &wait, TASK_UNINTERRUPTIBLE);
+ read_unlock(&journal->j_state_lock);
+ schedule();
+ finish_wait(&journal->j_wait_transaction_locked, &wait);
+ goto repeat;
+ }
+
+ /*
+ * If there is not enough space left in the log to write all potential
+ * buffers requested by this operation, we need to stall pending a log
+ * checkpoint to free some more log space.
+ */
+ needed = atomic_add_return(nblocks,
+ &transaction->t_outstanding_credits);
+
+ if (needed > journal->j_max_transaction_buffers) {
/*
- * We have handle reserved so we are allowed to join T_LOCKED
- * transaction and we don't have to check for transaction size
- * and journal space.
+ * If the current transaction is already too large, then start
+ * to commit it: we can then go back and attach this handle to
+ * a new transaction.
*/
- sub_reserved_credits(journal, blocks);
- handle->h_reserved = 0;
+ DEFINE_WAIT(wait);
+
+ jbd_debug(2, "Handle %p starting new commit...\n", handle);
+ atomic_sub(nblocks, &transaction->t_outstanding_credits);
+ prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
+ TASK_UNINTERRUPTIBLE);
+ tid = transaction->t_tid;
+ need_to_start = !tid_geq(journal->j_commit_request, tid);
+ read_unlock(&journal->j_state_lock);
+ if (need_to_start)
+ jbd2_log_start_commit(journal, tid);
+ schedule();
+ finish_wait(&journal->j_wait_transaction_locked, &wait);
+ goto repeat;
+ }
+
+ /*
+ * The commit code assumes that it can get enough log space
+ * without forcing a checkpoint. This is *critical* for
+ * correctness: a checkpoint of a buffer which is also
+ * associated with a committing transaction creates a deadlock,
+ * so commit simply cannot force through checkpoints.
+ *
+ * We must therefore ensure the necessary space in the journal
+ * *before* starting to dirty potentially checkpointed buffers
+ * in the new transaction.
+ *
+ * The worst part is, any transaction currently committing can
+ * reduce the free space arbitrarily. Be careful to account for
+ * those buffers when checkpointing.
+ */
+
+ /*
+ * @@@ AKPM: This seems rather over-defensive. We're giving commit
+ * a _lot_ of headroom: 1/4 of the journal plus the size of
+ * the committing transaction. Really, we only need to give it
+ * committing_transaction->t_outstanding_credits plus "enough" for
+ * the log control blocks.
+ * Also, this test is inconsistent with the matching one in
+ * jbd2_journal_extend().
+ */
+ if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
+ jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
+ atomic_sub(nblocks, &transaction->t_outstanding_credits);
+ read_unlock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
+ if (__jbd2_log_space_left(journal) < jbd_space_needed(journal))
+ __jbd2_log_wait_for_space(journal);
+ write_unlock(&journal->j_state_lock);
+ goto repeat;
}
/* OK, account for the buffers that this operation expects to
@@ -359,16 +309,15 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
*/
update_t_max_wait(transaction, ts);
handle->h_transaction = transaction;
- handle->h_requested_credits = blocks;
+ handle->h_requested_credits = nblocks;
handle->h_start_jiffies = jiffies;
atomic_inc(&transaction->t_updates);
atomic_inc(&transaction->t_handle_count);
- jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
- handle, blocks,
+ jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
+ handle, nblocks,
atomic_read(&transaction->t_outstanding_credits),
- jbd2_log_space_left(journal));
+ __jbd2_log_space_left(journal));
read_unlock(&journal->j_state_lock);
- current->journal_info = handle;
lock_map_acquire(&handle->h_lockdep_map);
jbd2_journal_free_transaction(new_transaction);
@@ -399,21 +348,16 @@ static handle_t *new_handle(int nblocks)
*
* We make sure that the transaction can guarantee at least nblocks of
* modified buffers in the log. We block until the log can guarantee
- * that much space. Additionally, if rsv_blocks > 0, we also create another
- * handle with rsv_blocks reserved blocks in the journal. This handle is
- * is stored in h_rsv_handle. It is not attached to any particular transaction
- * and thus doesn't block transaction commit. If the caller uses this reserved
- * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop()
- * on the parent handle will dispose the reserved one. Reserved handle has to
- * be converted to a normal handle using jbd2_journal_start_reserved() before
- * it can be used.
+ * that much space.
+ *
+ * This function is visible to journal users (like ext3fs), so is not
+ * called with the journal already locked.
*
* Return a pointer to a newly allocated handle, or an ERR_PTR() value
* on failure.
*/
-handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
- gfp_t gfp_mask, unsigned int type,
- unsigned int line_no)
+handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask,
+ unsigned int type, unsigned int line_no)
{
handle_t *handle = journal_current_handle();
int err;
@@ -430,24 +374,13 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
handle = new_handle(nblocks);
if (!handle)
return ERR_PTR(-ENOMEM);
- if (rsv_blocks) {
- handle_t *rsv_handle;
- rsv_handle = new_handle(rsv_blocks);
- if (!rsv_handle) {
- jbd2_free_handle(handle);
- return ERR_PTR(-ENOMEM);
- }
- rsv_handle->h_reserved = 1;
- rsv_handle->h_journal = journal;
- handle->h_rsv_handle = rsv_handle;
- }
+ current->journal_info = handle;
err = start_this_handle(journal, handle, gfp_mask);
if (err < 0) {
- if (handle->h_rsv_handle)
- jbd2_free_handle(handle->h_rsv_handle);
jbd2_free_handle(handle);
+ current->journal_info = NULL;
return ERR_PTR(err);
}
handle->h_type = type;
@@ -462,65 +395,10 @@ EXPORT_SYMBOL(jbd2__journal_start);
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
- return jbd2__journal_start(journal, nblocks, 0, GFP_NOFS, 0, 0);
+ return jbd2__journal_start(journal, nblocks, GFP_NOFS, 0, 0);
}
EXPORT_SYMBOL(jbd2_journal_start);
-void jbd2_journal_free_reserved(handle_t *handle)
-{
- journal_t *journal = handle->h_journal;
-
- WARN_ON(!handle->h_reserved);
- sub_reserved_credits(journal, handle->h_buffer_credits);
- jbd2_free_handle(handle);
-}
-EXPORT_SYMBOL(jbd2_journal_free_reserved);
-
-/**
- * int jbd2_journal_start_reserved(handle_t *handle) - start reserved handle
- * @handle: handle to start
- *
- * Start handle that has been previously reserved with jbd2_journal_reserve().
- * This attaches @handle to the running transaction (or creates one if there's
- * not transaction running). Unlike jbd2_journal_start() this function cannot
- * block on journal commit, checkpointing, or similar stuff. It can block on
- * memory allocation or frozen journal though.
- *
- * Return 0 on success, non-zero on error - handle is freed in that case.
- */
-int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
- unsigned int line_no)
-{
- journal_t *journal = handle->h_journal;
- int ret = -EIO;
-
- if (WARN_ON(!handle->h_reserved)) {
- /* Someone passed in normal handle? Just stop it. */
- jbd2_journal_stop(handle);
- return ret;
- }
- /*
- * Usefulness of mixing of reserved and unreserved handles is
- * questionable. So far nobody seems to need it so just error out.
- */
- if (WARN_ON(current->journal_info)) {
- jbd2_journal_free_reserved(handle);
- return ret;
- }
-
- handle->h_journal = NULL;
- /*
- * GFP_NOFS is here because callers are likely from writeback or
- * similarly constrained call sites
- */
- ret = start_this_handle(journal, handle, GFP_NOFS);
- if (ret < 0)
- jbd2_journal_free_reserved(handle);
- handle->h_type = type;
- handle->h_line_no = line_no;
- return ret;
-}
-EXPORT_SYMBOL(jbd2_journal_start_reserved);
/**
* int jbd2_journal_extend() - extend buffer credits.
@@ -545,53 +423,49 @@ EXPORT_SYMBOL(jbd2_journal_start_reserved);
int jbd2_journal_extend(handle_t *handle, int nblocks)
{
transaction_t *transaction = handle->h_transaction;
- journal_t *journal;
+ journal_t *journal = transaction->t_journal;
int result;
int wanted;
- WARN_ON(!transaction);
+ result = -EIO;
if (is_handle_aborted(handle))
- return -EROFS;
- journal = transaction->t_journal;
+ goto out;
result = 1;
read_lock(&journal->j_state_lock);
/* Don't extend a locked-down transaction! */
- if (transaction->t_state != T_RUNNING) {
+ if (handle->h_transaction->t_state != T_RUNNING) {
jbd_debug(3, "denied handle %p %d blocks: "
"transaction not running\n", handle, nblocks);
goto error_out;
}
spin_lock(&transaction->t_handle_lock);
- wanted = atomic_add_return(nblocks,
- &transaction->t_outstanding_credits);
+ wanted = atomic_read(&transaction->t_outstanding_credits) + nblocks;
if (wanted > journal->j_max_transaction_buffers) {
jbd_debug(3, "denied handle %p %d blocks: "
"transaction too large\n", handle, nblocks);
- atomic_sub(nblocks, &transaction->t_outstanding_credits);
goto unlock;
}
- if (wanted + (wanted >> JBD2_CONTROL_BLOCKS_SHIFT) >
- jbd2_log_space_left(journal)) {
+ if (wanted > __jbd2_log_space_left(journal)) {
jbd_debug(3, "denied handle %p %d blocks: "
"insufficient log space\n", handle, nblocks);
- atomic_sub(nblocks, &transaction->t_outstanding_credits);
goto unlock;
}
trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
- transaction->t_tid,
+ handle->h_transaction->t_tid,
handle->h_type, handle->h_line_no,
handle->h_buffer_credits,
nblocks);
handle->h_buffer_credits += nblocks;
handle->h_requested_credits += nblocks;
+ atomic_add(nblocks, &transaction->t_outstanding_credits);
result = 0;
jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
@@ -599,6 +473,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
spin_unlock(&transaction->t_handle_lock);
error_out:
read_unlock(&journal->j_state_lock);
+out:
return result;
}
@@ -615,22 +490,19 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
* to a running handle, a call to jbd2_journal_restart will commit the
* handle's transaction so far and reattach the handle to a new
* transaction capabable of guaranteeing the requested number of
- * credits. We preserve reserved handle if there's any attached to the
- * passed in handle.
+ * credits.
*/
int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
{
transaction_t *transaction = handle->h_transaction;
- journal_t *journal;
+ journal_t *journal = transaction->t_journal;
tid_t tid;
int need_to_start, ret;
- WARN_ON(!transaction);
/* If we've had an abort of any type, don't even think about
* actually doing the restart! */
if (is_handle_aborted(handle))
return 0;
- journal = transaction->t_journal;
/*
* First unlink the handle from its current transaction, and start the
@@ -643,18 +515,12 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
spin_lock(&transaction->t_handle_lock);
atomic_sub(handle->h_buffer_credits,
&transaction->t_outstanding_credits);
- if (handle->h_rsv_handle) {
- sub_reserved_credits(journal,
- handle->h_rsv_handle->h_buffer_credits);
- }
if (atomic_dec_and_test(&transaction->t_updates))
wake_up(&journal->j_wait_updates);
- tid = transaction->t_tid;
spin_unlock(&transaction->t_handle_lock);
- handle->h_transaction = NULL;
- current->journal_info = NULL;
jbd_debug(2, "restarting handle %p\n", handle);
+ tid = transaction->t_tid;
need_to_start = !tid_geq(journal->j_commit_request, tid);
read_unlock(&journal->j_state_lock);
if (need_to_start)
@@ -691,14 +557,6 @@ void jbd2_journal_lock_updates(journal_t *journal)
write_lock(&journal->j_state_lock);
++journal->j_barrier_count;
- /* Wait until there are no reserved handles */
- if (atomic_read(&journal->j_reserved_credits)) {
- write_unlock(&journal->j_state_lock);
- wait_event(journal->j_wait_reserved,
- atomic_read(&journal->j_reserved_credits) == 0);
- write_lock(&journal->j_state_lock);
- }
-
/* Wait until there are no running updates */
while (1) {
transaction_t *transaction = journal->j_running_transaction;
@@ -761,12 +619,6 @@ static void warn_dirty_buffer(struct buffer_head *bh)
bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
}
-static int sleep_on_shadow_bh(void *word)
-{
- io_schedule();
- return 0;
-}
-
/*
* If the buffer is already part of the current transaction, then there
* is nothing we need to do. If it is already part of a prior
@@ -782,16 +634,17 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
int force_copy)
{
struct buffer_head *bh;
- transaction_t *transaction = handle->h_transaction;
+ transaction_t *transaction;
journal_t *journal;
int error;
char *frozen_buffer = NULL;
int need_copy = 0;
unsigned long start_lock, time_lock;
- WARN_ON(!transaction);
if (is_handle_aborted(handle))
return -EROFS;
+
+ transaction = handle->h_transaction;
journal = transaction->t_journal;
jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
@@ -901,29 +754,41 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
* journaled. If the primary copy is already going to
* disk then we cannot do copy-out here. */
- if (buffer_shadow(bh)) {
+ if (jh->b_jlist == BJ_Shadow) {
+ DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
+ wait_queue_head_t *wqh;
+
+ wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);
+
JBUFFER_TRACE(jh, "on shadow: sleep");
jbd_unlock_bh_state(bh);
- wait_on_bit(&bh->b_state, BH_Shadow,
- sleep_on_shadow_bh, TASK_UNINTERRUPTIBLE);
+ /* commit wakes up all shadow buffers after IO */
+ for ( ; ; ) {
+ prepare_to_wait(wqh, &wait.wait,
+ TASK_UNINTERRUPTIBLE);
+ if (jh->b_jlist != BJ_Shadow)
+ break;
+ schedule();
+ }
+ finish_wait(wqh, &wait.wait);
goto repeat;
}
- /*
- * Only do the copy if the currently-owning transaction still
- * needs it. If buffer isn't on BJ_Metadata list, the
- * committing transaction is past that stage (here we use the
- * fact that BH_Shadow is set under bh_state lock together with
- * refiling to BJ_Shadow list and at this point we know the
- * buffer doesn't have BH_Shadow set).
+ /* Only do the copy if the currently-owning transaction
+ * still needs it. If it is on the Forget list, the
+ * committing transaction is past that stage. The
+ * buffer had better remain locked during the kmalloc,
+ * but that should be true --- we hold the journal lock
+ * still and the buffer is already on the BUF_JOURNAL
+ * list so won't be flushed.
*
* Subtle point, though: if this is a get_undo_access,
* then we will be relying on the frozen_data to contain
* the new value of the committed_data record after the
* transaction, so we HAVE to force the frozen_data copy
- * in that case.
- */
- if (jh->b_jlist == BJ_Metadata || force_copy) {
+ * in that case. */
+
+ if (jh->b_jlist != BJ_Forget || force_copy) {
JBUFFER_TRACE(jh, "generate frozen data");
if (!frozen_buffer) {
JBUFFER_TRACE(jh, "allocate memory for buffer");
@@ -1050,16 +915,14 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
transaction_t *transaction = handle->h_transaction;
- journal_t *journal;
+ journal_t *journal = transaction->t_journal;
struct journal_head *jh = jbd2_journal_add_journal_head(bh);
int err;
jbd_debug(5, "journal_head %p\n", jh);
- WARN_ON(!transaction);
err = -EROFS;
if (is_handle_aborted(handle))
goto out;
- journal = transaction->t_journal;
err = 0;
JBUFFER_TRACE(jh, "entry");
@@ -1265,14 +1128,12 @@ void jbd2_buffer_abort_trigger(struct journal_head *jh,
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
transaction_t *transaction = handle->h_transaction;
- journal_t *journal;
+ journal_t *journal = transaction->t_journal;
struct journal_head *jh;
int ret = 0;
- WARN_ON(!transaction);
if (is_handle_aborted(handle))
- return -EROFS;
- journal = transaction->t_journal;
+ goto out;
jh = jbd2_journal_grab_journal_head(bh);
if (!jh) {
ret = -EUCLEAN;
@@ -1366,7 +1227,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "file as BJ_Metadata");
spin_lock(&journal->j_list_lock);
- __jbd2_journal_file_buffer(jh, transaction, BJ_Metadata);
+ __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
spin_unlock(&journal->j_list_lock);
out_unlock_bh:
jbd_unlock_bh_state(bh);
@@ -1397,17 +1258,12 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
{
transaction_t *transaction = handle->h_transaction;
- journal_t *journal;
+ journal_t *journal = transaction->t_journal;
struct journal_head *jh;
int drop_reserve = 0;
int err = 0;
int was_modified = 0;
- WARN_ON(!transaction);
- if (is_handle_aborted(handle))
- return -EROFS;
- journal = transaction->t_journal;
-
BUFFER_TRACE(bh, "entry");
jbd_lock_bh_state(bh);
@@ -1434,7 +1290,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
*/
jh->b_modified = 0;
- if (jh->b_transaction == transaction) {
+ if (jh->b_transaction == handle->h_transaction) {
J_ASSERT_JH(jh, !jh->b_frozen_data);
/* If we are forgetting a buffer which is already part
@@ -1529,21 +1385,19 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
int jbd2_journal_stop(handle_t *handle)
{
transaction_t *transaction = handle->h_transaction;
- journal_t *journal;
- int err = 0, wait_for_commit = 0;
+ journal_t *journal = transaction->t_journal;
+ int err, wait_for_commit = 0;
tid_t tid;
pid_t pid;
- if (!transaction)
- goto free_and_exit;
- journal = transaction->t_journal;
-
J_ASSERT(journal_current_handle() == handle);
if (is_handle_aborted(handle))
err = -EIO;
- else
+ else {
J_ASSERT(atomic_read(&transaction->t_updates) > 0);
+ err = 0;
+ }
if (--handle->h_ref > 0) {
jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
@@ -1553,7 +1407,7 @@ int jbd2_journal_stop(handle_t *handle)
jbd_debug(4, "Handle %p going down\n", handle);
trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
- transaction->t_tid,
+ handle->h_transaction->t_tid,
handle->h_type, handle->h_line_no,
jiffies - handle->h_start_jiffies,
handle->h_sync, handle->h_requested_credits,
@@ -1664,13 +1518,33 @@ int jbd2_journal_stop(handle_t *handle)
lock_map_release(&handle->h_lockdep_map);
- if (handle->h_rsv_handle)
- jbd2_journal_free_reserved(handle->h_rsv_handle);
-free_and_exit:
jbd2_free_handle(handle);
return err;
}
+/**
+ * int jbd2_journal_force_commit() - force any uncommitted transactions
+ * @journal: journal to force
+ *
+ * For synchronous operations: force any uncommitted transactions
+ * to disk. May seem kludgy, but it reuses all the handle batching
+ * code in a very simple manner.
+ */
+int jbd2_journal_force_commit(journal_t *journal)
+{
+ handle_t *handle;
+ int ret;
+
+ handle = jbd2_journal_start(journal, 1);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ } else {
+ handle->h_sync = 1;
+ ret = jbd2_journal_stop(handle);
+ }
+ return ret;
+}
+
/*
*
* List management code snippets: various functions for manipulating the
@@ -1727,10 +1601,10 @@ __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
* Remove a buffer from the appropriate transaction list.
*
* Note that this function can *change* the value of
- * bh->b_transaction->t_buffers, t_forget, t_shadow_list, t_log_list or
- * t_reserved_list. If the caller is holding onto a copy of one of these
- * pointers, it could go bad. Generally the caller needs to re-read the
- * pointer from the transaction_t.
+ * bh->b_transaction->t_buffers, t_forget, t_iobuf_list, t_shadow_list,
+ * t_log_list or t_reserved_list. If the caller is holding onto a copy of one
+ * of these pointers, it could go bad. Generally the caller needs to re-read
+ * the pointer from the transaction_t.
*
* Called under j_list_lock.
*/
@@ -1760,9 +1634,15 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
case BJ_Forget:
list = &transaction->t_forget;
break;
+ case BJ_IO:
+ list = &transaction->t_iobuf_list;
+ break;
case BJ_Shadow:
list = &transaction->t_shadow_list;
break;
+ case BJ_LogCtl:
+ list = &transaction->t_log_list;
+ break;
case BJ_Reserved:
list = &transaction->t_reserved_list;
break;
@@ -2154,23 +2034,18 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
* void jbd2_journal_invalidatepage()
* @journal: journal to use for flush...
* @page: page to flush
- * @offset: start of the range to invalidate
- * @length: length of the range to invalidate
+ * @offset: length of page to invalidate.
*
- * Reap page buffers containing data after in the specified range in page.
- * Can return -EBUSY if buffers are part of the committing transaction and
- * the page is straddling i_size. Caller then has to wait for current commit
- * and try again.
+ * Reap page buffers containing data after offset in page. Can return -EBUSY
+ * if buffers are part of the committing transaction and the page is straddling
+ * i_size. Caller then has to wait for current commit and try again.
*/
int jbd2_journal_invalidatepage(journal_t *journal,
struct page *page,
- unsigned int offset,
- unsigned int length)
+ unsigned long offset)
{
struct buffer_head *head, *bh, *next;
- unsigned int stop = offset + length;
unsigned int curr_off = 0;
- int partial_page = (offset || length < PAGE_CACHE_SIZE);
int may_free = 1;
int ret = 0;
@@ -2179,8 +2054,6 @@ int jbd2_journal_invalidatepage(journal_t *journal,
if (!page_has_buffers(page))
return 0;
- BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
-
/* We will potentially be playing with lists other than just the
* data lists (especially for journaled data mode), so be
* cautious in our locking. */
@@ -2190,13 +2063,10 @@ int jbd2_journal_invalidatepage(journal_t *journal,
unsigned int next_off = curr_off + bh->b_size;
next = bh->b_this_page;
- if (next_off > stop)
- return 0;
-
if (offset <= curr_off) {
/* This block is wholly outside the truncation point */
lock_buffer(bh);
- ret = journal_unmap_buffer(journal, bh, partial_page);
+ ret = journal_unmap_buffer(journal, bh, offset > 0);
unlock_buffer(bh);
if (ret < 0)
return ret;
@@ -2207,7 +2077,7 @@ int jbd2_journal_invalidatepage(journal_t *journal,
} while (bh != head);
- if (!partial_page) {
+ if (!offset) {
if (may_free && try_to_free_buffers(page))
J_ASSERT(!page_has_buffers(page));
}
@@ -2268,9 +2138,15 @@ void __jbd2_journal_file_buffer(struct journal_head *jh,
case BJ_Forget:
list = &transaction->t_forget;
break;
+ case BJ_IO:
+ list = &transaction->t_iobuf_list;
+ break;
case BJ_Shadow:
list = &transaction->t_shadow_list;
break;
+ case BJ_LogCtl:
+ list = &transaction->t_log_list;
+ break;
case BJ_Reserved:
list = &transaction->t_reserved_list;
break;
@@ -2372,12 +2248,10 @@ void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
{
transaction_t *transaction = handle->h_transaction;
- journal_t *journal;
+ journal_t *journal = transaction->t_journal;
- WARN_ON(!transaction);
if (is_handle_aborted(handle))
- return -EROFS;
- journal = transaction->t_journal;
+ return -EIO;
jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
transaction->t_tid);
diff --git a/trunk/fs/jffs2/dir.c b/trunk/fs/jffs2/dir.c
index e3aac222472e..acd46a4160cb 100644
--- a/trunk/fs/jffs2/dir.c
+++ b/trunk/fs/jffs2/dir.c
@@ -22,7 +22,7 @@
#include
#include "nodelist.h"
-static int jffs2_readdir (struct file *, struct dir_context *);
+static int jffs2_readdir (struct file *, void *, filldir_t);
static int jffs2_create (struct inode *,struct dentry *,umode_t,
bool);
@@ -40,7 +40,7 @@ static int jffs2_rename (struct inode *, struct dentry *,
const struct file_operations jffs2_dir_operations =
{
.read = generic_read_dir,
- .iterate = jffs2_readdir,
+ .readdir = jffs2_readdir,
.unlocked_ioctl=jffs2_ioctl,
.fsync = jffs2_fsync,
.llseek = generic_file_llseek,
@@ -114,40 +114,60 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
/***********************************************************************/
-static int jffs2_readdir(struct file *file, struct dir_context *ctx)
+static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = file_inode(file);
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+ struct jffs2_inode_info *f;
+ struct inode *inode = file_inode(filp);
struct jffs2_full_dirent *fd;
- unsigned long curofs = 1;
+ unsigned long offset, curofs;
- jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n", inode->i_ino);
+ jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n",
+ file_inode(filp)->i_ino);
- if (!dir_emit_dots(file, ctx))
- return 0;
+ f = JFFS2_INODE_INFO(inode);
+
+ offset = filp->f_pos;
+
+ if (offset == 0) {
+ jffs2_dbg(1, "Dirent 0: \".\", ino #%lu\n", inode->i_ino);
+ if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
+ goto out;
+ offset++;
+ }
+ if (offset == 1) {
+ unsigned long pino = parent_ino(filp->f_path.dentry);
+ jffs2_dbg(1, "Dirent 1: \"..\", ino #%lu\n", pino);
+ if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0)
+ goto out;
+ offset++;
+ }
+ curofs=1;
mutex_lock(&f->sem);
for (fd = f->dents; fd; fd = fd->next) {
+
curofs++;
- /* First loop: curofs = 2; pos = 2 */
- if (curofs < ctx->pos) {
+ /* First loop: curofs = 2; offset = 2 */
+ if (curofs < offset) {
jffs2_dbg(2, "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n",
- fd->name, fd->ino, fd->type, curofs, (unsigned long)ctx->pos);
+ fd->name, fd->ino, fd->type, curofs, offset);
continue;
}
if (!fd->ino) {
jffs2_dbg(2, "Skipping deletion dirent \"%s\"\n",
fd->name);
- ctx->pos++;
+ offset++;
continue;
}
jffs2_dbg(2, "Dirent %ld: \"%s\", ino #%u, type %d\n",
- (unsigned long)ctx->pos, fd->name, fd->ino, fd->type);
- if (!dir_emit(ctx, fd->name, strlen(fd->name), fd->ino, fd->type))
+ offset, fd->name, fd->ino, fd->type);
+ if (filldir(dirent, fd->name, strlen(fd->name), offset, fd->ino, fd->type) < 0)
break;
- ctx->pos++;
+ offset++;
}
mutex_unlock(&f->sem);
+ out:
+ filp->f_pos = offset;
return 0;
}
diff --git a/trunk/fs/jfs/jfs_dtree.c b/trunk/fs/jfs/jfs_dtree.c
index 9f4ed13d9f15..0ddbeceafc62 100644
--- a/trunk/fs/jfs/jfs_dtree.c
+++ b/trunk/fs/jfs/jfs_dtree.c
@@ -3002,9 +3002,9 @@ static inline struct jfs_dirent *next_jfs_dirent(struct jfs_dirent *dirent)
* return: offset = (pn, index) of start entry
* of next jfs_readdir()/dtRead()
*/
-int jfs_readdir(struct file *file, struct dir_context *ctx)
+int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *ip = file_inode(file);
+ struct inode *ip = file_inode(filp);
struct nls_table *codepage = JFS_SBI(ip->i_sb)->nls_tab;
int rc = 0;
loff_t dtpos; /* legacy OS/2 style position */
@@ -3033,7 +3033,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
int overflow, fix_page, page_fixed = 0;
static int unique_pos = 2; /* If we can't fix broken index */
- if (ctx->pos == DIREND)
+ if (filp->f_pos == DIREND)
return 0;
if (DO_INDEX(ip)) {
@@ -3045,7 +3045,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
*/
do_index = 1;
- dir_index = (u32) ctx->pos;
+ dir_index = (u32) filp->f_pos;
if (dir_index > 1) {
struct dir_table_slot dirtab_slot;
@@ -3053,25 +3053,25 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
if (dtEmpty(ip) ||
(dir_index >= JFS_IP(ip)->next_index)) {
/* Stale position. Directory has shrunk */
- ctx->pos = DIREND;
+ filp->f_pos = DIREND;
return 0;
}
repeat:
rc = read_index(ip, dir_index, &dirtab_slot);
if (rc) {
- ctx->pos = DIREND;
+ filp->f_pos = DIREND;
return rc;
}
if (dirtab_slot.flag == DIR_INDEX_FREE) {
if (loop_count++ > JFS_IP(ip)->next_index) {
jfs_err("jfs_readdir detected "
"infinite loop!");
- ctx->pos = DIREND;
+ filp->f_pos = DIREND;
return 0;
}
dir_index = le32_to_cpu(dirtab_slot.addr2);
if (dir_index == -1) {
- ctx->pos = DIREND;
+ filp->f_pos = DIREND;
return 0;
}
goto repeat;
@@ -3080,13 +3080,13 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
index = dirtab_slot.slot;
DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc) {
- ctx->pos = DIREND;
+ filp->f_pos = DIREND;
return 0;
}
if (p->header.flag & BT_INTERNAL) {
jfs_err("jfs_readdir: bad index table");
DT_PUTPAGE(mp);
- ctx->pos = -1;
+ filp->f_pos = -1;
return 0;
}
} else {
@@ -3094,22 +3094,23 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
/*
* self "."
*/
- ctx->pos = 0;
- if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
+ filp->f_pos = 0;
+ if (filldir(dirent, ".", 1, 0, ip->i_ino,
+ DT_DIR))
return 0;
}
/*
* parent ".."
*/
- ctx->pos = 1;
- if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
+ filp->f_pos = 1;
+ if (filldir(dirent, "..", 2, 1, PARENT(ip), DT_DIR))
return 0;
/*
* Find first entry of left-most leaf
*/
if (dtEmpty(ip)) {
- ctx->pos = DIREND;
+ filp->f_pos = DIREND;
return 0;
}
@@ -3127,19 +3128,23 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
* pn > 0: Real entries, pn=1 -> leftmost page
* pn = index = -1: No more entries
*/
- dtpos = ctx->pos;
+ dtpos = filp->f_pos;
if (dtpos == 0) {
/* build "." entry */
- if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
+
+ if (filldir(dirent, ".", 1, filp->f_pos, ip->i_ino,
+ DT_DIR))
return 0;
dtoffset->index = 1;
- ctx->pos = dtpos;
+ filp->f_pos = dtpos;
}
if (dtoffset->pn == 0) {
if (dtoffset->index == 1) {
/* build ".." entry */
- if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
+
+ if (filldir(dirent, "..", 2, filp->f_pos,
+ PARENT(ip), DT_DIR))
return 0;
} else {
jfs_err("jfs_readdir called with "
@@ -3147,18 +3152,18 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
}
dtoffset->pn = 1;
dtoffset->index = 0;
- ctx->pos = dtpos;
+ filp->f_pos = dtpos;
}
if (dtEmpty(ip)) {
- ctx->pos = DIREND;
+ filp->f_pos = DIREND;
return 0;
}
- if ((rc = dtReadNext(ip, &ctx->pos, &btstack))) {
+ if ((rc = dtReadNext(ip, &filp->f_pos, &btstack))) {
jfs_err("jfs_readdir: unexpected rc = %d "
"from dtReadNext", rc);
- ctx->pos = DIREND;
+ filp->f_pos = DIREND;
return 0;
}
/* get start leaf page and index */
@@ -3166,7 +3171,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
/* offset beyond directory eof ? */
if (bn < 0) {
- ctx->pos = DIREND;
+ filp->f_pos = DIREND;
return 0;
}
}
@@ -3175,7 +3180,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
if (dirent_buf == 0) {
DT_PUTPAGE(mp);
jfs_warn("jfs_readdir: __get_free_page failed!");
- ctx->pos = DIREND;
+ filp->f_pos = DIREND;
return -ENOMEM;
}
@@ -3290,9 +3295,9 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
jfs_dirent = (struct jfs_dirent *) dirent_buf;
while (jfs_dirents--) {
- ctx->pos = jfs_dirent->position;
- if (!dir_emit(ctx, jfs_dirent->name,
- jfs_dirent->name_len,
+ filp->f_pos = jfs_dirent->position;
+ if (filldir(dirent, jfs_dirent->name,
+ jfs_dirent->name_len, filp->f_pos,
jfs_dirent->ino, DT_UNKNOWN))
goto out;
jfs_dirent = next_jfs_dirent(jfs_dirent);
@@ -3304,7 +3309,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
}
if (!overflow && (bn == 0)) {
- ctx->pos = DIREND;
+ filp->f_pos = DIREND;
break;
}
diff --git a/trunk/fs/jfs/jfs_dtree.h b/trunk/fs/jfs/jfs_dtree.h
index fd4169e6e698..2545bb317235 100644
--- a/trunk/fs/jfs/jfs_dtree.h
+++ b/trunk/fs/jfs/jfs_dtree.h
@@ -265,5 +265,5 @@ extern int dtDelete(tid_t tid, struct inode *ip, struct component_name * key,
extern int dtModify(tid_t tid, struct inode *ip, struct component_name * key,
ino_t * orig_ino, ino_t new_ino, int flag);
-extern int jfs_readdir(struct file *file, struct dir_context *ctx);
+extern int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir);
#endif /* !_H_JFS_DTREE */
diff --git a/trunk/fs/jfs/jfs_metapage.c b/trunk/fs/jfs/jfs_metapage.c
index 9e3aaff11f89..6740d34cd82b 100644
--- a/trunk/fs/jfs/jfs_metapage.c
+++ b/trunk/fs/jfs/jfs_metapage.c
@@ -571,10 +571,9 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
return ret;
}
-static void metapage_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void metapage_invalidatepage(struct page *page, unsigned long offset)
{
- BUG_ON(offset || length < PAGE_CACHE_SIZE);
+ BUG_ON(offset);
BUG_ON(PageWriteback(page));
diff --git a/trunk/fs/jfs/namei.c b/trunk/fs/jfs/namei.c
index 89186b7b9002..3b91a7ad6086 100644
--- a/trunk/fs/jfs/namei.c
+++ b/trunk/fs/jfs/namei.c
@@ -1529,7 +1529,7 @@ const struct inode_operations jfs_dir_inode_operations = {
const struct file_operations jfs_dir_operations = {
.read = generic_read_dir,
- .iterate = jfs_readdir,
+ .readdir = jfs_readdir,
.fsync = jfs_fsync,
.unlocked_ioctl = jfs_ioctl,
#ifdef CONFIG_COMPAT
diff --git a/trunk/fs/libfs.c b/trunk/fs/libfs.c
index c3a0837fb861..916da8c4158b 100644
--- a/trunk/fs/libfs.c
+++ b/trunk/fs/libfs.c
@@ -135,40 +135,60 @@ static inline unsigned char dt_type(struct inode *inode)
* both impossible due to the lock on directory.
*/
-int dcache_readdir(struct file *file, struct dir_context *ctx)
+int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
- struct dentry *dentry = file->f_path.dentry;
- struct dentry *cursor = file->private_data;
+ struct dentry *dentry = filp->f_path.dentry;
+ struct dentry *cursor = filp->private_data;
struct list_head *p, *q = &cursor->d_u.d_child;
+ ino_t ino;
+ int i = filp->f_pos;
- if (!dir_emit_dots(file, ctx))
- return 0;
- spin_lock(&dentry->d_lock);
- if (ctx->pos == 2)
- list_move(q, &dentry->d_subdirs);
-
- for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
- struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
- spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
- if (!simple_positive(next)) {
- spin_unlock(&next->d_lock);
- continue;
- }
+ switch (i) {
+ case 0:
+ ino = dentry->d_inode->i_ino;
+ if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
+ break;
+ filp->f_pos++;
+ i++;
+ /* fallthrough */
+ case 1:
+ ino = parent_ino(dentry);
+ if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
+ break;
+ filp->f_pos++;
+ i++;
+ /* fallthrough */
+ default:
+ spin_lock(&dentry->d_lock);
+ if (filp->f_pos == 2)
+ list_move(q, &dentry->d_subdirs);
- spin_unlock(&next->d_lock);
- spin_unlock(&dentry->d_lock);
- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
- next->d_inode->i_ino, dt_type(next->d_inode)))
- return 0;
- spin_lock(&dentry->d_lock);
- spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
- /* next is still alive */
- list_move(q, p);
- spin_unlock(&next->d_lock);
- p = q;
- ctx->pos++;
+ for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
+ struct dentry *next;
+ next = list_entry(p, struct dentry, d_u.d_child);
+ spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+ if (!simple_positive(next)) {
+ spin_unlock(&next->d_lock);
+ continue;
+ }
+
+ spin_unlock(&next->d_lock);
+ spin_unlock(&dentry->d_lock);
+ if (filldir(dirent, next->d_name.name,
+ next->d_name.len, filp->f_pos,
+ next->d_inode->i_ino,
+ dt_type(next->d_inode)) < 0)
+ return 0;
+ spin_lock(&dentry->d_lock);
+ spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+ /* next is still alive */
+ list_move(q, p);
+ spin_unlock(&next->d_lock);
+ p = q;
+ filp->f_pos++;
+ }
+ spin_unlock(&dentry->d_lock);
}
- spin_unlock(&dentry->d_lock);
return 0;
}
@@ -182,7 +202,7 @@ const struct file_operations simple_dir_operations = {
.release = dcache_dir_close,
.llseek = dcache_dir_lseek,
.read = generic_read_dir,
- .iterate = dcache_readdir,
+ .readdir = dcache_readdir,
.fsync = noop_fsync,
};
diff --git a/trunk/fs/logfs/dir.c b/trunk/fs/logfs/dir.c
index 6bdc347008f5..b82751082112 100644
--- a/trunk/fs/logfs/dir.c
+++ b/trunk/fs/logfs/dir.c
@@ -281,23 +281,17 @@ static int logfs_rmdir(struct inode *dir, struct dentry *dentry)
/* FIXME: readdir currently has it's own dir_walk code. I don't see a good
* way to combine the two copies */
-static int logfs_readdir(struct file *file, struct dir_context *ctx)
+#define IMPLICIT_NODES 2
+static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir)
{
struct inode *dir = file_inode(file);
- loff_t pos;
+ loff_t pos = file->f_pos - IMPLICIT_NODES;
struct page *page;
struct logfs_disk_dentry *dd;
+ int full;
- if (ctx->pos < 0)
- return -EINVAL;
-
- if (!dir_emit_dots(file, ctx))
- return 0;
-
- pos = ctx->pos - 2;
BUG_ON(pos < 0);
- for (;; pos++, ctx->pos++) {
- bool full;
+ for (;; pos++) {
if (beyond_eof(dir, pos))
break;
if (!logfs_exist_block(dir, pos)) {
@@ -312,17 +306,42 @@ static int logfs_readdir(struct file *file, struct dir_context *ctx)
dd = kmap(page);
BUG_ON(dd->namelen == 0);
- full = !dir_emit(ctx, (char *)dd->name,
- be16_to_cpu(dd->namelen),
- be64_to_cpu(dd->ino), dd->type);
+ full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen),
+ pos, be64_to_cpu(dd->ino), dd->type);
kunmap(page);
page_cache_release(page);
if (full)
break;
}
+
+ file->f_pos = pos + IMPLICIT_NODES;
return 0;
}
+static int logfs_readdir(struct file *file, void *buf, filldir_t filldir)
+{
+ struct inode *inode = file_inode(file);
+ ino_t pino = parent_ino(file->f_dentry);
+ int err;
+
+ if (file->f_pos < 0)
+ return -EINVAL;
+
+ if (file->f_pos == 0) {
+ if (filldir(buf, ".", 1, 1, inode->i_ino, DT_DIR) < 0)
+ return 0;
+ file->f_pos++;
+ }
+ if (file->f_pos == 1) {
+ if (filldir(buf, "..", 2, 2, pino, DT_DIR) < 0)
+ return 0;
+ file->f_pos++;
+ }
+
+ err = __logfs_readdir(file, buf, filldir);
+ return err;
+}
+
static void logfs_set_name(struct logfs_disk_dentry *dd, struct qstr *name)
{
dd->namelen = cpu_to_be16(name->len);
@@ -795,7 +814,7 @@ const struct inode_operations logfs_dir_iops = {
const struct file_operations logfs_dir_fops = {
.fsync = logfs_fsync,
.unlocked_ioctl = logfs_ioctl,
- .iterate = logfs_readdir,
+ .readdir = logfs_readdir,
.read = generic_read_dir,
.llseek = default_llseek,
};
diff --git a/trunk/fs/logfs/file.c b/trunk/fs/logfs/file.c
index 57914fc32b62..c2219a6dd3c8 100644
--- a/trunk/fs/logfs/file.c
+++ b/trunk/fs/logfs/file.c
@@ -159,8 +159,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
return __logfs_writepage(page);
}
-static void logfs_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void logfs_invalidatepage(struct page *page, unsigned long offset)
{
struct logfs_block *block = logfs_block(page);
diff --git a/trunk/fs/logfs/segment.c b/trunk/fs/logfs/segment.c
index d448a777166b..038da0991794 100644
--- a/trunk/fs/logfs/segment.c
+++ b/trunk/fs/logfs/segment.c
@@ -884,8 +884,7 @@ static struct logfs_area *alloc_area(struct super_block *sb)
return area;
}
-static void map_invalidatepage(struct page *page, unsigned int o,
- unsigned int l)
+static void map_invalidatepage(struct page *page, unsigned long l)
{
return;
}
diff --git a/trunk/fs/minix/dir.c b/trunk/fs/minix/dir.c
index 08c442902fcd..a9ed6f36e6ea 100644
--- a/trunk/fs/minix/dir.c
+++ b/trunk/fs/minix/dir.c
@@ -16,12 +16,12 @@
typedef struct minix_dir_entry minix_dirent;
typedef struct minix3_dir_entry minix3_dirent;
-static int minix_readdir(struct file *, struct dir_context *);
+static int minix_readdir(struct file *, void *, filldir_t);
const struct file_operations minix_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = minix_readdir,
+ .readdir = minix_readdir,
.fsync = generic_file_fsync,
};
@@ -82,23 +82,22 @@ static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
return (void*)((char*)de + sbi->s_dirsize);
}
-static int minix_readdir(struct file *file, struct dir_context *ctx)
+static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
- struct inode *inode = file_inode(file);
+ unsigned long pos = filp->f_pos;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
+ unsigned offset = pos & ~PAGE_CACHE_MASK;
+ unsigned long n = pos >> PAGE_CACHE_SHIFT;
+ unsigned long npages = dir_pages(inode);
struct minix_sb_info *sbi = minix_sb(sb);
unsigned chunk_size = sbi->s_dirsize;
- unsigned long npages = dir_pages(inode);
- unsigned long pos = ctx->pos;
- unsigned offset;
- unsigned long n;
+ char *name;
+ __u32 inumber;
- ctx->pos = pos = (pos + chunk_size-1) & ~(chunk_size-1);
+ pos = (pos + chunk_size-1) & ~(chunk_size-1);
if (pos >= inode->i_size)
- return 0;
-
- offset = pos & ~PAGE_CACHE_MASK;
- n = pos >> PAGE_CACHE_SHIFT;
+ goto done;
for ( ; n < npages; n++, offset = 0) {
char *p, *kaddr, *limit;
@@ -110,8 +109,6 @@ static int minix_readdir(struct file *file, struct dir_context *ctx)
p = kaddr+offset;
limit = kaddr + minix_last_byte(inode, n) - chunk_size;
for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
- const char *name;
- __u32 inumber;
if (sbi->s_version == MINIX_V3) {
minix3_dirent *de3 = (minix3_dirent *)p;
name = de3->name;
@@ -122,17 +119,24 @@ static int minix_readdir(struct file *file, struct dir_context *ctx)
inumber = de->inode;
}
if (inumber) {
+ int over;
+
unsigned l = strnlen(name, sbi->s_namelen);
- if (!dir_emit(ctx, name, l,
- inumber, DT_UNKNOWN)) {
+ offset = p - kaddr;
+ over = filldir(dirent, name, l,
+ (n << PAGE_CACHE_SHIFT) | offset,
+ inumber, DT_UNKNOWN);
+ if (over) {
dir_put_page(page);
- return 0;
+ goto done;
}
}
- ctx->pos += chunk_size;
}
dir_put_page(page);
}
+
+done:
+ filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
return 0;
}
diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c
index 9ed9361223c0..85e40d1c0a8f 100644
--- a/trunk/fs/namei.c
+++ b/trunk/fs/namei.c
@@ -1976,7 +1976,7 @@ static int path_lookupat(int dfd, const char *name,
err = complete_walk(nd);
if (!err && nd->flags & LOOKUP_DIRECTORY) {
- if (!can_lookup(nd->inode)) {
+ if (!nd->inode->i_op->lookup) {
path_put(&nd->path);
err = -ENOTDIR;
}
@@ -2850,7 +2850,7 @@ static int do_last(struct nameidata *nd, struct path *path,
if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
goto out;
error = -ENOTDIR;
- if ((nd->flags & LOOKUP_DIRECTORY) && !can_lookup(nd->inode))
+ if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup)
goto out;
audit_inode(name, nd->path.dentry, 0);
finish_open:
diff --git a/trunk/fs/ncpfs/dir.c b/trunk/fs/ncpfs/dir.c
index 0e7f00298213..816326093656 100644
--- a/trunk/fs/ncpfs/dir.c
+++ b/trunk/fs/ncpfs/dir.c
@@ -23,12 +23,12 @@
#include "ncp_fs.h"
-static void ncp_read_volume_list(struct file *, struct dir_context *,
+static void ncp_read_volume_list(struct file *, void *, filldir_t,
struct ncp_cache_control *);
-static void ncp_do_readdir(struct file *, struct dir_context *,
+static void ncp_do_readdir(struct file *, void *, filldir_t,
struct ncp_cache_control *);
-static int ncp_readdir(struct file *, struct dir_context *);
+static int ncp_readdir(struct file *, void *, filldir_t);
static int ncp_create(struct inode *, struct dentry *, umode_t, bool);
static struct dentry *ncp_lookup(struct inode *, struct dentry *, unsigned int);
@@ -49,7 +49,7 @@ const struct file_operations ncp_dir_operations =
{
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = ncp_readdir,
+ .readdir = ncp_readdir,
.unlocked_ioctl = ncp_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ncp_compat_ioctl,
@@ -424,9 +424,9 @@ static time_t ncp_obtain_mtime(struct dentry *dentry)
return ncp_date_dos2unix(i.modifyTime, i.modifyDate);
}
-static int ncp_readdir(struct file *file, struct dir_context *ctx)
+static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct dentry *dentry = file->f_path.dentry;
+ struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
struct page *page = NULL;
struct ncp_server *server = NCP_SERVER(inode);
@@ -440,7 +440,7 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx)
DDPRINTK("ncp_readdir: reading %s/%s, pos=%d\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
- (int) ctx->pos);
+ (int) filp->f_pos);
result = -EIO;
/* Do not generate '.' and '..' when server is dead. */
@@ -448,8 +448,16 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx)
goto out;
result = 0;
- if (!dir_emit_dots(file, ctx))
- goto out;
+ if (filp->f_pos == 0) {
+ if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR))
+ goto out;
+ filp->f_pos = 1;
+ }
+ if (filp->f_pos == 1) {
+ if (filldir(dirent, "..", 2, 1, parent_ino(dentry), DT_DIR))
+ goto out;
+ filp->f_pos = 2;
+ }
page = grab_cache_page(&inode->i_data, 0);
if (!page)
@@ -461,7 +469,7 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx)
if (!PageUptodate(page) || !ctl.head.eof)
goto init_cache;
- if (ctx->pos == 2) {
+ if (filp->f_pos == 2) {
if (jiffies - ctl.head.time >= NCP_MAX_AGE(server))
goto init_cache;
@@ -471,10 +479,10 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx)
goto init_cache;
}
- if (ctx->pos > ctl.head.end)
+ if (filp->f_pos > ctl.head.end)
goto finished;
- ctl.fpos = ctx->pos + (NCP_DIRCACHE_START - 2);
+ ctl.fpos = filp->f_pos + (NCP_DIRCACHE_START - 2);
ctl.ofs = ctl.fpos / NCP_DIRCACHE_SIZE;
ctl.idx = ctl.fpos % NCP_DIRCACHE_SIZE;
@@ -489,21 +497,21 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx)
}
while (ctl.idx < NCP_DIRCACHE_SIZE) {
struct dentry *dent;
- bool over;
+ int res;
dent = ncp_dget_fpos(ctl.cache->dentry[ctl.idx],
- dentry, ctx->pos);
+ dentry, filp->f_pos);
if (!dent)
goto invalid_cache;
- over = !dir_emit(ctx, dent->d_name.name,
- dent->d_name.len,
+ res = filldir(dirent, dent->d_name.name,
+ dent->d_name.len, filp->f_pos,
dent->d_inode->i_ino, DT_UNKNOWN);
dput(dent);
- if (over)
+ if (res)
goto finished;
- ctx->pos += 1;
+ filp->f_pos += 1;
ctl.idx += 1;
- if (ctx->pos > ctl.head.end)
+ if (filp->f_pos > ctl.head.end)
goto finished;
}
if (ctl.page) {
@@ -540,9 +548,9 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx)
ctl.valid = 1;
read_really:
if (ncp_is_server_root(inode)) {
- ncp_read_volume_list(file, ctx, &ctl);
+ ncp_read_volume_list(filp, dirent, filldir, &ctl);
} else {
- ncp_do_readdir(file, ctx, &ctl);
+ ncp_do_readdir(filp, dirent, filldir, &ctl);
}
ctl.head.end = ctl.fpos - 1;
ctl.head.eof = ctl.valid;
@@ -565,11 +573,11 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx)
}
static int
-ncp_fill_cache(struct file *file, struct dir_context *ctx,
+ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
struct ncp_cache_control *ctrl, struct ncp_entry_info *entry,
int inval_childs)
{
- struct dentry *newdent, *dentry = file->f_path.dentry;
+ struct dentry *newdent, *dentry = filp->f_path.dentry;
struct inode *dir = dentry->d_inode;
struct ncp_cache_control ctl = *ctrl;
struct qstr qname;
@@ -658,15 +666,15 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
end_advance:
if (!valid)
ctl.valid = 0;
- if (!ctl.filled && (ctl.fpos == ctx->pos)) {
+ if (!ctl.filled && (ctl.fpos == filp->f_pos)) {
if (!ino)
ino = find_inode_number(dentry, &qname);
if (!ino)
ino = iunique(dir->i_sb, 2);
- ctl.filled = !dir_emit(ctx, qname.name, qname.len,
- ino, DT_UNKNOWN);
+ ctl.filled = filldir(dirent, qname.name, qname.len,
+ filp->f_pos, ino, DT_UNKNOWN);
if (!ctl.filled)
- ctx->pos += 1;
+ filp->f_pos += 1;
}
ctl.fpos += 1;
ctl.idx += 1;
@@ -675,10 +683,10 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
}
static void
-ncp_read_volume_list(struct file *file, struct dir_context *ctx,
+ncp_read_volume_list(struct file *filp, void *dirent, filldir_t filldir,
struct ncp_cache_control *ctl)
{
- struct dentry *dentry = file->f_path.dentry;
+ struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
struct ncp_server *server = NCP_SERVER(inode);
struct ncp_volume_info info;
@@ -686,7 +694,7 @@ ncp_read_volume_list(struct file *file, struct dir_context *ctx,
int i;
DPRINTK("ncp_read_volume_list: pos=%ld\n",
- (unsigned long) ctx->pos);
+ (unsigned long) filp->f_pos);
for (i = 0; i < NCP_NUMBER_OF_VOLUMES; i++) {
int inval_dentry;
@@ -707,16 +715,16 @@ ncp_read_volume_list(struct file *file, struct dir_context *ctx,
}
inval_dentry = ncp_update_known_namespace(server, entry.i.volNumber, NULL);
entry.volume = entry.i.volNumber;
- if (!ncp_fill_cache(file, ctx, ctl, &entry, inval_dentry))
+ if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry, inval_dentry))
return;
}
}
static void
-ncp_do_readdir(struct file *file, struct dir_context *ctx,
+ncp_do_readdir(struct file *filp, void *dirent, filldir_t filldir,
struct ncp_cache_control *ctl)
{
- struct dentry *dentry = file->f_path.dentry;
+ struct dentry *dentry = filp->f_path.dentry;
struct inode *dir = dentry->d_inode;
struct ncp_server *server = NCP_SERVER(dir);
struct nw_search_sequence seq;
@@ -728,7 +736,7 @@ ncp_do_readdir(struct file *file, struct dir_context *ctx,
DPRINTK("ncp_do_readdir: %s/%s, fpos=%ld\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
- (unsigned long) ctx->pos);
+ (unsigned long) filp->f_pos);
PPRINTK("ncp_do_readdir: init %s, volnum=%d, dirent=%u\n",
dentry->d_name.name, NCP_FINFO(dir)->volNumber,
NCP_FINFO(dir)->dirEntNum);
@@ -770,7 +778,7 @@ ncp_do_readdir(struct file *file, struct dir_context *ctx,
rpl += onerpl;
rpls -= onerpl;
entry.volume = entry.i.volNumber;
- if (!ncp_fill_cache(file, ctx, ctl, &entry, 0))
+ if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry, 0))
break;
}
} while (more);
@@ -1021,6 +1029,15 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
DPRINTK("ncp_rmdir: removing %s/%s\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
+ /*
+ * fail with EBUSY if there are still references to this
+ * directory.
+ */
+ dentry_unhash(dentry);
+ error = -EBUSY;
+ if (!d_unhashed(dentry))
+ goto out;
+
len = sizeof(__name);
error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
dentry->d_name.len, !ncp_preserve_case(dir));
diff --git a/trunk/fs/nfs/dir.c b/trunk/fs/nfs/dir.c
index 5d051419527b..e093e73178b7 100644
--- a/trunk/fs/nfs/dir.c
+++ b/trunk/fs/nfs/dir.c
@@ -46,7 +46,7 @@
static int nfs_opendir(struct inode *, struct file *);
static int nfs_closedir(struct inode *, struct file *);
-static int nfs_readdir(struct file *, struct dir_context *);
+static int nfs_readdir(struct file *, void *, filldir_t);
static int nfs_fsync_dir(struct file *, loff_t, loff_t, int);
static loff_t nfs_llseek_dir(struct file *, loff_t, int);
static void nfs_readdir_clear_array(struct page*);
@@ -54,7 +54,7 @@ static void nfs_readdir_clear_array(struct page*);
const struct file_operations nfs_dir_operations = {
.llseek = nfs_llseek_dir,
.read = generic_read_dir,
- .iterate = nfs_readdir,
+ .readdir = nfs_readdir,
.open = nfs_opendir,
.release = nfs_closedir,
.fsync = nfs_fsync_dir,
@@ -147,7 +147,6 @@ typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, int);
typedef struct {
struct file *file;
struct page *page;
- struct dir_context *ctx;
unsigned long page_index;
u64 *dir_cookie;
u64 last_cookie;
@@ -253,7 +252,7 @@ int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page)
static
int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc)
{
- loff_t diff = desc->ctx->pos - desc->current_index;
+ loff_t diff = desc->file->f_pos - desc->current_index;
unsigned int index;
if (diff < 0)
@@ -290,7 +289,7 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
|| (nfsi->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))) {
ctx->duped = 0;
ctx->attr_gencount = nfsi->attr_gencount;
- } else if (new_pos < desc->ctx->pos) {
+ } else if (new_pos < desc->file->f_pos) {
if (ctx->duped > 0
&& ctx->dup_cookie == *desc->dir_cookie) {
if (printk_ratelimit()) {
@@ -308,7 +307,7 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
ctx->dup_cookie = *desc->dir_cookie;
ctx->duped = -1;
}
- desc->ctx->pos = new_pos;
+ desc->file->f_pos = new_pos;
desc->cache_entry_index = i;
return 0;
}
@@ -406,13 +405,13 @@ int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
}
static
-bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx)
+bool nfs_use_readdirplus(struct inode *dir, struct file *filp)
{
if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS))
return false;
if (test_and_clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags))
return true;
- if (ctx->pos == 0)
+ if (filp->f_pos == 0)
return true;
return false;
}
@@ -703,7 +702,8 @@ int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
* Once we've found the start of the dirent within a page: fill 'er up...
*/
static
-int nfs_do_filldir(nfs_readdir_descriptor_t *desc)
+int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
+ filldir_t filldir)
{
struct file *file = desc->file;
int i = 0;
@@ -721,12 +721,13 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc)
struct nfs_cache_array_entry *ent;
ent = &array->array[i];
- if (!dir_emit(desc->ctx, ent->string.name, ent->string.len,
- nfs_compat_user_ino64(ent->ino), ent->d_type)) {
+ if (filldir(dirent, ent->string.name, ent->string.len,
+ file->f_pos, nfs_compat_user_ino64(ent->ino),
+ ent->d_type) < 0) {
desc->eof = 1;
break;
}
- desc->ctx->pos++;
+ file->f_pos++;
if (i < (array->size-1))
*desc->dir_cookie = array->array[i+1].cookie;
else
@@ -758,7 +759,8 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc)
* directory in the page cache by the time we get here.
*/
static inline
-int uncached_readdir(nfs_readdir_descriptor_t *desc)
+int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
+ filldir_t filldir)
{
struct page *page = NULL;
int status;
@@ -783,7 +785,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc)
if (status < 0)
goto out_release;
- status = nfs_do_filldir(desc);
+ status = nfs_do_filldir(desc, dirent, filldir);
out:
dfprintk(DIRCACHE, "NFS: %s: returns %d\n",
@@ -798,36 +800,35 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc)
last cookie cache takes care of the common case of reading the
whole directory.
*/
-static int nfs_readdir(struct file *file, struct dir_context *ctx)
+static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct dentry *dentry = file->f_path.dentry;
+ struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
nfs_readdir_descriptor_t my_desc,
*desc = &my_desc;
- struct nfs_open_dir_context *dir_ctx = file->private_data;
+ struct nfs_open_dir_context *dir_ctx = filp->private_data;
int res;
dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
- (long long)ctx->pos);
+ (long long)filp->f_pos);
nfs_inc_stats(inode, NFSIOS_VFSGETDENTS);
/*
- * ctx->pos points to the dirent entry number.
+ * filp->f_pos points to the dirent entry number.
* *desc->dir_cookie has the cookie for the next entry. We have
* to either find the entry with the appropriate number or
* revalidate the cookie.
*/
memset(desc, 0, sizeof(*desc));
- desc->file = file;
- desc->ctx = ctx;
+ desc->file = filp;
desc->dir_cookie = &dir_ctx->dir_cookie;
desc->decode = NFS_PROTO(inode)->decode_dirent;
- desc->plus = nfs_use_readdirplus(inode, ctx) ? 1 : 0;
+ desc->plus = nfs_use_readdirplus(inode, filp) ? 1 : 0;
nfs_block_sillyrename(dentry);
- res = nfs_revalidate_mapping(inode, file->f_mapping);
+ res = nfs_revalidate_mapping(inode, filp->f_mapping);
if (res < 0)
goto out;
@@ -839,7 +840,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
/* This means either end of directory */
if (*desc->dir_cookie && desc->eof == 0) {
/* Or that the server has 'lost' a cookie */
- res = uncached_readdir(desc);
+ res = uncached_readdir(desc, dirent, filldir);
if (res == 0)
continue;
}
@@ -856,7 +857,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
if (res < 0)
break;
- res = nfs_do_filldir(desc);
+ res = nfs_do_filldir(desc, dirent, filldir);
if (res < 0)
break;
} while (!desc->eof);
diff --git a/trunk/fs/nfs/file.c b/trunk/fs/nfs/file.c
index 6b4a79f4ad1d..a87a44f84113 100644
--- a/trunk/fs/nfs/file.c
+++ b/trunk/fs/nfs/file.c
@@ -451,13 +451,11 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
* - Called if either PG_private or PG_fscache is set on the page
* - Caller holds page lock
*/
-static void nfs_invalidate_page(struct page *page, unsigned int offset,
- unsigned int length)
+static void nfs_invalidate_page(struct page *page, unsigned long offset)
{
- dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
- page, offset, length);
+ dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %lu)\n", page, offset);
- if (offset != 0 || length < PAGE_CACHE_SIZE)
+ if (offset != 0)
return;
/* Cancel any unstarted writes on this page */
nfs_wb_page_cancel(page_file_mapping(page)->host, page);
diff --git a/trunk/fs/nfsd/nfs4recover.c b/trunk/fs/nfsd/nfs4recover.c
index 105a3b080d12..4e9a21db867a 100644
--- a/trunk/fs/nfsd/nfs4recover.c
+++ b/trunk/fs/nfsd/nfs4recover.c
@@ -240,16 +240,11 @@ struct name_list {
struct list_head list;
};
-struct nfs4_dir_ctx {
- struct dir_context ctx;
- struct list_head names;
-};
-
static int
nfsd4_build_namelist(void *arg, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
- struct nfs4_dir_ctx *ctx = arg;
+ struct list_head *names = arg;
struct name_list *entry;
if (namlen != HEXDIR_LEN - 1)
@@ -259,7 +254,7 @@ nfsd4_build_namelist(void *arg, const char *name, int namlen,
return -ENOMEM;
memcpy(entry->name, name, HEXDIR_LEN - 1);
entry->name[HEXDIR_LEN - 1] = '\0';
- list_add(&entry->list, &ctx->names);
+ list_add(&entry->list, names);
return 0;
}
@@ -268,10 +263,7 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn)
{
const struct cred *original_cred;
struct dentry *dir = nn->rec_file->f_path.dentry;
- struct nfs4_dir_ctx ctx = {
- .ctx.actor = nfsd4_build_namelist,
- .names = LIST_HEAD_INIT(ctx.names)
- };
+ LIST_HEAD(names);
int status;
status = nfs4_save_creds(&original_cred);
@@ -284,11 +276,11 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn)
return status;
}
- status = iterate_dir(nn->rec_file, &ctx.ctx);
+ status = vfs_readdir(nn->rec_file, nfsd4_build_namelist, &names);
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
- while (!list_empty(&ctx.names)) {
+ while (!list_empty(&names)) {
struct name_list *entry;
- entry = list_entry(ctx.names.next, struct name_list, list);
+ entry = list_entry(names.next, struct name_list, list);
if (!status) {
struct dentry *dentry;
dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1);
diff --git a/trunk/fs/nfsd/vfs.c b/trunk/fs/nfsd/vfs.c
index a6bc8a7423db..84ce601d8063 100644
--- a/trunk/fs/nfsd/vfs.c
+++ b/trunk/fs/nfsd/vfs.c
@@ -1912,7 +1912,6 @@ struct buffered_dirent {
};
struct readdir_data {
- struct dir_context ctx;
char *dirent;
size_t used;
int full;
@@ -1944,15 +1943,13 @@ static int nfsd_buffered_filldir(void *__buf, const char *name, int namlen,
static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func,
struct readdir_cd *cdp, loff_t *offsetp)
{
+ struct readdir_data buf;
struct buffered_dirent *de;
int host_err;
int size;
loff_t offset;
- struct readdir_data buf = {
- .ctx.actor = nfsd_buffered_filldir,
- .dirent = (void *)__get_free_page(GFP_KERNEL)
- };
+ buf.dirent = (void *)__get_free_page(GFP_KERNEL);
if (!buf.dirent)
return nfserrno(-ENOMEM);
@@ -1966,7 +1963,7 @@ static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func,
buf.used = 0;
buf.full = 0;
- host_err = iterate_dir(file, &buf.ctx);
+ host_err = vfs_readdir(file, nfsd_buffered_filldir, &buf);
if (buf.full)
host_err = 0;
diff --git a/trunk/fs/nilfs2/dir.c b/trunk/fs/nilfs2/dir.c
index 197a63e9d102..f30b017740a7 100644
--- a/trunk/fs/nilfs2/dir.c
+++ b/trunk/fs/nilfs2/dir.c
@@ -256,18 +256,22 @@ static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode)
de->file_type = nilfs_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
}
-static int nilfs_readdir(struct file *file, struct dir_context *ctx)
+static int nilfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- loff_t pos = ctx->pos;
- struct inode *inode = file_inode(file);
+ loff_t pos = filp->f_pos;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
unsigned int offset = pos & ~PAGE_CACHE_MASK;
unsigned long n = pos >> PAGE_CACHE_SHIFT;
unsigned long npages = dir_pages(inode);
/* unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */
+ unsigned char *types = NULL;
+ int ret;
if (pos > inode->i_size - NILFS_DIR_REC_LEN(1))
- return 0;
+ goto success;
+
+ types = nilfs_filetype_table;
for ( ; n < npages; n++, offset = 0) {
char *kaddr, *limit;
@@ -277,8 +281,9 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
if (IS_ERR(page)) {
nilfs_error(sb, __func__, "bad page in #%lu",
inode->i_ino);
- ctx->pos += PAGE_CACHE_SIZE - offset;
- return -EIO;
+ filp->f_pos += PAGE_CACHE_SIZE - offset;
+ ret = -EIO;
+ goto done;
}
kaddr = page_address(page);
de = (struct nilfs_dir_entry *)(kaddr + offset);
@@ -288,28 +293,35 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
if (de->rec_len == 0) {
nilfs_error(sb, __func__,
"zero-length directory entry");
+ ret = -EIO;
nilfs_put_page(page);
- return -EIO;
+ goto done;
}
if (de->inode) {
- unsigned char t;
+ int over;
+ unsigned char d_type = DT_UNKNOWN;
- if (de->file_type < NILFS_FT_MAX)
- t = nilfs_filetype_table[de->file_type];
- else
- t = DT_UNKNOWN;
+ if (types && de->file_type < NILFS_FT_MAX)
+ d_type = types[de->file_type];
- if (!dir_emit(ctx, de->name, de->name_len,
- le64_to_cpu(de->inode), t)) {
+ offset = (char *)de - kaddr;
+ over = filldir(dirent, de->name, de->name_len,
+ (n<inode), d_type);
+ if (over) {
nilfs_put_page(page);
- return 0;
+ goto success;
}
}
- ctx->pos += nilfs_rec_len_from_disk(de->rec_len);
+ filp->f_pos += nilfs_rec_len_from_disk(de->rec_len);
}
nilfs_put_page(page);
}
- return 0;
+
+success:
+ ret = 0;
+done:
+ return ret;
}
/*
@@ -666,7 +678,7 @@ int nilfs_empty_dir(struct inode *inode)
const struct file_operations nilfs_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = nilfs_readdir,
+ .readdir = nilfs_readdir,
.unlocked_ioctl = nilfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = nilfs_compat_ioctl,
diff --git a/trunk/fs/ntfs/aops.c b/trunk/fs/ntfs/aops.c
index d267ea6aa1a0..fa9c05f97af4 100644
--- a/trunk/fs/ntfs/aops.c
+++ b/trunk/fs/ntfs/aops.c
@@ -1372,7 +1372,7 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
* The page may have dirty, unmapped buffers. Make them
* freeable here, so the page does not leak.
*/
- block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ block_invalidatepage(page, 0);
unlock_page(page);
ntfs_debug("Write outside i_size - truncated?");
return 0;
diff --git a/trunk/fs/ntfs/dir.c b/trunk/fs/ntfs/dir.c
index 9e38dafa3bc7..aa411c3f20e9 100644
--- a/trunk/fs/ntfs/dir.c
+++ b/trunk/fs/ntfs/dir.c
@@ -1004,11 +1004,13 @@ u64 ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const ntfschar *uname,
/**
* ntfs_filldir - ntfs specific filldir method
* @vol: current ntfs volume
+ * @fpos: position in the directory
* @ndir: ntfs inode of current directory
* @ia_page: page in which the index allocation buffer @ie is in resides
* @ie: current index entry
* @name: buffer to use for the converted name
- * @actor: what to feed the entries to
+ * @dirent: vfs filldir callback context
+ * @filldir: vfs filldir callback
*
* Convert the Unicode @name to the loaded NLS and pass it to the @filldir
* callback.
@@ -1022,12 +1024,12 @@ u64 ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const ntfschar *uname,
* retake the lock if we are returning a non-zero value as ntfs_readdir()
* would need to drop the lock immediately anyway.
*/
-static inline int ntfs_filldir(ntfs_volume *vol,
+static inline int ntfs_filldir(ntfs_volume *vol, loff_t fpos,
ntfs_inode *ndir, struct page *ia_page, INDEX_ENTRY *ie,
- u8 *name, struct dir_context *actor)
+ u8 *name, void *dirent, filldir_t filldir)
{
unsigned long mref;
- int name_len;
+ int name_len, rc;
unsigned dt_type;
FILE_NAME_TYPE_FLAGS name_type;
@@ -1066,14 +1068,13 @@ static inline int ntfs_filldir(ntfs_volume *vol,
if (ia_page)
unlock_page(ia_page);
ntfs_debug("Calling filldir for %s with len %i, fpos 0x%llx, inode "
- "0x%lx, DT_%s.", name, name_len, actor->pos, mref,
+ "0x%lx, DT_%s.", name, name_len, fpos, mref,
dt_type == DT_DIR ? "DIR" : "REG");
- if (!dir_emit(actor, name, name_len, mref, dt_type))
- return 1;
+ rc = filldir(dirent, name, name_len, fpos, mref, dt_type);
/* Relock the page but not if we are aborting ->readdir. */
- if (ia_page)
+ if (!rc && ia_page)
lock_page(ia_page);
- return 0;
+ return rc;
}
/*
@@ -1096,11 +1097,11 @@ static inline int ntfs_filldir(ntfs_volume *vol,
* removes them again after the write is complete after which it
* unlocks the page.
*/
-static int ntfs_readdir(struct file *file, struct dir_context *actor)
+static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
s64 ia_pos, ia_start, prev_ia_pos, bmp_pos;
- loff_t i_size;
- struct inode *bmp_vi, *vdir = file_inode(file);
+ loff_t fpos, i_size;
+ struct inode *bmp_vi, *vdir = file_inode(filp);
struct super_block *sb = vdir->i_sb;
ntfs_inode *ndir = NTFS_I(vdir);
ntfs_volume *vol = NTFS_SB(sb);
@@ -1115,16 +1116,33 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor)
u8 *kaddr, *bmp, *index_end;
ntfs_attr_search_ctx *ctx;
+ fpos = filp->f_pos;
ntfs_debug("Entering for inode 0x%lx, fpos 0x%llx.",
- vdir->i_ino, actor->pos);
+ vdir->i_ino, fpos);
rc = err = 0;
/* Are we at end of dir yet? */
i_size = i_size_read(vdir);
- if (actor->pos >= i_size + vol->mft_record_size)
- return 0;
+ if (fpos >= i_size + vol->mft_record_size)
+ goto done;
/* Emulate . and .. for all directories. */
- if (!dir_emit_dots(file, actor))
- return 0;
+ if (!fpos) {
+ ntfs_debug("Calling filldir for . with len 1, fpos 0x0, "
+ "inode 0x%lx, DT_DIR.", vdir->i_ino);
+ rc = filldir(dirent, ".", 1, fpos, vdir->i_ino, DT_DIR);
+ if (rc)
+ goto done;
+ fpos++;
+ }
+ if (fpos == 1) {
+ ntfs_debug("Calling filldir for .. with len 2, fpos 0x1, "
+ "inode 0x%lx, DT_DIR.",
+ (unsigned long)parent_ino(filp->f_path.dentry));
+ rc = filldir(dirent, "..", 2, fpos,
+ parent_ino(filp->f_path.dentry), DT_DIR);
+ if (rc)
+ goto done;
+ fpos++;
+ }
m = NULL;
ctx = NULL;
/*
@@ -1137,7 +1155,7 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor)
goto err_out;
}
/* Are we jumping straight into the index allocation attribute? */
- if (actor->pos >= vol->mft_record_size)
+ if (fpos >= vol->mft_record_size)
goto skip_index_root;
/* Get hold of the mft record for the directory. */
m = map_mft_record(ndir);
@@ -1152,7 +1170,7 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor)
goto err_out;
}
/* Get the offset into the index root attribute. */
- ir_pos = (s64)actor->pos;
+ ir_pos = (s64)fpos;
/* Find the index root attribute in the mft record. */
err = ntfs_attr_lookup(AT_INDEX_ROOT, I30, 4, CASE_SENSITIVE, 0, NULL,
0, ctx);
@@ -1208,9 +1226,10 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor)
if (ir_pos > (u8*)ie - (u8*)ir)
continue;
/* Advance the position even if going to skip the entry. */
- actor->pos = (u8*)ie - (u8*)ir;
+ fpos = (u8*)ie - (u8*)ir;
/* Submit the name to the filldir callback. */
- rc = ntfs_filldir(vol, ndir, NULL, ie, name, actor);
+ rc = ntfs_filldir(vol, fpos, ndir, NULL, ie, name, dirent,
+ filldir);
if (rc) {
kfree(ir);
goto abort;
@@ -1223,12 +1242,12 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor)
if (!NInoIndexAllocPresent(ndir))
goto EOD;
/* Advance fpos to the beginning of the index allocation. */
- actor->pos = vol->mft_record_size;
+ fpos = vol->mft_record_size;
skip_index_root:
kaddr = NULL;
prev_ia_pos = -1LL;
/* Get the offset into the index allocation attribute. */
- ia_pos = (s64)actor->pos - vol->mft_record_size;
+ ia_pos = (s64)fpos - vol->mft_record_size;
ia_mapping = vdir->i_mapping;
ntfs_debug("Inode 0x%lx, getting index bitmap.", vdir->i_ino);
bmp_vi = ntfs_attr_iget(vdir, AT_BITMAP, I30, 4);
@@ -1390,7 +1409,7 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor)
if (ia_pos - ia_start > (u8*)ie - (u8*)ia)
continue;
/* Advance the position even if going to skip the entry. */
- actor->pos = (u8*)ie - (u8*)ia +
+ fpos = (u8*)ie - (u8*)ia +
(sle64_to_cpu(ia->index_block_vcn) <<
ndir->itype.index.vcn_size_bits) +
vol->mft_record_size;
@@ -1400,7 +1419,8 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor)
* before returning, unless a non-zero value is returned in
* which case the page is left unlocked.
*/
- rc = ntfs_filldir(vol, ndir, ia_page, ie, name, actor);
+ rc = ntfs_filldir(vol, fpos, ndir, ia_page, ie, name, dirent,
+ filldir);
if (rc) {
/* @ia_page is already unlocked in this case. */
ntfs_unmap_page(ia_page);
@@ -1419,9 +1439,18 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor)
iput(bmp_vi);
EOD:
/* We are finished, set fpos to EOD. */
- actor->pos = i_size + vol->mft_record_size;
+ fpos = i_size + vol->mft_record_size;
abort:
kfree(name);
+done:
+#ifdef DEBUG
+ if (!rc)
+ ntfs_debug("EOD, fpos 0x%llx, returning 0.", fpos);
+ else
+ ntfs_debug("filldir returned %i, fpos 0x%llx, returning 0.",
+ rc, fpos);
+#endif
+ filp->f_pos = fpos;
return 0;
err_out:
if (bmp_page) {
@@ -1442,6 +1471,7 @@ static int ntfs_readdir(struct file *file, struct dir_context *actor)
if (!err)
err = -EIO;
ntfs_debug("Failed. Returning error code %i.", -err);
+ filp->f_pos = fpos;
return err;
}
@@ -1541,7 +1571,7 @@ static int ntfs_dir_fsync(struct file *filp, loff_t start, loff_t end,
const struct file_operations ntfs_dir_ops = {
.llseek = generic_file_llseek, /* Seek inside directory. */
.read = generic_read_dir, /* Return -EISDIR. */
- .iterate = ntfs_readdir, /* Read directory contents. */
+ .readdir = ntfs_readdir, /* Read directory contents. */
#ifdef NTFS_RW
.fsync = ntfs_dir_fsync, /* Sync a directory to disk. */
/*.aio_fsync = ,*/ /* Sync all outstanding async
diff --git a/trunk/fs/ocfs2/aops.c b/trunk/fs/ocfs2/aops.c
index 79736a28d84f..20dfec72e903 100644
--- a/trunk/fs/ocfs2/aops.c
+++ b/trunk/fs/ocfs2/aops.c
@@ -603,12 +603,11 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
* from ext3. PageChecked() bits have been removed as OCFS2 does not
* do journalled data.
*/
-static void ocfs2_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void ocfs2_invalidatepage(struct page *page, unsigned long offset)
{
journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;
- jbd2_journal_invalidatepage(journal, page, offset, length);
+ jbd2_journal_invalidatepage(journal, page, offset);
}
static int ocfs2_releasepage(struct page *page, gfp_t wait)
diff --git a/trunk/fs/ocfs2/dir.c b/trunk/fs/ocfs2/dir.c
index eb760d8acd50..f1e1aed8f638 100644
--- a/trunk/fs/ocfs2/dir.c
+++ b/trunk/fs/ocfs2/dir.c
@@ -1761,10 +1761,11 @@ int __ocfs2_add_entry(handle_t *handle,
static int ocfs2_dir_foreach_blk_id(struct inode *inode,
u64 *f_version,
- struct dir_context *ctx)
+ loff_t *f_pos, void *priv,
+ filldir_t filldir, int *filldir_err)
{
- int ret, i;
- unsigned long offset = ctx->pos;
+ int ret, i, filldir_ret;
+ unsigned long offset = *f_pos;
struct buffer_head *di_bh = NULL;
struct ocfs2_dinode *di;
struct ocfs2_inline_data *data;
@@ -1780,7 +1781,8 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode,
di = (struct ocfs2_dinode *)di_bh->b_data;
data = &di->id2.i_data;
- while (ctx->pos < i_size_read(inode)) {
+ while (*f_pos < i_size_read(inode)) {
+revalidate:
/* If the dir block has changed since the last call to
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
@@ -1800,31 +1802,50 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode,
break;
i += le16_to_cpu(de->rec_len);
}
- ctx->pos = offset = i;
+ *f_pos = offset = i;
*f_version = inode->i_version;
}
- de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos);
- if (!ocfs2_check_dir_entry(inode, de, di_bh, ctx->pos)) {
+ de = (struct ocfs2_dir_entry *) (data->id_data + *f_pos);
+ if (!ocfs2_check_dir_entry(inode, de, di_bh, *f_pos)) {
/* On error, skip the f_pos to the end. */
- ctx->pos = i_size_read(inode);
- break;
+ *f_pos = i_size_read(inode);
+ goto out;
}
offset += le16_to_cpu(de->rec_len);
if (le64_to_cpu(de->inode)) {
+ /* We might block in the next section
+ * if the data destination is
+ * currently swapped out. So, use a
+ * version stamp to detect whether or
+ * not the directory has been modified
+ * during the copy operation.
+ */
+ u64 version = *f_version;
unsigned char d_type = DT_UNKNOWN;
if (de->file_type < OCFS2_FT_MAX)
d_type = ocfs2_filetype_table[de->file_type];
- if (!dir_emit(ctx, de->name, de->name_len,
- le64_to_cpu(de->inode), d_type))
- goto out;
+ filldir_ret = filldir(priv, de->name,
+ de->name_len,
+ *f_pos,
+ le64_to_cpu(de->inode),
+ d_type);
+ if (filldir_ret) {
+ if (filldir_err)
+ *filldir_err = filldir_ret;
+ break;
+ }
+ if (version != *f_version)
+ goto revalidate;
}
- ctx->pos += le16_to_cpu(de->rec_len);
+ *f_pos += le16_to_cpu(de->rec_len);
}
+
out:
brelse(di_bh);
+
return 0;
}
@@ -1834,26 +1855,27 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode,
*/
static int ocfs2_dir_foreach_blk_el(struct inode *inode,
u64 *f_version,
- struct dir_context *ctx,
- bool persist)
+ loff_t *f_pos, void *priv,
+ filldir_t filldir, int *filldir_err)
{
+ int error = 0;
unsigned long offset, blk, last_ra_blk = 0;
- int i;
+ int i, stored;
struct buffer_head * bh, * tmp;
struct ocfs2_dir_entry * de;
struct super_block * sb = inode->i_sb;
unsigned int ra_sectors = 16;
- int stored = 0;
+ stored = 0;
bh = NULL;
- offset = ctx->pos & (sb->s_blocksize - 1);
+ offset = (*f_pos) & (sb->s_blocksize - 1);
- while (ctx->pos < i_size_read(inode)) {
- blk = ctx->pos >> sb->s_blocksize_bits;
+ while (!error && !stored && *f_pos < i_size_read(inode)) {
+ blk = (*f_pos) >> sb->s_blocksize_bits;
if (ocfs2_read_dir_block(inode, blk, &bh, 0)) {
/* Skip the corrupt dirblock and keep trying */
- ctx->pos += sb->s_blocksize - offset;
+ *f_pos += sb->s_blocksize - offset;
continue;
}
@@ -1875,6 +1897,7 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
ra_sectors = 8;
}
+revalidate:
/* If the dir block has changed since the last call to
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
@@ -1894,64 +1917,93 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
i += le16_to_cpu(de->rec_len);
}
offset = i;
- ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
+ *f_pos = ((*f_pos) & ~(sb->s_blocksize - 1))
| offset;
*f_version = inode->i_version;
}
- while (ctx->pos < i_size_read(inode)
+ while (!error && *f_pos < i_size_read(inode)
&& offset < sb->s_blocksize) {
de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
/* On error, skip the f_pos to the
next block. */
- ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
+ *f_pos = ((*f_pos) | (sb->s_blocksize - 1)) + 1;
brelse(bh);
- continue;
+ goto out;
}
+ offset += le16_to_cpu(de->rec_len);
if (le64_to_cpu(de->inode)) {
+ /* We might block in the next section
+ * if the data destination is
+ * currently swapped out. So, use a
+ * version stamp to detect whether or
+ * not the directory has been modified
+ * during the copy operation.
+ */
+ unsigned long version = *f_version;
unsigned char d_type = DT_UNKNOWN;
if (de->file_type < OCFS2_FT_MAX)
d_type = ocfs2_filetype_table[de->file_type];
- if (!dir_emit(ctx, de->name,
+ error = filldir(priv, de->name,
de->name_len,
+ *f_pos,
le64_to_cpu(de->inode),
- d_type)) {
- brelse(bh);
- return 0;
+ d_type);
+ if (error) {
+ if (filldir_err)
+ *filldir_err = error;
+ break;
}
- stored++;
+ if (version != *f_version)
+ goto revalidate;
+ stored ++;
}
- offset += le16_to_cpu(de->rec_len);
- ctx->pos += le16_to_cpu(de->rec_len);
+ *f_pos += le16_to_cpu(de->rec_len);
}
offset = 0;
brelse(bh);
bh = NULL;
- if (!persist && stored)
- break;
}
- return 0;
+
+ stored = 0;
+out:
+ return stored;
}
static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version,
- struct dir_context *ctx,
- bool persist)
+ loff_t *f_pos, void *priv, filldir_t filldir,
+ int *filldir_err)
{
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
- return ocfs2_dir_foreach_blk_id(inode, f_version, ctx);
- return ocfs2_dir_foreach_blk_el(inode, f_version, ctx, persist);
+ return ocfs2_dir_foreach_blk_id(inode, f_version, f_pos, priv,
+ filldir, filldir_err);
+
+ return ocfs2_dir_foreach_blk_el(inode, f_version, f_pos, priv, filldir,
+ filldir_err);
}
/*
* This is intended to be called from inside other kernel functions,
* so we fake some arguments.
*/
-int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx)
+int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv,
+ filldir_t filldir)
{
+ int ret = 0, filldir_err = 0;
u64 version = inode->i_version;
- ocfs2_dir_foreach_blk(inode, &version, ctx, true);
+
+ while (*f_pos < i_size_read(inode)) {
+ ret = ocfs2_dir_foreach_blk(inode, &version, f_pos, priv,
+ filldir, &filldir_err);
+ if (ret || filldir_err)
+ break;
+ }
+
+ if (ret > 0)
+ ret = -EIO;
+
return 0;
}
@@ -1959,15 +2011,15 @@ int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx)
* ocfs2_readdir()
*
*/
-int ocfs2_readdir(struct file *file, struct dir_context *ctx)
+int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
int error = 0;
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
int lock_level = 0;
trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno);
- error = ocfs2_inode_lock_atime(inode, file->f_path.mnt, &lock_level);
+ error = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level);
if (lock_level && error >= 0) {
/* We release EX lock which used to update atime
* and get PR lock again to reduce contention
@@ -1983,7 +2035,8 @@ int ocfs2_readdir(struct file *file, struct dir_context *ctx)
goto bail_nolock;
}
- error = ocfs2_dir_foreach_blk(inode, &file->f_version, ctx, false);
+ error = ocfs2_dir_foreach_blk(inode, &filp->f_version, &filp->f_pos,
+ dirent, filldir, NULL);
ocfs2_inode_unlock(inode, lock_level);
if (error)
@@ -2067,7 +2120,6 @@ int ocfs2_check_dir_for_entry(struct inode *dir,
}
struct ocfs2_empty_dir_priv {
- struct dir_context ctx;
unsigned seen_dot;
unsigned seen_dot_dot;
unsigned seen_other;
@@ -2152,9 +2204,8 @@ static int ocfs2_empty_dir_dx(struct inode *inode,
int ocfs2_empty_dir(struct inode *inode)
{
int ret;
- struct ocfs2_empty_dir_priv priv = {
- .ctx.actor = ocfs2_empty_dir_filldir
- };
+ loff_t start = 0;
+ struct ocfs2_empty_dir_priv priv;
memset(&priv, 0, sizeof(priv));
@@ -2168,7 +2219,7 @@ int ocfs2_empty_dir(struct inode *inode)
*/
}
- ret = ocfs2_dir_foreach(inode, &priv.ctx);
+ ret = ocfs2_dir_foreach(inode, &start, &priv, ocfs2_empty_dir_filldir);
if (ret)
mlog_errno(ret);
diff --git a/trunk/fs/ocfs2/dir.h b/trunk/fs/ocfs2/dir.h
index f0344b75b14d..e683f3deb645 100644
--- a/trunk/fs/ocfs2/dir.h
+++ b/trunk/fs/ocfs2/dir.h
@@ -92,8 +92,9 @@ int ocfs2_find_files_on_disk(const char *name,
struct ocfs2_dir_lookup_result *res);
int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
int namelen, u64 *blkno);
-int ocfs2_readdir(struct file *file, struct dir_context *ctx);
-int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx);
+int ocfs2_readdir(struct file *filp, void *dirent, filldir_t filldir);
+int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv,
+ filldir_t filldir);
int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
struct inode *dir,
struct buffer_head *parent_fe_bh,
diff --git a/trunk/fs/ocfs2/file.c b/trunk/fs/ocfs2/file.c
index 8a38714f1d92..ff54014a24ec 100644
--- a/trunk/fs/ocfs2/file.c
+++ b/trunk/fs/ocfs2/file.c
@@ -2712,7 +2712,7 @@ const struct file_operations ocfs2_fops = {
const struct file_operations ocfs2_dops = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = ocfs2_readdir,
+ .readdir = ocfs2_readdir,
.fsync = ocfs2_sync_file,
.release = ocfs2_dir_release,
.open = ocfs2_dir_open,
@@ -2759,7 +2759,7 @@ const struct file_operations ocfs2_fops_no_plocks = {
const struct file_operations ocfs2_dops_no_plocks = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = ocfs2_readdir,
+ .readdir = ocfs2_readdir,
.fsync = ocfs2_sync_file,
.release = ocfs2_dir_release,
.open = ocfs2_dir_open,
diff --git a/trunk/fs/ocfs2/journal.c b/trunk/fs/ocfs2/journal.c
index 242170d83971..8eccfabcd12e 100644
--- a/trunk/fs/ocfs2/journal.c
+++ b/trunk/fs/ocfs2/journal.c
@@ -1941,7 +1941,6 @@ void ocfs2_orphan_scan_start(struct ocfs2_super *osb)
}
struct ocfs2_orphan_filldir_priv {
- struct dir_context ctx;
struct inode *head;
struct ocfs2_super *osb;
};
@@ -1978,11 +1977,11 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
{
int status;
struct inode *orphan_dir_inode = NULL;
- struct ocfs2_orphan_filldir_priv priv = {
- .ctx.actor = ocfs2_orphan_filldir,
- .osb = osb,
- .head = *head
- };
+ struct ocfs2_orphan_filldir_priv priv;
+ loff_t pos = 0;
+
+ priv.osb = osb;
+ priv.head = *head;
orphan_dir_inode = ocfs2_get_system_file_inode(osb,
ORPHAN_DIR_SYSTEM_INODE,
@@ -2000,7 +1999,8 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
goto out;
}
- status = ocfs2_dir_foreach(orphan_dir_inode, &priv.ctx);
+ status = ocfs2_dir_foreach(orphan_dir_inode, &pos, &priv,
+ ocfs2_orphan_filldir);
if (status) {
mlog_errno(status);
goto out_cluster;
diff --git a/trunk/fs/omfs/dir.c b/trunk/fs/omfs/dir.c
index 1b8e9e8405b2..acbaebcad3a8 100644
--- a/trunk/fs/omfs/dir.c
+++ b/trunk/fs/omfs/dir.c
@@ -327,23 +327,26 @@ int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header,
return is_bad;
}
-static bool omfs_fill_chain(struct inode *dir, struct dir_context *ctx,
+static int omfs_fill_chain(struct file *filp, void *dirent, filldir_t filldir,
u64 fsblock, int hindex)
{
+ struct inode *dir = file_inode(filp);
+ struct buffer_head *bh;
+ struct omfs_inode *oi;
+ u64 self;
+ int res = 0;
+ unsigned char d_type;
+
/* follow chain in this bucket */
while (fsblock != ~0) {
- struct buffer_head *bh = omfs_bread(dir->i_sb, fsblock);
- struct omfs_inode *oi;
- u64 self;
- unsigned char d_type;
-
+ bh = omfs_bread(dir->i_sb, fsblock);
if (!bh)
- return true;
+ goto out;
oi = (struct omfs_inode *) bh->b_data;
if (omfs_is_bad(OMFS_SB(dir->i_sb), &oi->i_head, fsblock)) {
brelse(bh);
- return true;
+ goto out;
}
self = fsblock;
@@ -358,16 +361,15 @@ static bool omfs_fill_chain(struct inode *dir, struct dir_context *ctx,
d_type = (oi->i_type == OMFS_DIR) ? DT_DIR : DT_REG;
- if (!dir_emit(ctx, oi->i_name,
- strnlen(oi->i_name, OMFS_NAMELEN),
- self, d_type)) {
- brelse(bh);
- return false;
- }
+ res = filldir(dirent, oi->i_name, strnlen(oi->i_name,
+ OMFS_NAMELEN), filp->f_pos, self, d_type);
brelse(bh);
- ctx->pos++;
+ if (res < 0)
+ break;
+ filp->f_pos++;
}
- return true;
+out:
+ return res;
}
static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry,
@@ -401,44 +403,60 @@ static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry,
return err;
}
-static int omfs_readdir(struct file *file, struct dir_context *ctx)
+static int omfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *dir = file_inode(file);
+ struct inode *dir = file_inode(filp);
struct buffer_head *bh;
- __be64 *p;
+ loff_t offset, res;
unsigned int hchain, hindex;
int nbuckets;
-
- if (ctx->pos >> 32)
- return -EINVAL;
-
- if (ctx->pos < 1 << 20) {
- if (!dir_emit_dots(file, ctx))
- return 0;
- ctx->pos = 1 << 20;
+ u64 fsblock;
+ int ret = -EINVAL;
+
+ if (filp->f_pos >> 32)
+ goto success;
+
+ switch ((unsigned long) filp->f_pos) {
+ case 0:
+ if (filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR) < 0)
+ goto success;
+ filp->f_pos++;
+ /* fall through */
+ case 1:
+ if (filldir(dirent, "..", 2, 1,
+ parent_ino(filp->f_dentry), DT_DIR) < 0)
+ goto success;
+ filp->f_pos = 1 << 20;
+ /* fall through */
}
nbuckets = (dir->i_size - OMFS_DIR_START) / 8;
/* high 12 bits store bucket + 1 and low 20 bits store hash index */
- hchain = (ctx->pos >> 20) - 1;
- hindex = ctx->pos & 0xfffff;
+ hchain = (filp->f_pos >> 20) - 1;
+ hindex = filp->f_pos & 0xfffff;
bh = omfs_bread(dir->i_sb, dir->i_ino);
if (!bh)
- return -EINVAL;
+ goto out;
- p = (__be64 *)(bh->b_data + OMFS_DIR_START) + hchain;
+ offset = OMFS_DIR_START + hchain * 8;
- for (; hchain < nbuckets; hchain++) {
- __u64 fsblock = be64_to_cpu(*p++);
- if (!omfs_fill_chain(dir, ctx, fsblock, hindex))
- break;
+ for (; hchain < nbuckets; hchain++, offset += 8) {
+ fsblock = be64_to_cpu(*((__be64 *) &bh->b_data[offset]));
+
+ res = omfs_fill_chain(filp, dirent, filldir, fsblock, hindex);
hindex = 0;
- ctx->pos = (hchain+2) << 20;
+ if (res < 0)
+ break;
+
+ filp->f_pos = (hchain+2) << 20;
}
brelse(bh);
- return 0;
+success:
+ ret = 0;
+out:
+ return ret;
}
const struct inode_operations omfs_dir_inops = {
@@ -452,6 +470,6 @@ const struct inode_operations omfs_dir_inops = {
const struct file_operations omfs_dir_operations = {
.read = generic_read_dir,
- .iterate = omfs_readdir,
+ .readdir = omfs_readdir,
.llseek = generic_file_llseek,
};
diff --git a/trunk/fs/openpromfs/inode.c b/trunk/fs/openpromfs/inode.c
index 8c0ceb8dd1f7..75885ffde44e 100644
--- a/trunk/fs/openpromfs/inode.c
+++ b/trunk/fs/openpromfs/inode.c
@@ -162,11 +162,11 @@ static const struct file_operations openpromfs_prop_ops = {
.release = seq_release,
};
-static int openpromfs_readdir(struct file *, struct dir_context *);
+static int openpromfs_readdir(struct file *, void *, filldir_t);
static const struct file_operations openprom_operations = {
.read = generic_read_dir,
- .iterate = openpromfs_readdir,
+ .readdir = openpromfs_readdir,
.llseek = generic_file_llseek,
};
@@ -260,64 +260,71 @@ static struct dentry *openpromfs_lookup(struct inode *dir, struct dentry *dentry
return NULL;
}
-static int openpromfs_readdir(struct file *file, struct dir_context *ctx)
+static int openpromfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
struct op_inode_info *oi = OP_I(inode);
struct device_node *dp = oi->u.node;
struct device_node *child;
struct property *prop;
+ unsigned int ino;
int i;
mutex_lock(&op_mutex);
- if (ctx->pos == 0) {
- if (!dir_emit(ctx, ".", 1, inode->i_ino, DT_DIR))
+ ino = inode->i_ino;
+ i = filp->f_pos;
+ switch (i) {
+ case 0:
+ if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
goto out;
- ctx->pos = 1;
- }
- if (ctx->pos == 1) {
- if (!dir_emit(ctx, "..", 2,
+ i++;
+ filp->f_pos++;
+ /* fall thru */
+ case 1:
+ if (filldir(dirent, "..", 2, i,
(dp->parent == NULL ?
OPENPROM_ROOT_INO :
- dp->parent->unique_id), DT_DIR))
- goto out;
- ctx->pos = 2;
- }
- i = ctx->pos - 2;
-
- /* First, the children nodes as directories. */
- child = dp->child;
- while (i && child) {
- child = child->sibling;
- i--;
- }
- while (child) {
- if (!dir_emit(ctx,
- child->path_component_name,
- strlen(child->path_component_name),
- child->unique_id, DT_DIR))
+ dp->parent->unique_id), DT_DIR) < 0)
goto out;
+ i++;
+ filp->f_pos++;
+ /* fall thru */
+ default:
+ i -= 2;
+
+ /* First, the children nodes as directories. */
+ child = dp->child;
+ while (i && child) {
+ child = child->sibling;
+ i--;
+ }
+ while (child) {
+ if (filldir(dirent,
+ child->path_component_name,
+ strlen(child->path_component_name),
+ filp->f_pos, child->unique_id, DT_DIR) < 0)
+ goto out;
+
+ filp->f_pos++;
+ child = child->sibling;
+ }
- ctx->pos++;
- child = child->sibling;
- }
-
- /* Next, the properties as files. */
- prop = dp->properties;
- while (i && prop) {
- prop = prop->next;
- i--;
- }
- while (prop) {
- if (!dir_emit(ctx, prop->name, strlen(prop->name),
- prop->unique_id, DT_REG))
- goto out;
+ /* Next, the properties as files. */
+ prop = dp->properties;
+ while (i && prop) {
+ prop = prop->next;
+ i--;
+ }
+ while (prop) {
+ if (filldir(dirent, prop->name, strlen(prop->name),
+ filp->f_pos, prop->unique_id, DT_REG) < 0)
+ goto out;
- ctx->pos++;
- prop = prop->next;
+ filp->f_pos++;
+ prop = prop->next;
+ }
}
-
out:
mutex_unlock(&op_mutex);
return 0;
diff --git a/trunk/fs/proc/base.c b/trunk/fs/proc/base.c
index 0016350ad95e..c3834dad09b3 100644
--- a/trunk/fs/proc/base.c
+++ b/trunk/fs/proc/base.c
@@ -1681,11 +1681,11 @@ const struct dentry_operations pid_dentry_operations =
* reported by readdir in sync with the inode numbers reported
* by stat.
*/
-bool proc_fill_cache(struct file *file, struct dir_context *ctx,
+int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
const char *name, int len,
instantiate_t instantiate, struct task_struct *task, const void *ptr)
{
- struct dentry *child, *dir = file->f_path.dentry;
+ struct dentry *child, *dir = filp->f_path.dentry;
struct inode *inode;
struct qstr qname;
ino_t ino = 0;
@@ -1720,7 +1720,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx,
ino = find_inode_number(dir, &qname);
if (!ino)
ino = 1;
- return dir_emit(ctx, name, len, ino, type);
+ return filldir(dirent, name, len, filp->f_pos, ino, type);
}
#ifdef CONFIG_CHECKPOINT_RESTORE
@@ -1931,15 +1931,14 @@ static const struct inode_operations proc_map_files_inode_operations = {
};
static int
-proc_map_files_readdir(struct file *file, struct dir_context *ctx)
+proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
struct vm_area_struct *vma;
struct task_struct *task;
struct mm_struct *mm;
- unsigned long nr_files, pos, i;
- struct flex_array *fa = NULL;
- struct map_files_info info;
- struct map_files_info *p;
+ ino_t ino;
int ret;
ret = -EPERM;
@@ -1947,7 +1946,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
goto out;
ret = -ENOENT;
- task = get_proc_task(file_inode(file));
+ task = get_proc_task(inode);
if (!task)
goto out;
@@ -1956,73 +1955,91 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
goto out_put_task;
ret = 0;
- if (!dir_emit_dots(file, ctx))
- goto out_put_task;
+ switch (filp->f_pos) {
+ case 0:
+ ino = inode->i_ino;
+ if (filldir(dirent, ".", 1, 0, ino, DT_DIR) < 0)
+ goto out_put_task;
+ filp->f_pos++;
+ case 1:
+ ino = parent_ino(dentry);
+ if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
+ goto out_put_task;
+ filp->f_pos++;
+ default:
+ {
+ unsigned long nr_files, pos, i;
+ struct flex_array *fa = NULL;
+ struct map_files_info info;
+ struct map_files_info *p;
+
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out_put_task;
+ down_read(&mm->mmap_sem);
- mm = get_task_mm(task);
- if (!mm)
- goto out_put_task;
- down_read(&mm->mmap_sem);
+ nr_files = 0;
- nr_files = 0;
+ /*
+ * We need two passes here:
+ *
+ * 1) Collect vmas of mapped files with mmap_sem taken
+ * 2) Release mmap_sem and instantiate entries
+ *
+ * otherwise we get lockdep complained, since filldir()
+ * routine might require mmap_sem taken in might_fault().
+ */
- /*
- * We need two passes here:
- *
- * 1) Collect vmas of mapped files with mmap_sem taken
- * 2) Release mmap_sem and instantiate entries
- *
- * otherwise we get lockdep complained, since filldir()
- * routine might require mmap_sem taken in might_fault().
- */
+ for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
+ if (vma->vm_file && ++pos > filp->f_pos)
+ nr_files++;
+ }
- for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
- if (vma->vm_file && ++pos > ctx->pos)
- nr_files++;
- }
-
- if (nr_files) {
- fa = flex_array_alloc(sizeof(info), nr_files,
- GFP_KERNEL);
- if (!fa || flex_array_prealloc(fa, 0, nr_files,
- GFP_KERNEL)) {
- ret = -ENOMEM;
- if (fa)
- flex_array_free(fa);
- up_read(&mm->mmap_sem);
- mmput(mm);
- goto out_put_task;
+ if (nr_files) {
+ fa = flex_array_alloc(sizeof(info), nr_files,
+ GFP_KERNEL);
+ if (!fa || flex_array_prealloc(fa, 0, nr_files,
+ GFP_KERNEL)) {
+ ret = -ENOMEM;
+ if (fa)
+ flex_array_free(fa);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ goto out_put_task;
+ }
+ for (i = 0, vma = mm->mmap, pos = 2; vma;
+ vma = vma->vm_next) {
+ if (!vma->vm_file)
+ continue;
+ if (++pos <= filp->f_pos)
+ continue;
+
+ info.mode = vma->vm_file->f_mode;
+ info.len = snprintf(info.name,
+ sizeof(info.name), "%lx-%lx",
+ vma->vm_start, vma->vm_end);
+ if (flex_array_put(fa, i++, &info, GFP_KERNEL))
+ BUG();
+ }
}
- for (i = 0, vma = mm->mmap, pos = 2; vma;
- vma = vma->vm_next) {
- if (!vma->vm_file)
- continue;
- if (++pos <= ctx->pos)
- continue;
-
- info.mode = vma->vm_file->f_mode;
- info.len = snprintf(info.name,
- sizeof(info.name), "%lx-%lx",
- vma->vm_start, vma->vm_end);
- if (flex_array_put(fa, i++, &info, GFP_KERNEL))
- BUG();
+ up_read(&mm->mmap_sem);
+
+ for (i = 0; i < nr_files; i++) {
+ p = flex_array_get(fa, i);
+ ret = proc_fill_cache(filp, dirent, filldir,
+ p->name, p->len,
+ proc_map_files_instantiate,
+ task,
+ (void *)(unsigned long)p->mode);
+ if (ret)
+ break;
+ filp->f_pos++;
}
+ if (fa)
+ flex_array_free(fa);
+ mmput(mm);
}
- up_read(&mm->mmap_sem);
-
- for (i = 0; i < nr_files; i++) {
- p = flex_array_get(fa, i);
- if (!proc_fill_cache(file, ctx,
- p->name, p->len,
- proc_map_files_instantiate,
- task,
- (void *)(unsigned long)p->mode))
- break;
- ctx->pos++;
}
- if (fa)
- flex_array_free(fa);
- mmput(mm);
out_put_task:
put_task_struct(task);
@@ -2032,7 +2049,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
static const struct file_operations proc_map_files_operations = {
.read = generic_read_dir,
- .iterate = proc_map_files_readdir,
+ .readdir = proc_map_files_readdir,
.llseek = default_llseek,
};
@@ -2200,30 +2217,67 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
return error;
}
-static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
+static int proc_pident_fill_cache(struct file *filp, void *dirent,
+ filldir_t filldir, struct task_struct *task, const struct pid_entry *p)
+{
+ return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
+ proc_pident_instantiate, task, p);
+}
+
+static int proc_pident_readdir(struct file *filp,
+ void *dirent, filldir_t filldir,
const struct pid_entry *ents, unsigned int nents)
{
- struct task_struct *task = get_proc_task(file_inode(file));
- const struct pid_entry *p;
+ int i;
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ struct task_struct *task = get_proc_task(inode);
+ const struct pid_entry *p, *last;
+ ino_t ino;
+ int ret;
+ ret = -ENOENT;
if (!task)
- return -ENOENT;
-
- if (!dir_emit_dots(file, ctx))
- goto out;
-
- if (ctx->pos >= nents + 2)
- goto out;
+ goto out_no_task;
- for (p = ents + (ctx->pos - 2); p <= ents + nents - 1; p++) {
- if (!proc_fill_cache(file, ctx, p->name, p->len,
- proc_pident_instantiate, task, p))
- break;
- ctx->pos++;
+ ret = 0;
+ i = filp->f_pos;
+ switch (i) {
+ case 0:
+ ino = inode->i_ino;
+ if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
+ goto out;
+ i++;
+ filp->f_pos++;
+ /* fall through */
+ case 1:
+ ino = parent_ino(dentry);
+ if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
+ goto out;
+ i++;
+ filp->f_pos++;
+ /* fall through */
+ default:
+ i -= 2;
+ if (i >= nents) {
+ ret = 1;
+ goto out;
+ }
+ p = ents + i;
+ last = &ents[nents - 1];
+ while (p <= last) {
+ if (proc_pident_fill_cache(filp, dirent, filldir, task, p) < 0)
+ goto out;
+ filp->f_pos++;
+ p++;
+ }
}
+
+ ret = 1;
out:
put_task_struct(task);
- return 0;
+out_no_task:
+ return ret;
}
#ifdef CONFIG_SECURITY
@@ -2308,15 +2362,16 @@ static const struct pid_entry attr_dir_stuff[] = {
REG("sockcreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
};
-static int proc_attr_dir_readdir(struct file *file, struct dir_context *ctx)
+static int proc_attr_dir_readdir(struct file * filp,
+ void * dirent, filldir_t filldir)
{
- return proc_pident_readdir(file, ctx,
- attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
+ return proc_pident_readdir(filp,dirent,filldir,
+ attr_dir_stuff,ARRAY_SIZE(attr_dir_stuff));
}
static const struct file_operations proc_attr_dir_operations = {
.read = generic_read_dir,
- .iterate = proc_attr_dir_readdir,
+ .readdir = proc_attr_dir_readdir,
.llseek = default_llseek,
};
@@ -2670,15 +2725,16 @@ static const struct pid_entry tgid_base_stuff[] = {
#endif
};
-static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx)
+static int proc_tgid_base_readdir(struct file * filp,
+ void * dirent, filldir_t filldir)
{
- return proc_pident_readdir(file, ctx,
- tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
+ return proc_pident_readdir(filp,dirent,filldir,
+ tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff));
}
static const struct file_operations proc_tgid_base_operations = {
.read = generic_read_dir,
- .iterate = proc_tgid_base_readdir,
+ .readdir = proc_tgid_base_readdir,
.llseek = default_llseek,
};
@@ -2880,42 +2936,58 @@ static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter ite
#define TGID_OFFSET (FIRST_PROCESS_ENTRY + 1)
+static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
+ struct tgid_iter iter)
+{
+ char name[PROC_NUMBUF];
+ int len = snprintf(name, sizeof(name), "%d", iter.tgid);
+ return proc_fill_cache(filp, dirent, filldir, name, len,
+ proc_pid_instantiate, iter.task, NULL);
+}
+
+static int fake_filldir(void *buf, const char *name, int namelen,
+ loff_t offset, u64 ino, unsigned d_type)
+{
+ return 0;
+}
+
/* for the /proc/ directory itself, after non-process stuff has been done */
-int proc_pid_readdir(struct file *file, struct dir_context *ctx)
+int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
struct tgid_iter iter;
struct pid_namespace *ns;
- loff_t pos = ctx->pos;
+ filldir_t __filldir;
+ loff_t pos = filp->f_pos;
if (pos >= PID_MAX_LIMIT + TGID_OFFSET)
- return 0;
+ goto out;
if (pos == TGID_OFFSET - 1) {
- if (!proc_fill_cache(file, ctx, "self", 4, NULL, NULL, NULL))
- return 0;
+ if (proc_fill_cache(filp, dirent, filldir, "self", 4,
+ NULL, NULL, NULL) < 0)
+ goto out;
iter.tgid = 0;
} else {
iter.tgid = pos - TGID_OFFSET;
}
iter.task = NULL;
- ns = file->f_dentry->d_sb->s_fs_info;
+ ns = filp->f_dentry->d_sb->s_fs_info;
for (iter = next_tgid(ns, iter);
iter.task;
iter.tgid += 1, iter = next_tgid(ns, iter)) {
- char name[PROC_NUMBUF];
- int len;
- if (!has_pid_permissions(ns, iter.task, 2))
- continue;
+ if (has_pid_permissions(ns, iter.task, 2))
+ __filldir = filldir;
+ else
+ __filldir = fake_filldir;
- len = snprintf(name, sizeof(name), "%d", iter.tgid);
- ctx->pos = iter.tgid + TGID_OFFSET;
- if (!proc_fill_cache(file, ctx, name, len,
- proc_pid_instantiate, iter.task, NULL)) {
+ filp->f_pos = iter.tgid + TGID_OFFSET;
+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
put_task_struct(iter.task);
- return 0;
+ goto out;
}
}
- ctx->pos = PID_MAX_LIMIT + TGID_OFFSET;
+ filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET;
+out:
return 0;
}
@@ -3003,10 +3075,11 @@ static const struct pid_entry tid_base_stuff[] = {
#endif
};
-static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx)
+static int proc_tid_base_readdir(struct file * filp,
+ void * dirent, filldir_t filldir)
{
- return proc_pident_readdir(file, ctx,
- tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
+ return proc_pident_readdir(filp,dirent,filldir,
+ tid_base_stuff,ARRAY_SIZE(tid_base_stuff));
}
static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
@@ -3017,7 +3090,7 @@ static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *den
static const struct file_operations proc_tid_base_operations = {
.read = generic_read_dir,
- .iterate = proc_tid_base_readdir,
+ .readdir = proc_tid_base_readdir,
.llseek = default_llseek,
};
@@ -3158,16 +3231,30 @@ static struct task_struct *next_tid(struct task_struct *start)
return pos;
}
+static int proc_task_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
+ struct task_struct *task, int tid)
+{
+ char name[PROC_NUMBUF];
+ int len = snprintf(name, sizeof(name), "%d", tid);
+ return proc_fill_cache(filp, dirent, filldir, name, len,
+ proc_task_instantiate, task, NULL);
+}
+
/* for the /proc/TGID/task/ directories */
-static int proc_task_readdir(struct file *file, struct dir_context *ctx)
+static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
struct task_struct *leader = NULL;
- struct task_struct *task = get_proc_task(file_inode(file));
- struct pid_namespace *ns;
+ struct task_struct *task;
+ int retval = -ENOENT;
+ ino_t ino;
int tid;
+ struct pid_namespace *ns;
+ task = get_proc_task(inode);
if (!task)
- return -ENOENT;
+ goto out_no_task;
rcu_read_lock();
if (pid_alive(task)) {
leader = task->group_leader;
@@ -3176,36 +3263,46 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx)
rcu_read_unlock();
put_task_struct(task);
if (!leader)
- return -ENOENT;
+ goto out_no_task;
+ retval = 0;
- if (!dir_emit_dots(file, ctx))
- goto out;
+ switch ((unsigned long)filp->f_pos) {
+ case 0:
+ ino = inode->i_ino;
+ if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) < 0)
+ goto out;
+ filp->f_pos++;
+ /* fall through */
+ case 1:
+ ino = parent_ino(dentry);
+ if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) < 0)
+ goto out;
+ filp->f_pos++;
+ /* fall through */
+ }
/* f_version caches the tgid value that the last readdir call couldn't
* return. lseek aka telldir automagically resets f_version to 0.
*/
- ns = file->f_dentry->d_sb->s_fs_info;
- tid = (int)file->f_version;
- file->f_version = 0;
- for (task = first_tid(leader, tid, ctx->pos - 2, ns);
+ ns = filp->f_dentry->d_sb->s_fs_info;
+ tid = (int)filp->f_version;
+ filp->f_version = 0;
+ for (task = first_tid(leader, tid, filp->f_pos - 2, ns);
task;
- task = next_tid(task), ctx->pos++) {
- char name[PROC_NUMBUF];
- int len;
+ task = next_tid(task), filp->f_pos++) {
tid = task_pid_nr_ns(task, ns);
- len = snprintf(name, sizeof(name), "%d", tid);
- if (!proc_fill_cache(file, ctx, name, len,
- proc_task_instantiate, task, NULL)) {
+ if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) {
/* returning this tgid failed, save it as the first
* pid for the next readir call */
- file->f_version = (u64)tid;
+ filp->f_version = (u64)tid;
put_task_struct(task);
break;
}
}
out:
put_task_struct(leader);
- return 0;
+out_no_task:
+ return retval;
}
static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
@@ -3231,6 +3328,6 @@ static const struct inode_operations proc_task_inode_operations = {
static const struct file_operations proc_task_operations = {
.read = generic_read_dir,
- .iterate = proc_task_readdir,
+ .readdir = proc_task_readdir,
.llseek = default_llseek,
};
diff --git a/trunk/fs/proc/fd.c b/trunk/fs/proc/fd.c
index 1441f143c43b..d7a4a28ef630 100644
--- a/trunk/fs/proc/fd.c
+++ b/trunk/fs/proc/fd.c
@@ -219,58 +219,74 @@ static struct dentry *proc_lookupfd_common(struct inode *dir,
return result;
}
-static int proc_readfd_common(struct file *file, struct dir_context *ctx,
- instantiate_t instantiate)
+static int proc_readfd_common(struct file * filp, void * dirent,
+ filldir_t filldir, instantiate_t instantiate)
{
- struct task_struct *p = get_proc_task(file_inode(file));
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ struct task_struct *p = get_proc_task(inode);
struct files_struct *files;
- unsigned int fd;
+ unsigned int fd, ino;
+ int retval;
+ retval = -ENOENT;
if (!p)
- return -ENOENT;
-
- if (!dir_emit_dots(file, ctx))
- goto out;
- if (!dir_emit_dots(file, ctx))
- goto out;
- files = get_files_struct(p);
- if (!files)
- goto out;
+ goto out_no_task;
+ retval = 0;
+
+ fd = filp->f_pos;
+ switch (fd) {
+ case 0:
+ if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
+ goto out;
+ filp->f_pos++;
+ case 1:
+ ino = parent_ino(dentry);
+ if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
+ goto out;
+ filp->f_pos++;
+ default:
+ files = get_files_struct(p);
+ if (!files)
+ goto out;
+ rcu_read_lock();
+ for (fd = filp->f_pos - 2;
+ fd < files_fdtable(files)->max_fds;
+ fd++, filp->f_pos++) {
+ char name[PROC_NUMBUF];
+ int len;
+ int rv;
+
+ if (!fcheck_files(files, fd))
+ continue;
+ rcu_read_unlock();
- rcu_read_lock();
- for (fd = ctx->pos - 2;
- fd < files_fdtable(files)->max_fds;
- fd++, ctx->pos++) {
- char name[PROC_NUMBUF];
- int len;
-
- if (!fcheck_files(files, fd))
- continue;
- rcu_read_unlock();
-
- len = snprintf(name, sizeof(name), "%d", fd);
- if (!proc_fill_cache(file, ctx,
- name, len, instantiate, p,
- (void *)(unsigned long)fd))
- goto out_fd_loop;
- rcu_read_lock();
- }
- rcu_read_unlock();
+ len = snprintf(name, sizeof(name), "%d", fd);
+ rv = proc_fill_cache(filp, dirent, filldir,
+ name, len, instantiate, p,
+ (void *)(unsigned long)fd);
+ if (rv < 0)
+ goto out_fd_loop;
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
out_fd_loop:
- put_files_struct(files);
+ put_files_struct(files);
+ }
out:
put_task_struct(p);
- return 0;
+out_no_task:
+ return retval;
}
-static int proc_readfd(struct file *file, struct dir_context *ctx)
+static int proc_readfd(struct file *filp, void *dirent, filldir_t filldir)
{
- return proc_readfd_common(file, ctx, proc_fd_instantiate);
+ return proc_readfd_common(filp, dirent, filldir, proc_fd_instantiate);
}
const struct file_operations proc_fd_operations = {
.read = generic_read_dir,
- .iterate = proc_readfd,
+ .readdir = proc_readfd,
.llseek = default_llseek,
};
@@ -335,9 +351,9 @@ proc_lookupfdinfo(struct inode *dir, struct dentry *dentry, unsigned int flags)
return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate);
}
-static int proc_readfdinfo(struct file *file, struct dir_context *ctx)
+static int proc_readfdinfo(struct file *filp, void *dirent, filldir_t filldir)
{
- return proc_readfd_common(file, ctx,
+ return proc_readfd_common(filp, dirent, filldir,
proc_fdinfo_instantiate);
}
@@ -348,6 +364,6 @@ const struct inode_operations proc_fdinfo_inode_operations = {
const struct file_operations proc_fdinfo_operations = {
.read = generic_read_dir,
- .iterate = proc_readfdinfo,
+ .readdir = proc_readfdinfo,
.llseek = default_llseek,
};
diff --git a/trunk/fs/proc/generic.c b/trunk/fs/proc/generic.c
index 94441a407337..a2596afffae6 100644
--- a/trunk/fs/proc/generic.c
+++ b/trunk/fs/proc/generic.c
@@ -233,52 +233,76 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
* value of the readdir() call, as long as it's non-negative
* for success..
*/
-int proc_readdir_de(struct proc_dir_entry *de, struct file *file,
- struct dir_context *ctx)
+int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
+ filldir_t filldir)
{
+ unsigned int ino;
int i;
+ struct inode *inode = file_inode(filp);
+ int ret = 0;
+
+ ino = inode->i_ino;
+ i = filp->f_pos;
+ switch (i) {
+ case 0:
+ if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
+ goto out;
+ i++;
+ filp->f_pos++;
+ /* fall through */
+ case 1:
+ if (filldir(dirent, "..", 2, i,
+ parent_ino(filp->f_path.dentry),
+ DT_DIR) < 0)
+ goto out;
+ i++;
+ filp->f_pos++;
+ /* fall through */
+ default:
+ spin_lock(&proc_subdir_lock);
+ de = de->subdir;
+ i -= 2;
+ for (;;) {
+ if (!de) {
+ ret = 1;
+ spin_unlock(&proc_subdir_lock);
+ goto out;
+ }
+ if (!i)
+ break;
+ de = de->next;
+ i--;
+ }
- if (!dir_emit_dots(file, ctx))
- return 0;
-
- spin_lock(&proc_subdir_lock);
- de = de->subdir;
- i = ctx->pos - 2;
- for (;;) {
- if (!de) {
+ do {
+ struct proc_dir_entry *next;
+
+ /* filldir passes info to user space */
+ pde_get(de);
+ spin_unlock(&proc_subdir_lock);
+ if (filldir(dirent, de->name, de->namelen, filp->f_pos,
+ de->low_ino, de->mode >> 12) < 0) {
+ pde_put(de);
+ goto out;
+ }
+ spin_lock(&proc_subdir_lock);
+ filp->f_pos++;
+ next = de->next;
+ pde_put(de);
+ de = next;
+ } while (de);
spin_unlock(&proc_subdir_lock);
- return 0;
- }
- if (!i)
- break;
- de = de->next;
- i--;
}
-
- do {
- struct proc_dir_entry *next;
- pde_get(de);
- spin_unlock(&proc_subdir_lock);
- if (!dir_emit(ctx, de->name, de->namelen,
- de->low_ino, de->mode >> 12)) {
- pde_put(de);
- return 0;
- }
- spin_lock(&proc_subdir_lock);
- ctx->pos++;
- next = de->next;
- pde_put(de);
- de = next;
- } while (de);
- spin_unlock(&proc_subdir_lock);
- return 0;
+ ret = 1;
+out:
+ return ret;
}
-int proc_readdir(struct file *file, struct dir_context *ctx)
+int proc_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
- return proc_readdir_de(PDE(inode), file, ctx);
+ return proc_readdir_de(PDE(inode), filp, dirent, filldir);
}
/*
@@ -289,7 +313,7 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
static const struct file_operations proc_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = proc_readdir,
+ .readdir = proc_readdir,
};
/*
diff --git a/trunk/fs/proc/internal.h b/trunk/fs/proc/internal.h
index 4eae2e149f31..d600fb098b6a 100644
--- a/trunk/fs/proc/internal.h
+++ b/trunk/fs/proc/internal.h
@@ -165,14 +165,14 @@ extern int proc_setattr(struct dentry *, struct iattr *);
extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *);
extern int pid_revalidate(struct dentry *, unsigned int);
extern int pid_delete_dentry(const struct dentry *);
-extern int proc_pid_readdir(struct file *, struct dir_context *);
+extern int proc_pid_readdir(struct file *, void *, filldir_t);
extern struct dentry *proc_pid_lookup(struct inode *, struct dentry *, unsigned int);
extern loff_t mem_lseek(struct file *, loff_t, int);
/* Lookups */
typedef struct dentry *instantiate_t(struct inode *, struct dentry *,
struct task_struct *, const void *);
-extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, int,
+extern int proc_fill_cache(struct file *, void *, filldir_t, const char *, int,
instantiate_t, struct task_struct *, const void *);
/*
@@ -183,8 +183,8 @@ extern spinlock_t proc_subdir_lock;
extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
struct dentry *);
-extern int proc_readdir(struct file *, struct dir_context *);
-extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
+extern int proc_readdir(struct file *, void *, filldir_t);
+extern int proc_readdir_de(struct proc_dir_entry *, struct file *, void *, filldir_t);
static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
{
diff --git a/trunk/fs/proc/namespaces.c b/trunk/fs/proc/namespaces.c
index f6abbbbfad8a..54bdc6701e9f 100644
--- a/trunk/fs/proc/namespaces.c
+++ b/trunk/fs/proc/namespaces.c
@@ -213,36 +213,74 @@ static struct dentry *proc_ns_instantiate(struct inode *dir,
return error;
}
-static int proc_ns_dir_readdir(struct file *file, struct dir_context *ctx)
+static int proc_ns_fill_cache(struct file *filp, void *dirent,
+ filldir_t filldir, struct task_struct *task,
+ const struct proc_ns_operations *ops)
{
- struct task_struct *task = get_proc_task(file_inode(file));
+ return proc_fill_cache(filp, dirent, filldir,
+ ops->name, strlen(ops->name),
+ proc_ns_instantiate, task, ops);
+}
+
+static int proc_ns_dir_readdir(struct file *filp, void *dirent,
+ filldir_t filldir)
+{
+ int i;
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ struct task_struct *task = get_proc_task(inode);
const struct proc_ns_operations **entry, **last;
+ ino_t ino;
+ int ret;
+ ret = -ENOENT;
if (!task)
- return -ENOENT;
+ goto out_no_task;
- if (!dir_emit_dots(file, ctx))
- goto out;
- if (ctx->pos >= 2 + ARRAY_SIZE(ns_entries))
- goto out;
- entry = ns_entries + (ctx->pos - 2);
- last = &ns_entries[ARRAY_SIZE(ns_entries) - 1];
- while (entry <= last) {
- const struct proc_ns_operations *ops = *entry;
- if (!proc_fill_cache(file, ctx, ops->name, strlen(ops->name),
- proc_ns_instantiate, task, ops))
- break;
- ctx->pos++;
- entry++;
+ ret = 0;
+ i = filp->f_pos;
+ switch (i) {
+ case 0:
+ ino = inode->i_ino;
+ if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
+ goto out;
+ i++;
+ filp->f_pos++;
+ /* fall through */
+ case 1:
+ ino = parent_ino(dentry);
+ if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
+ goto out;
+ i++;
+ filp->f_pos++;
+ /* fall through */
+ default:
+ i -= 2;
+ if (i >= ARRAY_SIZE(ns_entries)) {
+ ret = 1;
+ goto out;
+ }
+ entry = ns_entries + i;
+ last = &ns_entries[ARRAY_SIZE(ns_entries) - 1];
+ while (entry <= last) {
+ if (proc_ns_fill_cache(filp, dirent, filldir,
+ task, *entry) < 0)
+ goto out;
+ filp->f_pos++;
+ entry++;
+ }
}
+
+ ret = 1;
out:
put_task_struct(task);
- return 0;
+out_no_task:
+ return ret;
}
const struct file_operations proc_ns_dir_operations = {
.read = generic_read_dir,
- .iterate = proc_ns_dir_readdir,
+ .readdir = proc_ns_dir_readdir,
};
static struct dentry *proc_ns_dir_lookup(struct inode *dir,
diff --git a/trunk/fs/proc/proc_net.c b/trunk/fs/proc/proc_net.c
index 4677bb7dc7c2..986e83220d56 100644
--- a/trunk/fs/proc/proc_net.c
+++ b/trunk/fs/proc/proc_net.c
@@ -160,15 +160,16 @@ const struct inode_operations proc_net_inode_operations = {
.getattr = proc_tgid_net_getattr,
};
-static int proc_tgid_net_readdir(struct file *file, struct dir_context *ctx)
+static int proc_tgid_net_readdir(struct file *filp, void *dirent,
+ filldir_t filldir)
{
int ret;
struct net *net;
ret = -EINVAL;
- net = get_proc_task_net(file_inode(file));
+ net = get_proc_task_net(file_inode(filp));
if (net != NULL) {
- ret = proc_readdir_de(net->proc_net, file, ctx);
+ ret = proc_readdir_de(net->proc_net, filp, dirent, filldir);
put_net(net);
}
return ret;
@@ -177,7 +178,7 @@ static int proc_tgid_net_readdir(struct file *file, struct dir_context *ctx)
const struct file_operations proc_net_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = proc_tgid_net_readdir,
+ .readdir = proc_tgid_net_readdir,
};
static __net_init int proc_net_ns_init(struct net *net)
diff --git a/trunk/fs/proc/proc_sysctl.c b/trunk/fs/proc/proc_sysctl.c
index f3a570e7c257..ac05f33a0dde 100644
--- a/trunk/fs/proc/proc_sysctl.c
+++ b/trunk/fs/proc/proc_sysctl.c
@@ -573,12 +573,12 @@ static unsigned int proc_sys_poll(struct file *filp, poll_table *wait)
return ret;
}
-static bool proc_sys_fill_cache(struct file *file,
- struct dir_context *ctx,
+static int proc_sys_fill_cache(struct file *filp, void *dirent,
+ filldir_t filldir,
struct ctl_table_header *head,
struct ctl_table *table)
{
- struct dentry *child, *dir = file->f_path.dentry;
+ struct dentry *child, *dir = filp->f_path.dentry;
struct inode *inode;
struct qstr qname;
ino_t ino = 0;
@@ -595,38 +595,38 @@ static bool proc_sys_fill_cache(struct file *file,
inode = proc_sys_make_inode(dir->d_sb, head, table);
if (!inode) {
dput(child);
- return false;
+ return -ENOMEM;
} else {
d_set_d_op(child, &proc_sys_dentry_operations);
d_add(child, inode);
}
} else {
- return false;
+ return -ENOMEM;
}
}
inode = child->d_inode;
ino = inode->i_ino;
type = inode->i_mode >> 12;
dput(child);
- return dir_emit(ctx, qname.name, qname.len, ino, type);
+ return !!filldir(dirent, qname.name, qname.len, filp->f_pos, ino, type);
}
-static bool proc_sys_link_fill_cache(struct file *file,
- struct dir_context *ctx,
+static int proc_sys_link_fill_cache(struct file *filp, void *dirent,
+ filldir_t filldir,
struct ctl_table_header *head,
struct ctl_table *table)
{
- bool ret = true;
+ int err, ret = 0;
head = sysctl_head_grab(head);
if (S_ISLNK(table->mode)) {
/* It is not an error if we can not follow the link ignore it */
- int err = sysctl_follow_link(&head, &table, current->nsproxy);
+ err = sysctl_follow_link(&head, &table, current->nsproxy);
if (err)
goto out;
}
- ret = proc_sys_fill_cache(file, ctx, head, table);
+ ret = proc_sys_fill_cache(filp, dirent, filldir, head, table);
out:
sysctl_head_finish(head);
return ret;
@@ -634,50 +634,67 @@ static bool proc_sys_link_fill_cache(struct file *file,
static int scan(struct ctl_table_header *head, ctl_table *table,
unsigned long *pos, struct file *file,
- struct dir_context *ctx)
+ void *dirent, filldir_t filldir)
{
- bool res;
+ int res;
- if ((*pos)++ < ctx->pos)
- return true;
+ if ((*pos)++ < file->f_pos)
+ return 0;
if (unlikely(S_ISLNK(table->mode)))
- res = proc_sys_link_fill_cache(file, ctx, head, table);
+ res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
else
- res = proc_sys_fill_cache(file, ctx, head, table);
+ res = proc_sys_fill_cache(file, dirent, filldir, head, table);
- if (res)
- ctx->pos = *pos;
+ if (res == 0)
+ file->f_pos = *pos;
return res;
}
-static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
+static int proc_sys_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct ctl_table_header *head = grab_header(file_inode(file));
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ struct ctl_table_header *head = grab_header(inode);
struct ctl_table_header *h = NULL;
struct ctl_table *entry;
struct ctl_dir *ctl_dir;
unsigned long pos;
+ int ret = -EINVAL;
if (IS_ERR(head))
return PTR_ERR(head);
ctl_dir = container_of(head, struct ctl_dir, header);
- if (!dir_emit_dots(file, ctx))
- return 0;
-
+ ret = 0;
+ /* Avoid a switch here: arm builds fail with missing __cmpdi2 */
+ if (filp->f_pos == 0) {
+ if (filldir(dirent, ".", 1, filp->f_pos,
+ inode->i_ino, DT_DIR) < 0)
+ goto out;
+ filp->f_pos++;
+ }
+ if (filp->f_pos == 1) {
+ if (filldir(dirent, "..", 2, filp->f_pos,
+ parent_ino(dentry), DT_DIR) < 0)
+ goto out;
+ filp->f_pos++;
+ }
pos = 2;
for (first_entry(ctl_dir, &h, &entry); h; next_entry(&h, &entry)) {
- if (!scan(h, entry, &pos, file, ctx)) {
+ ret = scan(h, entry, &pos, filp, dirent, filldir);
+ if (ret) {
sysctl_head_finish(h);
break;
}
}
+ ret = 1;
+out:
sysctl_head_finish(head);
- return 0;
+ return ret;
}
static int proc_sys_permission(struct inode *inode, int mask)
@@ -752,7 +769,7 @@ static const struct file_operations proc_sys_file_operations = {
static const struct file_operations proc_sys_dir_file_operations = {
.read = generic_read_dir,
- .iterate = proc_sys_readdir,
+ .readdir = proc_sys_readdir,
.llseek = generic_file_llseek,
};
diff --git a/trunk/fs/proc/root.c b/trunk/fs/proc/root.c
index 229e366598da..41a6ea93f486 100644
--- a/trunk/fs/proc/root.c
+++ b/trunk/fs/proc/root.c
@@ -202,14 +202,21 @@ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentr
return proc_pid_lookup(dir, dentry, flags);
}
-static int proc_root_readdir(struct file *file, struct dir_context *ctx)
+static int proc_root_readdir(struct file * filp,
+ void * dirent, filldir_t filldir)
{
- if (ctx->pos < FIRST_PROCESS_ENTRY) {
- proc_readdir(file, ctx);
- ctx->pos = FIRST_PROCESS_ENTRY;
+ unsigned int nr = filp->f_pos;
+ int ret;
+
+ if (nr < FIRST_PROCESS_ENTRY) {
+ int error = proc_readdir(filp, dirent, filldir);
+ if (error <= 0)
+ return error;
+ filp->f_pos = FIRST_PROCESS_ENTRY;
}
- return proc_pid_readdir(file, ctx);
+ ret = proc_pid_readdir(filp, dirent, filldir);
+ return ret;
}
/*
@@ -219,7 +226,7 @@ static int proc_root_readdir(struct file *file, struct dir_context *ctx)
*/
static const struct file_operations proc_root_operations = {
.read = generic_read_dir,
- .iterate = proc_root_readdir,
+ .readdir = proc_root_readdir,
.llseek = default_llseek,
};
diff --git a/trunk/fs/qnx4/dir.c b/trunk/fs/qnx4/dir.c
index b218f965817b..28ce014b3cef 100644
--- a/trunk/fs/qnx4/dir.c
+++ b/trunk/fs/qnx4/dir.c
@@ -14,9 +14,9 @@
#include
#include "qnx4.h"
-static int qnx4_readdir(struct file *file, struct dir_context *ctx)
+static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
unsigned int offset;
struct buffer_head *bh;
struct qnx4_inode_entry *de;
@@ -26,44 +26,48 @@ static int qnx4_readdir(struct file *file, struct dir_context *ctx)
int size;
QNX4DEBUG((KERN_INFO "qnx4_readdir:i_size = %ld\n", (long) inode->i_size));
- QNX4DEBUG((KERN_INFO "pos = %ld\n", (long) ctx->pos));
+ QNX4DEBUG((KERN_INFO "filp->f_pos = %ld\n", (long) filp->f_pos));
- while (ctx->pos < inode->i_size) {
- blknum = qnx4_block_map(inode, ctx->pos >> QNX4_BLOCK_SIZE_BITS);
+ while (filp->f_pos < inode->i_size) {
+ blknum = qnx4_block_map( inode, filp->f_pos >> QNX4_BLOCK_SIZE_BITS );
bh = sb_bread(inode->i_sb, blknum);
- if (bh == NULL) {
+ if(bh==NULL) {
printk(KERN_ERR "qnx4_readdir: bread failed (%ld)\n", blknum);
- return 0;
+ break;
}
- ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
- for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) {
+ ix = (int)(filp->f_pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
+ while (ix < QNX4_INODES_PER_BLOCK) {
offset = ix * QNX4_DIR_ENTRY_SIZE;
de = (struct qnx4_inode_entry *) (bh->b_data + offset);
- if (!de->di_fname[0])
- continue;
- if (!(de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
- continue;
- if (!(de->di_status & QNX4_FILE_LINK))
- size = QNX4_SHORT_NAME_MAX;
- else
- size = QNX4_NAME_MAX;
- size = strnlen(de->di_fname, size);
- QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname));
- if (!(de->di_status & QNX4_FILE_LINK))
- ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
- else {
- le = (struct qnx4_link_info*)de;
- ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) *
- QNX4_INODES_PER_BLOCK +
- le->dl_inode_ndx;
- }
- if (!dir_emit(ctx, de->di_fname, size, ino, DT_UNKNOWN)) {
- brelse(bh);
- return 0;
+ size = strlen(de->di_fname);
+ if (size) {
+ if ( !( de->di_status & QNX4_FILE_LINK ) && size > QNX4_SHORT_NAME_MAX )
+ size = QNX4_SHORT_NAME_MAX;
+ else if ( size > QNX4_NAME_MAX )
+ size = QNX4_NAME_MAX;
+
+ if ( ( de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK) ) != 0 ) {
+ QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname));
+ if ( ( de->di_status & QNX4_FILE_LINK ) == 0 )
+ ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
+ else {
+ le = (struct qnx4_link_info*)de;
+ ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) *
+ QNX4_INODES_PER_BLOCK +
+ le->dl_inode_ndx;
+ }
+ if (filldir(dirent, de->di_fname, size, filp->f_pos, ino, DT_UNKNOWN) < 0) {
+ brelse(bh);
+ goto out;
+ }
+ }
}
+ ix++;
+ filp->f_pos += QNX4_DIR_ENTRY_SIZE;
}
brelse(bh);
}
+out:
return 0;
}
@@ -71,7 +75,7 @@ const struct file_operations qnx4_dir_operations =
{
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = qnx4_readdir,
+ .readdir = qnx4_readdir,
.fsync = generic_file_fsync,
};
diff --git a/trunk/fs/qnx6/dir.c b/trunk/fs/qnx6/dir.c
index 15b7d92ed60d..afa6be6fc397 100644
--- a/trunk/fs/qnx6/dir.c
+++ b/trunk/fs/qnx6/dir.c
@@ -65,8 +65,8 @@ static struct qnx6_long_filename *qnx6_longname(struct super_block *sb,
static int qnx6_dir_longfilename(struct inode *inode,
struct qnx6_long_dir_entry *de,
- struct dir_context *ctx,
- unsigned de_inode)
+ void *dirent, loff_t pos,
+ unsigned de_inode, filldir_t filldir)
{
struct qnx6_long_filename *lf;
struct super_block *s = inode->i_sb;
@@ -104,7 +104,8 @@ static int qnx6_dir_longfilename(struct inode *inode,
QNX6DEBUG((KERN_INFO "qnx6_readdir:%.*s inode:%u\n",
lf_size, lf->lf_fname, de_inode));
- if (!dir_emit(ctx, lf->lf_fname, lf_size, de_inode, DT_UNKNOWN)) {
+ if (filldir(dirent, lf->lf_fname, lf_size, pos, de_inode,
+ DT_UNKNOWN) < 0) {
qnx6_put_page(page);
return 0;
}
@@ -114,19 +115,18 @@ static int qnx6_dir_longfilename(struct inode *inode,
return 1;
}
-static int qnx6_readdir(struct file *file, struct dir_context *ctx)
+static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
struct super_block *s = inode->i_sb;
struct qnx6_sb_info *sbi = QNX6_SB(s);
- loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
+ loff_t pos = filp->f_pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
unsigned long npages = dir_pages(inode);
unsigned long n = pos >> PAGE_CACHE_SHIFT;
unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE;
bool done = false;
- ctx->pos = pos;
- if (ctx->pos >= inode->i_size)
+ if (filp->f_pos >= inode->i_size)
return 0;
for ( ; !done && n < npages; n++, start = 0) {
@@ -137,11 +137,11 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
if (IS_ERR(page)) {
printk(KERN_ERR "qnx6_readdir: read failed\n");
- ctx->pos = (n + 1) << PAGE_CACHE_SHIFT;
+ filp->f_pos = (n + 1) << PAGE_CACHE_SHIFT;
return PTR_ERR(page);
}
de = ((struct qnx6_dir_entry *)page_address(page)) + start;
- for (; i < limit; i++, de++, ctx->pos += QNX6_DIR_ENTRY_SIZE) {
+ for (; i < limit; i++, de++, pos += QNX6_DIR_ENTRY_SIZE) {
int size = de->de_size;
u32 no_inode = fs32_to_cpu(sbi, de->de_inode);
@@ -154,7 +154,8 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
structure / block */
if (!qnx6_dir_longfilename(inode,
(struct qnx6_long_dir_entry *)de,
- ctx, no_inode)) {
+ dirent, pos, no_inode,
+ filldir)) {
done = true;
break;
}
@@ -162,8 +163,9 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
QNX6DEBUG((KERN_INFO "qnx6_readdir:%.*s"
" inode:%u\n", size, de->de_fname,
no_inode));
- if (!dir_emit(ctx, de->de_fname, size,
- no_inode, DT_UNKNOWN)) {
+ if (filldir(dirent, de->de_fname, size,
+ pos, no_inode, DT_UNKNOWN)
+ < 0) {
done = true;
break;
}
@@ -171,6 +173,7 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
}
qnx6_put_page(page);
}
+ filp->f_pos = pos;
return 0;
}
@@ -279,7 +282,7 @@ unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
const struct file_operations qnx6_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = qnx6_readdir,
+ .readdir = qnx6_readdir,
.fsync = generic_file_fsync,
};
diff --git a/trunk/fs/read_write.c b/trunk/fs/read_write.c
index 2cefa417be34..03430008704e 100644
--- a/trunk/fs/read_write.c
+++ b/trunk/fs/read_write.c
@@ -1064,7 +1064,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
struct fd in, out;
struct inode *in_inode, *out_inode;
loff_t pos;
- loff_t out_pos;
ssize_t retval;
int fl;
@@ -1078,14 +1077,12 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
if (!(in.file->f_mode & FMODE_READ))
goto fput_in;
retval = -ESPIPE;
- if (!ppos) {
- pos = in.file->f_pos;
- } else {
- pos = *ppos;
+ if (!ppos)
+ ppos = &in.file->f_pos;
+ else
if (!(in.file->f_mode & FMODE_PREAD))
goto fput_in;
- }
- retval = rw_verify_area(READ, in.file, &pos, count);
+ retval = rw_verify_area(READ, in.file, ppos, count);
if (retval < 0)
goto fput_in;
count = retval;
@@ -1102,8 +1099,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
retval = -EINVAL;
in_inode = file_inode(in.file);
out_inode = file_inode(out.file);
- out_pos = out.file->f_pos;
- retval = rw_verify_area(WRITE, out.file, &out_pos, count);
+ retval = rw_verify_area(WRITE, out.file, &out.file->f_pos, count);
if (retval < 0)
goto fput_out;
count = retval;
@@ -1111,6 +1107,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
if (!max)
max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
+ pos = *ppos;
if (unlikely(pos + count > max)) {
retval = -EOVERFLOW;
if (pos >= max)
@@ -1129,23 +1126,18 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
if (in.file->f_flags & O_NONBLOCK)
fl = SPLICE_F_NONBLOCK;
#endif
- retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl);
+ retval = do_splice_direct(in.file, ppos, out.file, count, fl);
if (retval > 0) {
add_rchar(current, retval);
add_wchar(current, retval);
fsnotify_access(in.file);
fsnotify_modify(out.file);
- out.file->f_pos = out_pos;
- if (ppos)
- *ppos = pos;
- else
- in.file->f_pos = pos;
}
inc_syscr(current);
inc_syscw(current);
- if (pos > max)
+ if (*ppos > max)
retval = -EOVERFLOW;
fput_out:
diff --git a/trunk/fs/readdir.c b/trunk/fs/readdir.c
index 93d71e574310..fee38e04fae4 100644
--- a/trunk/fs/readdir.c
+++ b/trunk/fs/readdir.c
@@ -20,11 +20,11 @@
#include
-int iterate_dir(struct file *file, struct dir_context *ctx)
+int vfs_readdir(struct file *file, filldir_t filler, void *buf)
{
struct inode *inode = file_inode(file);
int res = -ENOTDIR;
- if (!file->f_op || !file->f_op->iterate)
+ if (!file->f_op || !file->f_op->readdir)
goto out;
res = security_file_permission(file, MAY_READ);
@@ -37,16 +37,15 @@ int iterate_dir(struct file *file, struct dir_context *ctx)
res = -ENOENT;
if (!IS_DEADDIR(inode)) {
- ctx->pos = file->f_pos;
- res = file->f_op->iterate(file, ctx);
- file->f_pos = ctx->pos;
+ res = file->f_op->readdir(file, buf, filler);
file_accessed(file);
}
mutex_unlock(&inode->i_mutex);
out:
return res;
}
-EXPORT_SYMBOL(iterate_dir);
+
+EXPORT_SYMBOL(vfs_readdir);
/*
* Traditional linux readdir() handling..
@@ -67,7 +66,6 @@ struct old_linux_dirent {
};
struct readdir_callback {
- struct dir_context ctx;
struct old_linux_dirent __user * dirent;
int result;
};
@@ -75,7 +73,7 @@ struct readdir_callback {
static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset,
u64 ino, unsigned int d_type)
{
- struct readdir_callback *buf = (struct readdir_callback *) __buf;
+ struct readdir_callback * buf = (struct readdir_callback *) __buf;
struct old_linux_dirent __user * dirent;
unsigned long d_ino;
@@ -109,15 +107,15 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
{
int error;
struct fd f = fdget(fd);
- struct readdir_callback buf = {
- .ctx.actor = fillonedir,
- .dirent = dirent
- };
+ struct readdir_callback buf;
if (!f.file)
return -EBADF;
- error = iterate_dir(f.file, &buf.ctx);
+ buf.result = 0;
+ buf.dirent = dirent;
+
+ error = vfs_readdir(f.file, fillonedir, &buf);
if (buf.result)
error = buf.result;
@@ -139,7 +137,6 @@ struct linux_dirent {
};
struct getdents_callback {
- struct dir_context ctx;
struct linux_dirent __user * current_dir;
struct linux_dirent __user * previous;
int count;
@@ -194,11 +191,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
{
struct fd f;
struct linux_dirent __user * lastdirent;
- struct getdents_callback buf = {
- .ctx.actor = filldir,
- .count = count,
- .current_dir = dirent
- };
+ struct getdents_callback buf;
int error;
if (!access_ok(VERIFY_WRITE, dirent, count))
@@ -208,12 +201,17 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
if (!f.file)
return -EBADF;
- error = iterate_dir(f.file, &buf.ctx);
+ buf.current_dir = dirent;
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
+
+ error = vfs_readdir(f.file, filldir, &buf);
if (error >= 0)
error = buf.error;
lastdirent = buf.previous;
if (lastdirent) {
- if (put_user(buf.ctx.pos, &lastdirent->d_off))
+ if (put_user(f.file->f_pos, &lastdirent->d_off))
error = -EFAULT;
else
error = count - buf.count;
@@ -223,7 +221,6 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
}
struct getdents_callback64 {
- struct dir_context ctx;
struct linux_dirent64 __user * current_dir;
struct linux_dirent64 __user * previous;
int count;
@@ -274,11 +271,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
{
struct fd f;
struct linux_dirent64 __user * lastdirent;
- struct getdents_callback64 buf = {
- .ctx.actor = filldir64,
- .count = count,
- .current_dir = dirent
- };
+ struct getdents_callback64 buf;
int error;
if (!access_ok(VERIFY_WRITE, dirent, count))
@@ -288,12 +281,17 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
if (!f.file)
return -EBADF;
- error = iterate_dir(f.file, &buf.ctx);
+ buf.current_dir = dirent;
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
+
+ error = vfs_readdir(f.file, filldir64, &buf);
if (error >= 0)
error = buf.error;
lastdirent = buf.previous;
if (lastdirent) {
- typeof(lastdirent->d_off) d_off = buf.ctx.pos;
+ typeof(lastdirent->d_off) d_off = f.file->f_pos;
if (__put_user(d_off, &lastdirent->d_off))
error = -EFAULT;
else
diff --git a/trunk/fs/reiserfs/dir.c b/trunk/fs/reiserfs/dir.c
index 03e4ca5624d6..6c2d136561cb 100644
--- a/trunk/fs/reiserfs/dir.c
+++ b/trunk/fs/reiserfs/dir.c
@@ -13,14 +13,14 @@
extern const struct reiserfs_key MIN_KEY;
-static int reiserfs_readdir(struct file *, struct dir_context *);
+static int reiserfs_readdir(struct file *, void *, filldir_t);
static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end,
int datasync);
const struct file_operations reiserfs_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = reiserfs_readdir,
+ .readdir = reiserfs_readdir,
.fsync = reiserfs_dir_fsync,
.unlocked_ioctl = reiserfs_ioctl,
#ifdef CONFIG_COMPAT
@@ -50,15 +50,18 @@ static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end,
#define store_ih(where,what) copy_item_head (where, what)
-static inline bool is_privroot_deh(struct inode *dir, struct reiserfs_de_head *deh)
+static inline bool is_privroot_deh(struct dentry *dir,
+ struct reiserfs_de_head *deh)
{
- struct dentry *privroot = REISERFS_SB(dir->i_sb)->priv_root;
- return (privroot->d_inode &&
+ struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root;
+ return (dir == dir->d_parent && privroot->d_inode &&
deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid);
}
-int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
+int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
+ filldir_t filldir, loff_t *pos)
{
+ struct inode *inode = dentry->d_inode;
struct cpu_key pos_key; /* key of current position in the directory (key of directory entry) */
INITIALIZE_PATH(path_to_entry);
struct buffer_head *bh;
@@ -78,7 +81,7 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
/* form key for search the next directory entry using f_pos field of
file structure */
- make_cpu_key(&pos_key, inode, ctx->pos ?: DOT_OFFSET, TYPE_DIRENTRY, 3);
+ make_cpu_key(&pos_key, inode, *pos ?: DOT_OFFSET, TYPE_DIRENTRY, 3);
next_pos = cpu_key_k_offset(&pos_key);
path_to_entry.reada = PATH_READA;
@@ -123,6 +126,7 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
entry_num++, deh++) {
int d_reclen;
char *d_name;
+ off_t d_off;
ino_t d_ino;
if (!de_visible(deh))
@@ -151,10 +155,11 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
}
/* Ignore the .reiserfs_priv entry */
- if (is_privroot_deh(inode, deh))
+ if (is_privroot_deh(dentry, deh))
continue;
- ctx->pos = deh_offset(deh);
+ d_off = deh_offset(deh);
+ *pos = d_off;
d_ino = deh_objectid(deh);
if (d_reclen <= 32) {
local_buf = small_buf;
@@ -182,9 +187,9 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
* the write lock here for other waiters
*/
reiserfs_write_unlock(inode->i_sb);
- if (!dir_emit
- (ctx, local_buf, d_reclen, d_ino,
- DT_UNKNOWN)) {
+ if (filldir
+ (dirent, local_buf, d_reclen, d_off, d_ino,
+ DT_UNKNOWN) < 0) {
reiserfs_write_lock(inode->i_sb);
if (local_buf != small_buf) {
kfree(local_buf);
@@ -232,7 +237,7 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
} /* while */
end:
- ctx->pos = next_pos;
+ *pos = next_pos;
pathrelse(&path_to_entry);
reiserfs_check_path(&path_to_entry);
out:
@@ -240,9 +245,10 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
return ret;
}
-static int reiserfs_readdir(struct file *file, struct dir_context *ctx)
+static int reiserfs_readdir(struct file *file, void *dirent, filldir_t filldir)
{
- return reiserfs_readdir_inode(file_inode(file), ctx);
+ struct dentry *dentry = file->f_path.dentry;
+ return reiserfs_readdir_dentry(dentry, dirent, filldir, &file->f_pos);
}
/* compose directory item containing "." and ".." entries (entries are
diff --git a/trunk/fs/reiserfs/inode.c b/trunk/fs/reiserfs/inode.c
index 0048cc16a6a8..f844533792ee 100644
--- a/trunk/fs/reiserfs/inode.c
+++ b/trunk/fs/reiserfs/inode.c
@@ -2975,19 +2975,16 @@ static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
}
/* clm -- taken from fs/buffer.c:block_invalidate_page */
-static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void reiserfs_invalidatepage(struct page *page, unsigned long offset)
{
struct buffer_head *head, *bh, *next;
struct inode *inode = page->mapping->host;
unsigned int curr_off = 0;
- unsigned int stop = offset + length;
- int partial_page = (offset || length < PAGE_CACHE_SIZE);
int ret = 1;
BUG_ON(!PageLocked(page));
- if (!partial_page)
+ if (offset == 0)
ClearPageChecked(page);
if (!page_has_buffers(page))
@@ -2999,9 +2996,6 @@ static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
unsigned int next_off = curr_off + bh->b_size;
next = bh->b_this_page;
- if (next_off > stop)
- goto out;
-
/*
* is this block fully invalidated?
*/
@@ -3020,7 +3014,7 @@ static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
* The get_block cached value has been unconditionally invalidated,
* so real IO is not possible anymore.
*/
- if (!partial_page && ret) {
+ if (!offset && ret) {
ret = try_to_release_page(page, 0);
/* maybe should BUG_ON(!ret); - neilb */
}
diff --git a/trunk/fs/reiserfs/reiserfs.h b/trunk/fs/reiserfs/reiserfs.h
index 3df5ce6c724d..157e474ab303 100644
--- a/trunk/fs/reiserfs/reiserfs.h
+++ b/trunk/fs/reiserfs/reiserfs.h
@@ -2709,7 +2709,7 @@ extern const struct inode_operations reiserfs_dir_inode_operations;
extern const struct inode_operations reiserfs_symlink_inode_operations;
extern const struct inode_operations reiserfs_special_inode_operations;
extern const struct file_operations reiserfs_dir_operations;
-int reiserfs_readdir_inode(struct inode *, struct dir_context *);
+int reiserfs_readdir_dentry(struct dentry *, void *, filldir_t, loff_t *);
/* tail_conversion.c */
int direct2indirect(struct reiserfs_transaction_handle *, struct inode *,
diff --git a/trunk/fs/reiserfs/xattr.c b/trunk/fs/reiserfs/xattr.c
index c69cdd749f09..821bcf70e467 100644
--- a/trunk/fs/reiserfs/xattr.c
+++ b/trunk/fs/reiserfs/xattr.c
@@ -171,7 +171,6 @@ static struct dentry *open_xa_dir(const struct inode *inode, int flags)
* modifying extended attributes. This includes operations such as permissions
* or ownership changes, object deletions, etc. */
struct reiserfs_dentry_buf {
- struct dir_context ctx;
struct dentry *xadir;
int count;
struct dentry *dentries[8];
@@ -224,8 +223,9 @@ static int reiserfs_for_each_xattr(struct inode *inode,
{
struct dentry *dir;
int i, err = 0;
+ loff_t pos = 0;
struct reiserfs_dentry_buf buf = {
- .ctx.actor = fill_with_dentries,
+ .count = 0,
};
/* Skip out, an xattr has no xattrs associated with it */
@@ -249,27 +249,29 @@ static int reiserfs_for_each_xattr(struct inode *inode,
reiserfs_write_lock(inode->i_sb);
buf.xadir = dir;
- while (1) {
- err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx);
- if (err)
- break;
- if (!buf.count)
- break;
- for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) {
+ err = reiserfs_readdir_dentry(dir, &buf, fill_with_dentries, &pos);
+ while ((err == 0 || err == -ENOSPC) && buf.count) {
+ err = 0;
+
+ for (i = 0; i < buf.count && buf.dentries[i]; i++) {
+ int lerr = 0;
struct dentry *dentry = buf.dentries[i];
- if (!S_ISDIR(dentry->d_inode->i_mode))
- err = action(dentry, data);
+ if (err == 0 && !S_ISDIR(dentry->d_inode->i_mode))
+ lerr = action(dentry, data);
dput(dentry);
buf.dentries[i] = NULL;
+ err = lerr ?: err;
}
- if (err)
- break;
buf.count = 0;
+ if (!err)
+ err = reiserfs_readdir_dentry(dir, &buf,
+ fill_with_dentries, &pos);
}
mutex_unlock(&dir->d_inode->i_mutex);
+ /* Clean up after a failed readdir */
cleanup_dentry_buf(&buf);
if (!err) {
@@ -798,7 +800,6 @@ int reiserfs_removexattr(struct dentry *dentry, const char *name)
}
struct listxattr_buf {
- struct dir_context ctx;
size_t size;
size_t pos;
char *buf;
@@ -844,8 +845,8 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
{
struct dentry *dir;
int err = 0;
+ loff_t pos = 0;
struct listxattr_buf buf = {
- .ctx.actor = listxattr_filler,
.dentry = dentry,
.buf = buffer,
.size = buffer ? size : 0,
@@ -867,7 +868,7 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
}
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
- err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx);
+ err = reiserfs_readdir_dentry(dir, &buf, listxattr_filler, &pos);
mutex_unlock(&dir->d_inode->i_mutex);
if (!err)
diff --git a/trunk/fs/romfs/super.c b/trunk/fs/romfs/super.c
index ff1d3d42e72a..15cbc41ee365 100644
--- a/trunk/fs/romfs/super.c
+++ b/trunk/fs/romfs/super.c
@@ -145,18 +145,19 @@ static const struct address_space_operations romfs_aops = {
/*
* read the entries from a directory
*/
-static int romfs_readdir(struct file *file, struct dir_context *ctx)
+static int romfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *i = file_inode(file);
+ struct inode *i = file_inode(filp);
struct romfs_inode ri;
unsigned long offset, maxoff;
int j, ino, nextfh;
+ int stored = 0;
char fsname[ROMFS_MAXFN]; /* XXX dynamic? */
int ret;
maxoff = romfs_maxsize(i->i_sb);
- offset = ctx->pos;
+ offset = filp->f_pos;
if (!offset) {
offset = i->i_ino & ROMFH_MASK;
ret = romfs_dev_read(i->i_sb, offset, &ri, ROMFH_SIZE);
@@ -169,10 +170,10 @@ static int romfs_readdir(struct file *file, struct dir_context *ctx)
for (;;) {
if (!offset || offset >= maxoff) {
offset = maxoff;
- ctx->pos = offset;
+ filp->f_pos = offset;
goto out;
}
- ctx->pos = offset;
+ filp->f_pos = offset;
/* Fetch inode info */
ret = romfs_dev_read(i->i_sb, offset, &ri, ROMFH_SIZE);
@@ -193,14 +194,16 @@ static int romfs_readdir(struct file *file, struct dir_context *ctx)
nextfh = be32_to_cpu(ri.next);
if ((nextfh & ROMFH_TYPE) == ROMFH_HRD)
ino = be32_to_cpu(ri.spec);
- if (!dir_emit(ctx, fsname, j, ino,
- romfs_dtype_table[nextfh & ROMFH_TYPE]))
+ if (filldir(dirent, fsname, j, offset, ino,
+ romfs_dtype_table[nextfh & ROMFH_TYPE]) < 0)
goto out;
+ stored++;
offset = nextfh & ROMFH_MASK;
}
+
out:
- return 0;
+ return stored;
}
/*
@@ -278,7 +281,7 @@ static struct dentry *romfs_lookup(struct inode *dir, struct dentry *dentry,
static const struct file_operations romfs_dir_operations = {
.read = generic_read_dir,
- .iterate = romfs_readdir,
+ .readdir = romfs_readdir,
.llseek = default_llseek,
};
diff --git a/trunk/fs/splice.c b/trunk/fs/splice.c
index d37431dd60a1..e6b25598c8c4 100644
--- a/trunk/fs/splice.c
+++ b/trunk/fs/splice.c
@@ -1274,7 +1274,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
{
struct file *file = sd->u.file;
- return do_splice_from(pipe, file, sd->opos, sd->total_len,
+ return do_splice_from(pipe, file, &file->f_pos, sd->total_len,
sd->flags);
}
@@ -1283,7 +1283,6 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
* @in: file to splice from
* @ppos: input file offset
* @out: file to splice to
- * @opos: output file offset
* @len: number of bytes to splice
* @flags: splice modifier flags
*
@@ -1295,7 +1294,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
*
*/
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
- loff_t *opos, size_t len, unsigned int flags)
+ size_t len, unsigned int flags)
{
struct splice_desc sd = {
.len = len,
@@ -1303,7 +1302,6 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
.flags = flags,
.pos = *ppos,
.u.file = out,
- .opos = opos,
};
long ret;
@@ -1327,7 +1325,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
{
struct pipe_inode_info *ipipe;
struct pipe_inode_info *opipe;
- loff_t offset;
+ loff_t offset, *off;
long ret;
ipipe = get_pipe_info(in);
@@ -1358,15 +1356,13 @@ static long do_splice(struct file *in, loff_t __user *off_in,
return -EINVAL;
if (copy_from_user(&offset, off_out, sizeof(loff_t)))
return -EFAULT;
- } else {
- offset = out->f_pos;
- }
+ off = &offset;
+ } else
+ off = &out->f_pos;
- ret = do_splice_from(ipipe, out, &offset, len, flags);
+ ret = do_splice_from(ipipe, out, off, len, flags);
- if (!off_out)
- out->f_pos = offset;
- else if (copy_to_user(off_out, &offset, sizeof(loff_t)))
+ if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
ret = -EFAULT;
return ret;
@@ -1380,15 +1376,13 @@ static long do_splice(struct file *in, loff_t __user *off_in,
return -EINVAL;
if (copy_from_user(&offset, off_in, sizeof(loff_t)))
return -EFAULT;
- } else {
- offset = in->f_pos;
- }
+ off = &offset;
+ } else
+ off = &in->f_pos;
- ret = do_splice_to(in, &offset, opipe, len, flags);
+ ret = do_splice_to(in, off, opipe, len, flags);
- if (!off_in)
- in->f_pos = offset;
- else if (copy_to_user(off_in, &offset, sizeof(loff_t)))
+ if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
ret = -EFAULT;
return ret;
diff --git a/trunk/fs/squashfs/dir.c b/trunk/fs/squashfs/dir.c
index f7f527bf8c10..57dc70ebbb19 100644
--- a/trunk/fs/squashfs/dir.c
+++ b/trunk/fs/squashfs/dir.c
@@ -100,7 +100,7 @@ static int get_dir_index_using_offset(struct super_block *sb,
}
-static int squashfs_readdir(struct file *file, struct dir_context *ctx)
+static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
{
struct inode *inode = file_inode(file);
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
@@ -127,11 +127,11 @@ static int squashfs_readdir(struct file *file, struct dir_context *ctx)
* It also means that the external f_pos is offset by 3 from the
* on-disk directory f_pos.
*/
- while (ctx->pos < 3) {
+ while (file->f_pos < 3) {
char *name;
int i_ino;
- if (ctx->pos == 0) {
+ if (file->f_pos == 0) {
name = ".";
size = 1;
i_ino = inode->i_ino;
@@ -141,18 +141,24 @@ static int squashfs_readdir(struct file *file, struct dir_context *ctx)
i_ino = squashfs_i(inode)->parent;
}
- if (!dir_emit(ctx, name, size, i_ino,
- squashfs_filetype_table[1]))
+ TRACE("Calling filldir(%p, %s, %d, %lld, %d, %d)\n",
+ dirent, name, size, file->f_pos, i_ino,
+ squashfs_filetype_table[1]);
+
+ if (filldir(dirent, name, size, file->f_pos, i_ino,
+ squashfs_filetype_table[1]) < 0) {
+ TRACE("Filldir returned less than 0\n");
goto finish;
+ }
- ctx->pos += size;
+ file->f_pos += size;
}
length = get_dir_index_using_offset(inode->i_sb, &block, &offset,
squashfs_i(inode)->dir_idx_start,
squashfs_i(inode)->dir_idx_offset,
squashfs_i(inode)->dir_idx_cnt,
- ctx->pos);
+ file->f_pos);
while (length < i_size_read(inode)) {
/*
@@ -192,7 +198,7 @@ static int squashfs_readdir(struct file *file, struct dir_context *ctx)
length += sizeof(*dire) + size;
- if (ctx->pos >= length)
+ if (file->f_pos >= length)
continue;
dire->name[size] = '\0';
@@ -200,12 +206,22 @@ static int squashfs_readdir(struct file *file, struct dir_context *ctx)
((short) le16_to_cpu(dire->inode_number));
type = le16_to_cpu(dire->type);
- if (!dir_emit(ctx, dire->name, size,
+ TRACE("Calling filldir(%p, %s, %d, %lld, %x:%x, %d, %d)"
+ "\n", dirent, dire->name, size,
+ file->f_pos,
+ le32_to_cpu(dirh.start_block),
+ le16_to_cpu(dire->offset),
+ inode_number,
+ squashfs_filetype_table[type]);
+
+ if (filldir(dirent, dire->name, size, file->f_pos,
inode_number,
- squashfs_filetype_table[type]))
+ squashfs_filetype_table[type]) < 0) {
+ TRACE("Filldir returned less than 0\n");
goto finish;
+ }
- ctx->pos = length;
+ file->f_pos = length;
}
}
@@ -222,6 +238,6 @@ static int squashfs_readdir(struct file *file, struct dir_context *ctx)
const struct file_operations squashfs_dir_ops = {
.read = generic_read_dir,
- .iterate = squashfs_readdir,
+ .readdir = squashfs_readdir,
.llseek = default_llseek,
};
diff --git a/trunk/fs/sysfs/dir.c b/trunk/fs/sysfs/dir.c
index 4cfd742d260d..e8e0e71b29d5 100644
--- a/trunk/fs/sysfs/dir.c
+++ b/trunk/fs/sysfs/dir.c
@@ -998,38 +998,68 @@ static struct sysfs_dirent *sysfs_dir_next_pos(const void *ns,
return pos;
}
-static int sysfs_readdir(struct file *file, struct dir_context *ctx)
+static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
- struct dentry *dentry = file->f_path.dentry;
+ struct dentry *dentry = filp->f_path.dentry;
struct sysfs_dirent * parent_sd = dentry->d_fsdata;
- struct sysfs_dirent *pos = file->private_data;
+ struct sysfs_dirent *pos = filp->private_data;
enum kobj_ns_type type;
const void *ns;
+ ino_t ino;
+ loff_t off;
type = sysfs_ns_type(parent_sd);
ns = sysfs_info(dentry->d_sb)->ns[type];
- if (!dir_emit_dots(file, ctx))
- return 0;
+ if (filp->f_pos == 0) {
+ ino = parent_sd->s_ino;
+ if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0)
+ filp->f_pos++;
+ else
+ return 0;
+ }
+ if (filp->f_pos == 1) {
+ if (parent_sd->s_parent)
+ ino = parent_sd->s_parent->s_ino;
+ else
+ ino = parent_sd->s_ino;
+ if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0)
+ filp->f_pos++;
+ else
+ return 0;
+ }
mutex_lock(&sysfs_mutex);
- for (pos = sysfs_dir_pos(ns, parent_sd, ctx->pos, pos);
+ off = filp->f_pos;
+ for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos);
pos;
- pos = sysfs_dir_next_pos(ns, parent_sd, ctx->pos, pos)) {
- const char *name = pos->s_name;
- unsigned int type = dt_type(pos);
- int len = strlen(name);
- ino_t ino = pos->s_ino;
- ctx->pos = pos->s_hash;
- file->private_data = sysfs_get(pos);
+ pos = sysfs_dir_next_pos(ns, parent_sd, filp->f_pos, pos)) {
+ const char * name;
+ unsigned int type;
+ int len, ret;
+
+ name = pos->s_name;
+ len = strlen(name);
+ ino = pos->s_ino;
+ type = dt_type(pos);
+ off = filp->f_pos = pos->s_hash;
+ filp->private_data = sysfs_get(pos);
mutex_unlock(&sysfs_mutex);
- if (!dir_emit(ctx, name, len, ino, type))
- return 0;
+ ret = filldir(dirent, name, len, off, ino, type);
mutex_lock(&sysfs_mutex);
+ if (ret < 0)
+ break;
}
mutex_unlock(&sysfs_mutex);
- file->private_data = NULL;
- ctx->pos = INT_MAX;
+
+ /* don't reference last entry if its refcount is dropped */
+ if (!pos) {
+ filp->private_data = NULL;
+
+ /* EOF and not changed as 0 or 1 in read/write path */
+ if (off == filp->f_pos && off > 1)
+ filp->f_pos = INT_MAX;
+ }
return 0;
}
@@ -1047,7 +1077,7 @@ static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence)
const struct file_operations sysfs_dir_operations = {
.read = generic_read_dir,
- .iterate = sysfs_readdir,
+ .readdir = sysfs_readdir,
.release = sysfs_dir_release,
.llseek = sysfs_dir_llseek,
};
diff --git a/trunk/fs/sysv/dir.c b/trunk/fs/sysv/dir.c
index d42291d08215..3799e8dac3eb 100644
--- a/trunk/fs/sysv/dir.c
+++ b/trunk/fs/sysv/dir.c
@@ -18,12 +18,12 @@
#include
#include "sysv.h"
-static int sysv_readdir(struct file *, struct dir_context *);
+static int sysv_readdir(struct file *, void *, filldir_t);
const struct file_operations sysv_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = sysv_readdir,
+ .readdir = sysv_readdir,
.fsync = generic_file_fsync,
};
@@ -65,21 +65,18 @@ static struct page * dir_get_page(struct inode *dir, unsigned long n)
return page;
}
-static int sysv_readdir(struct file *file, struct dir_context *ctx)
+static int sysv_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
- unsigned long pos = ctx->pos;
- struct inode *inode = file_inode(file);
+ unsigned long pos = filp->f_pos;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
+ unsigned offset = pos & ~PAGE_CACHE_MASK;
+ unsigned long n = pos >> PAGE_CACHE_SHIFT;
unsigned long npages = dir_pages(inode);
- unsigned offset;
- unsigned long n;
- ctx->pos = pos = (pos + SYSV_DIRSIZE-1) & ~(SYSV_DIRSIZE-1);
+ pos = (pos + SYSV_DIRSIZE-1) & ~(SYSV_DIRSIZE-1);
if (pos >= inode->i_size)
- return 0;
-
- offset = pos & ~PAGE_CACHE_MASK;
- n = pos >> PAGE_CACHE_SHIFT;
+ goto done;
for ( ; n < npages; n++, offset = 0) {
char *kaddr, *limit;
@@ -91,21 +88,29 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
kaddr = (char *)page_address(page);
de = (struct sysv_dir_entry *)(kaddr+offset);
limit = kaddr + PAGE_CACHE_SIZE - SYSV_DIRSIZE;
- for ( ;(char*)de <= limit; de++, ctx->pos += sizeof(*de)) {
+ for ( ;(char*)de <= limit; de++) {
char *name = de->name;
+ int over;
if (!de->inode)
continue;
- if (!dir_emit(ctx, name, strnlen(name,SYSV_NAMELEN),
+ offset = (char *)de - kaddr;
+
+ over = filldir(dirent, name, strnlen(name,SYSV_NAMELEN),
+ ((loff_t)n<inode),
- DT_UNKNOWN)) {
+ DT_UNKNOWN);
+ if (over) {
dir_put_page(page);
- return 0;
+ goto done;
}
}
dir_put_page(page);
}
+
+done:
+ filp->f_pos = ((loff_t)n << PAGE_CACHE_SHIFT) | offset;
return 0;
}
diff --git a/trunk/fs/ubifs/dir.c b/trunk/fs/ubifs/dir.c
index 6b4947f75af7..de08c92f2e23 100644
--- a/trunk/fs/ubifs/dir.c
+++ b/trunk/fs/ubifs/dir.c
@@ -346,46 +346,38 @@ static unsigned int vfs_dent_type(uint8_t type)
* This means that UBIFS cannot support NFS which requires full
* 'seekdir()'/'telldir()' support.
*/
-static int ubifs_readdir(struct file *file, struct dir_context *ctx)
+static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
{
- int err;
+ int err, over = 0;
struct qstr nm;
union ubifs_key key;
struct ubifs_dent_node *dent;
struct inode *dir = file_inode(file);
struct ubifs_info *c = dir->i_sb->s_fs_info;
- dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, ctx->pos);
+ dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos);
- if (ctx->pos > UBIFS_S_KEY_HASH_MASK || ctx->pos == 2)
+ if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2)
/*
* The directory was seek'ed to a senseless position or there
* are no more entries.
*/
return 0;
- if (file->f_version == 0) {
- /*
- * The file was seek'ed, which means that @file->private_data
- * is now invalid. This may also be just the first
- * 'ubifs_readdir()' invocation, in which case
- * @file->private_data is NULL, and the below code is
- * basically a no-op.
- */
- kfree(file->private_data);
- file->private_data = NULL;
+ /* File positions 0 and 1 correspond to "." and ".." */
+ if (file->f_pos == 0) {
+ ubifs_assert(!file->private_data);
+ over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR);
+ if (over)
+ return 0;
+ file->f_pos = 1;
}
- /*
- * 'generic_file_llseek()' unconditionally sets @file->f_version to
- * zero, and we use this for detecting whether the file was seek'ed.
- */
- file->f_version = 1;
-
- /* File positions 0 and 1 correspond to "." and ".." */
- if (ctx->pos < 2) {
+ if (file->f_pos == 1) {
ubifs_assert(!file->private_data);
- if (!dir_emit_dots(file, ctx))
+ over = filldir(dirent, "..", 2, 1,
+ parent_ino(file->f_path.dentry), DT_DIR);
+ if (over)
return 0;
/* Find the first entry in TNC and save it */
@@ -397,7 +389,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
goto out;
}
- ctx->pos = key_hash_flash(c, &dent->key);
+ file->f_pos = key_hash_flash(c, &dent->key);
file->private_data = dent;
}
@@ -405,16 +397,17 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
if (!dent) {
/*
* The directory was seek'ed to and is now readdir'ed.
- * Find the entry corresponding to @ctx->pos or the closest one.
+ * Find the entry corresponding to @file->f_pos or the
+ * closest one.
*/
- dent_key_init_hash(c, &key, dir->i_ino, ctx->pos);
+ dent_key_init_hash(c, &key, dir->i_ino, file->f_pos);
nm.name = NULL;
dent = ubifs_tnc_next_ent(c, &key, &nm);
if (IS_ERR(dent)) {
err = PTR_ERR(dent);
goto out;
}
- ctx->pos = key_hash_flash(c, &dent->key);
+ file->f_pos = key_hash_flash(c, &dent->key);
file->private_data = dent;
}
@@ -426,9 +419,10 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
ubifs_inode(dir)->creat_sqnum);
nm.len = le16_to_cpu(dent->nlen);
- if (!dir_emit(ctx, dent->name, nm.len,
+ over = filldir(dirent, dent->name, nm.len, file->f_pos,
le64_to_cpu(dent->inum),
- vfs_dent_type(dent->type)))
+ vfs_dent_type(dent->type));
+ if (over)
return 0;
/* Switch to the next entry */
@@ -441,7 +435,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
}
kfree(file->private_data);
- ctx->pos = key_hash_flash(c, &dent->key);
+ file->f_pos = key_hash_flash(c, &dent->key);
file->private_data = dent;
cond_resched();
}
@@ -454,11 +448,18 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
kfree(file->private_data);
file->private_data = NULL;
- /* 2 is a special value indicating that there are no more direntries */
- ctx->pos = 2;
+ file->f_pos = 2;
return 0;
}
+/* If a directory is seeked, we have to free saved readdir() state */
+static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ kfree(file->private_data);
+ file->private_data = NULL;
+ return generic_file_llseek(file, offset, whence);
+}
+
/* Free saved readdir() state when the directory is closed */
static int ubifs_dir_release(struct inode *dir, struct file *file)
{
@@ -1176,10 +1177,10 @@ const struct inode_operations ubifs_dir_inode_operations = {
};
const struct file_operations ubifs_dir_operations = {
- .llseek = generic_file_llseek,
+ .llseek = ubifs_dir_llseek,
.release = ubifs_dir_release,
.read = generic_read_dir,
- .iterate = ubifs_readdir,
+ .readdir = ubifs_readdir,
.fsync = ubifs_fsync,
.unlocked_ioctl = ubifs_ioctl,
#ifdef CONFIG_COMPAT
diff --git a/trunk/fs/ubifs/file.c b/trunk/fs/ubifs/file.c
index 123c79b7261e..14374530784c 100644
--- a/trunk/fs/ubifs/file.c
+++ b/trunk/fs/ubifs/file.c
@@ -1277,14 +1277,13 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
return err;
}
-static void ubifs_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void ubifs_invalidatepage(struct page *page, unsigned long offset)
{
struct inode *inode = page->mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
ubifs_assert(PagePrivate(page));
- if (offset || length < PAGE_CACHE_SIZE)
+ if (offset)
/* Partial page remains dirty */
return;
diff --git a/trunk/fs/udf/dir.c b/trunk/fs/udf/dir.c
index a012c51caffd..b3e93f5e17c3 100644
--- a/trunk/fs/udf/dir.c
+++ b/trunk/fs/udf/dir.c
@@ -35,16 +35,14 @@
#include "udf_i.h"
#include "udf_sb.h"
-
-static int udf_readdir(struct file *file, struct dir_context *ctx)
+static int do_udf_readdir(struct inode *dir, struct file *filp,
+ filldir_t filldir, void *dirent)
{
- struct inode *dir = file_inode(file);
- struct udf_inode_info *iinfo = UDF_I(dir);
struct udf_fileident_bh fibh = { .sbh = NULL, .ebh = NULL};
struct fileIdentDesc *fi = NULL;
struct fileIdentDesc cfi;
int block, iblock;
- loff_t nf_pos;
+ loff_t nf_pos = (filp->f_pos - 1) << 2;
int flen;
unsigned char *fname = NULL;
unsigned char *nameptr;
@@ -56,14 +54,10 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
uint32_t elen;
sector_t offset;
int i, num, ret = 0;
+ unsigned int dt_type;
struct extent_position epos = { NULL, 0, {0, 0} };
+ struct udf_inode_info *iinfo;
- if (ctx->pos == 0) {
- if (!dir_emit_dot(file, ctx))
- return 0;
- ctx->pos = 1;
- }
- nf_pos = (ctx->pos - 1) << 2;
if (nf_pos >= size)
goto out;
@@ -77,6 +71,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
nf_pos = udf_ext0_offset(dir);
fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1);
+ iinfo = UDF_I(dir);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits,
&epos, &eloc, &elen, &offset)
@@ -121,9 +116,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
}
while (nf_pos < size) {
- struct kernel_lb_addr tloc;
-
- ctx->pos = (nf_pos >> 2) + 1;
+ filp->f_pos = (nf_pos >> 2) + 1;
fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
&elen, &offset);
@@ -162,22 +155,24 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
}
if (cfi.fileCharacteristics & FID_FILE_CHAR_PARENT) {
- if (!dir_emit_dotdot(file, ctx))
- goto out;
- continue;
- }
+ iblock = parent_ino(filp->f_path.dentry);
+ flen = 2;
+ memcpy(fname, "..", flen);
+ dt_type = DT_DIR;
+ } else {
+ struct kernel_lb_addr tloc = lelb_to_cpu(cfi.icb.extLocation);
- flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
- if (!flen)
- continue;
+ iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0);
+ flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
+ dt_type = DT_UNKNOWN;
+ }
- tloc = lelb_to_cpu(cfi.icb.extLocation);
- iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0);
- if (!dir_emit(ctx, fname, flen, iblock, DT_UNKNOWN))
+ if (flen && filldir(dirent, fname, flen, filp->f_pos,
+ iblock, dt_type) < 0)
goto out;
} /* end while */
- ctx->pos = (nf_pos >> 2) + 1;
+ filp->f_pos = (nf_pos >> 2) + 1;
out:
if (fibh.sbh != fibh.ebh)
@@ -189,11 +184,27 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
return ret;
}
+static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+ struct inode *dir = file_inode(filp);
+ int result;
+
+ if (filp->f_pos == 0) {
+ if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0) {
+ return 0;
+ }
+ filp->f_pos++;
+ }
+
+ result = do_udf_readdir(dir, filp, filldir, dirent);
+ return result;
+}
+
/* readdir and lookup functions */
const struct file_operations udf_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = udf_readdir,
+ .readdir = udf_readdir,
.unlocked_ioctl = udf_ioctl,
.fsync = generic_file_fsync,
};
diff --git a/trunk/fs/ufs/dir.c b/trunk/fs/ufs/dir.c
index 0ecc2cebed8f..3a75ca09c506 100644
--- a/trunk/fs/ufs/dir.c
+++ b/trunk/fs/ufs/dir.c
@@ -430,16 +430,16 @@ ufs_validate_entry(struct super_block *sb, char *base,
* This is blatantly stolen from ext2fs
*/
static int
-ufs_readdir(struct file *file, struct dir_context *ctx)
+ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- loff_t pos = ctx->pos;
- struct inode *inode = file_inode(file);
+ loff_t pos = filp->f_pos;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
unsigned int offset = pos & ~PAGE_CACHE_MASK;
unsigned long n = pos >> PAGE_CACHE_SHIFT;
unsigned long npages = ufs_dir_pages(inode);
unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
- int need_revalidate = file->f_version != inode->i_version;
+ int need_revalidate = filp->f_version != inode->i_version;
unsigned flags = UFS_SB(sb)->s_flags;
UFSD("BEGIN\n");
@@ -457,16 +457,16 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
ufs_error(sb, __func__,
"bad page in #%lu",
inode->i_ino);
- ctx->pos += PAGE_CACHE_SIZE - offset;
+ filp->f_pos += PAGE_CACHE_SIZE - offset;
return -EIO;
}
kaddr = page_address(page);
if (unlikely(need_revalidate)) {
if (offset) {
offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
- ctx->pos = (n<f_pos = (n<f_version = inode->i_version;
+ filp->f_version = inode->i_version;
need_revalidate = 0;
}
de = (struct ufs_dir_entry *)(kaddr+offset);
@@ -479,8 +479,11 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
return -EIO;
}
if (de->d_ino) {
+ int over;
unsigned char d_type = DT_UNKNOWN;
+ offset = (char *)de - kaddr;
+
UFSD("filldir(%s,%u)\n", de->d_name,
fs32_to_cpu(sb, de->d_ino));
UFSD("namlen %u\n", ufs_get_de_namlen(sb, de));
@@ -488,15 +491,16 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
d_type = de->d_u.d_44.d_type;
- if (!dir_emit(ctx, de->d_name,
+ over = filldir(dirent, de->d_name,
ufs_get_de_namlen(sb, de),
- fs32_to_cpu(sb, de->d_ino),
- d_type)) {
+ (n<d_ino), d_type);
+ if (over) {
ufs_put_page(page);
return 0;
}
}
- ctx->pos += fs16_to_cpu(sb, de->d_reclen);
+ filp->f_pos += fs16_to_cpu(sb, de->d_reclen);
}
ufs_put_page(page);
}
@@ -656,7 +660,7 @@ int ufs_empty_dir(struct inode * inode)
const struct file_operations ufs_dir_operations = {
.read = generic_read_dir,
- .iterate = ufs_readdir,
+ .readdir = ufs_readdir,
.fsync = generic_file_fsync,
.llseek = generic_file_llseek,
};
diff --git a/trunk/fs/xfs/xfs_aops.c b/trunk/fs/xfs/xfs_aops.c
index 596ec71da00e..41a695048be7 100644
--- a/trunk/fs/xfs/xfs_aops.c
+++ b/trunk/fs/xfs/xfs_aops.c
@@ -843,12 +843,10 @@ xfs_cluster_write(
STATIC void
xfs_vm_invalidatepage(
struct page *page,
- unsigned int offset,
- unsigned int length)
+ unsigned long offset)
{
- trace_xfs_invalidatepage(page->mapping->host, page, offset,
- length);
- block_invalidatepage(page, offset, length);
+ trace_xfs_invalidatepage(page->mapping->host, page, offset);
+ block_invalidatepage(page, offset);
}
/*
@@ -912,7 +910,7 @@ xfs_aops_discard_page(
xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
- xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ xfs_vm_invalidatepage(page, 0);
return;
}
@@ -942,7 +940,7 @@ xfs_vm_writepage(
int count = 0;
int nonblocking = 0;
- trace_xfs_writepage(inode, page, 0, 0);
+ trace_xfs_writepage(inode, page, 0);
ASSERT(page_has_buffers(page));
@@ -1173,7 +1171,7 @@ xfs_vm_releasepage(
{
int delalloc, unwritten;
- trace_xfs_releasepage(page->mapping->host, page, 0, 0);
+ trace_xfs_releasepage(page->mapping->host, page, 0);
xfs_count_page_state(page, &delalloc, &unwritten);
diff --git a/trunk/fs/xfs/xfs_attr_leaf.h b/trunk/fs/xfs/xfs_attr_leaf.h
index 444a7704596c..f9d7846097e2 100644
--- a/trunk/fs/xfs/xfs_attr_leaf.h
+++ b/trunk/fs/xfs/xfs_attr_leaf.h
@@ -128,7 +128,6 @@ struct xfs_attr3_leaf_hdr {
__u8 holes;
__u8 pad1;
struct xfs_attr_leaf_map freemap[XFS_ATTR_LEAF_MAPSIZE];
- __be32 pad2; /* 64 bit alignment */
};
#define XFS_ATTR3_LEAF_CRC_OFF (offsetof(struct xfs_attr3_leaf_hdr, info.crc))
diff --git a/trunk/fs/xfs/xfs_btree.c b/trunk/fs/xfs/xfs_btree.c
index 0903960410a2..8804b8a3c310 100644
--- a/trunk/fs/xfs/xfs_btree.c
+++ b/trunk/fs/xfs/xfs_btree.c
@@ -2544,17 +2544,7 @@ xfs_btree_new_iroot(
if (error)
goto error0;
- /*
- * we can't just memcpy() the root in for CRC enabled btree blocks.
- * In that case have to also ensure the blkno remains correct
- */
memcpy(cblock, block, xfs_btree_block_len(cur));
- if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
- if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
- cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn);
- else
- cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn);
- }
be16_add_cpu(&block->bb_level, 1);
xfs_btree_set_numrecs(block, 1);
diff --git a/trunk/fs/xfs/xfs_dir2.c b/trunk/fs/xfs/xfs_dir2.c
index 8f023dee404d..b26a50f9921d 100644
--- a/trunk/fs/xfs/xfs_dir2.c
+++ b/trunk/fs/xfs/xfs_dir2.c
@@ -368,8 +368,10 @@ xfs_dir_removename(
int
xfs_readdir(
xfs_inode_t *dp,
- struct dir_context *ctx,
- size_t bufsize)
+ void *dirent,
+ size_t bufsize,
+ xfs_off_t *offset,
+ filldir_t filldir)
{
int rval; /* return value */
int v; /* type-checking value */
@@ -383,13 +385,14 @@ xfs_readdir(
XFS_STATS_INC(xs_dir_getdents);
if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
- rval = xfs_dir2_sf_getdents(dp, ctx);
+ rval = xfs_dir2_sf_getdents(dp, dirent, offset, filldir);
else if ((rval = xfs_dir2_isblock(NULL, dp, &v)))
;
else if (v)
- rval = xfs_dir2_block_getdents(dp, ctx);
+ rval = xfs_dir2_block_getdents(dp, dirent, offset, filldir);
else
- rval = xfs_dir2_leaf_getdents(dp, ctx, bufsize);
+ rval = xfs_dir2_leaf_getdents(dp, dirent, bufsize, offset,
+ filldir);
return rval;
}
diff --git a/trunk/fs/xfs/xfs_dir2_block.c b/trunk/fs/xfs/xfs_dir2_block.c
index 09aea0247d96..e59f5fc816fe 100644
--- a/trunk/fs/xfs/xfs_dir2_block.c
+++ b/trunk/fs/xfs/xfs_dir2_block.c
@@ -569,7 +569,9 @@ xfs_dir2_block_addname(
int /* error */
xfs_dir2_block_getdents(
xfs_inode_t *dp, /* incore inode */
- struct dir_context *ctx)
+ void *dirent,
+ xfs_off_t *offset,
+ filldir_t filldir)
{
xfs_dir2_data_hdr_t *hdr; /* block header */
struct xfs_buf *bp; /* buffer for block */
@@ -587,7 +589,7 @@ xfs_dir2_block_getdents(
/*
* If the block number in the offset is out of range, we're done.
*/
- if (xfs_dir2_dataptr_to_db(mp, ctx->pos) > mp->m_dirdatablk)
+ if (xfs_dir2_dataptr_to_db(mp, *offset) > mp->m_dirdatablk)
return 0;
error = xfs_dir3_block_read(NULL, dp, &bp);
@@ -598,7 +600,7 @@ xfs_dir2_block_getdents(
* Extract the byte offset we start at from the seek pointer.
* We'll skip entries before this.
*/
- wantoff = xfs_dir2_dataptr_to_off(mp, ctx->pos);
+ wantoff = xfs_dir2_dataptr_to_off(mp, *offset);
hdr = bp->b_addr;
xfs_dir3_data_check(dp, bp);
/*
@@ -637,12 +639,13 @@ xfs_dir2_block_getdents(
cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
(char *)dep - (char *)hdr);
- ctx->pos = cook & 0x7fffffff;
/*
* If it didn't fit, set the final offset to here & return.
*/
- if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
- be64_to_cpu(dep->inumber), DT_UNKNOWN)) {
+ if (filldir(dirent, (char *)dep->name, dep->namelen,
+ cook & 0x7fffffff, be64_to_cpu(dep->inumber),
+ DT_UNKNOWN)) {
+ *offset = cook & 0x7fffffff;
xfs_trans_brelse(NULL, bp);
return 0;
}
@@ -652,7 +655,7 @@ xfs_dir2_block_getdents(
* Reached the end of the block.
* Set the offset to a non-existent block 1 and return.
*/
- ctx->pos = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
+ *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
0x7fffffff;
xfs_trans_brelse(NULL, bp);
return 0;
diff --git a/trunk/fs/xfs/xfs_dir2_format.h b/trunk/fs/xfs/xfs_dir2_format.h
index 7826782b8d78..995f1f505a52 100644
--- a/trunk/fs/xfs/xfs_dir2_format.h
+++ b/trunk/fs/xfs/xfs_dir2_format.h
@@ -266,7 +266,6 @@ struct xfs_dir3_blk_hdr {
struct xfs_dir3_data_hdr {
struct xfs_dir3_blk_hdr hdr;
xfs_dir2_data_free_t best_free[XFS_DIR2_DATA_FD_COUNT];
- __be32 pad; /* 64 bit alignment */
};
#define XFS_DIR3_DATA_CRC_OFF offsetof(struct xfs_dir3_data_hdr, hdr.crc)
@@ -478,7 +477,7 @@ struct xfs_dir3_leaf_hdr {
struct xfs_da3_blkinfo info; /* header for da routines */
__be16 count; /* count of entries */
__be16 stale; /* count of stale entries */
- __be32 pad; /* 64 bit alignment */
+ __be32 pad;
};
struct xfs_dir3_icleaf_hdr {
@@ -716,7 +715,7 @@ struct xfs_dir3_free_hdr {
__be32 firstdb; /* db of first entry */
__be32 nvalid; /* count of valid entries */
__be32 nused; /* count of used entries */
- __be32 pad; /* 64 bit alignment */
+ __be32 pad; /* 64 bit alignment. */
};
struct xfs_dir3_free {
diff --git a/trunk/fs/xfs/xfs_dir2_leaf.c b/trunk/fs/xfs/xfs_dir2_leaf.c
index e0cc1243a8aa..da71a1819d78 100644
--- a/trunk/fs/xfs/xfs_dir2_leaf.c
+++ b/trunk/fs/xfs/xfs_dir2_leaf.c
@@ -1300,8 +1300,10 @@ xfs_dir2_leaf_readbuf(
int /* error */
xfs_dir2_leaf_getdents(
xfs_inode_t *dp, /* incore directory inode */
- struct dir_context *ctx,
- size_t bufsize)
+ void *dirent,
+ size_t bufsize,
+ xfs_off_t *offset,
+ filldir_t filldir)
{
struct xfs_buf *bp = NULL; /* data block buffer */
xfs_dir2_data_hdr_t *hdr; /* data block header */
@@ -1320,7 +1322,7 @@ xfs_dir2_leaf_getdents(
* If the offset is at or past the largest allowed value,
* give up right away.
*/
- if (ctx->pos >= XFS_DIR2_MAX_DATAPTR)
+ if (*offset >= XFS_DIR2_MAX_DATAPTR)
return 0;
mp = dp->i_mount;
@@ -1341,7 +1343,7 @@ xfs_dir2_leaf_getdents(
* Inside the loop we keep the main offset value as a byte offset
* in the directory file.
*/
- curoff = xfs_dir2_dataptr_to_byte(mp, ctx->pos);
+ curoff = xfs_dir2_dataptr_to_byte(mp, *offset);
/*
* Force this conversion through db so we truncate the offset
@@ -1442,8 +1444,8 @@ xfs_dir2_leaf_getdents(
dep = (xfs_dir2_data_entry_t *)ptr;
length = xfs_dir2_data_entsize(dep->namelen);
- ctx->pos = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
- if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
+ if (filldir(dirent, (char *)dep->name, dep->namelen,
+ xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff,
be64_to_cpu(dep->inumber), DT_UNKNOWN))
break;
@@ -1460,9 +1462,9 @@ xfs_dir2_leaf_getdents(
* All done. Set output offset value to current offset.
*/
if (curoff > xfs_dir2_dataptr_to_byte(mp, XFS_DIR2_MAX_DATAPTR))
- ctx->pos = XFS_DIR2_MAX_DATAPTR & 0x7fffffff;
+ *offset = XFS_DIR2_MAX_DATAPTR & 0x7fffffff;
else
- ctx->pos = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
+ *offset = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
kmem_free(map_info);
if (bp)
xfs_trans_brelse(NULL, bp);
diff --git a/trunk/fs/xfs/xfs_dir2_priv.h b/trunk/fs/xfs/xfs_dir2_priv.h
index 0511cda4a712..7cf573c88aad 100644
--- a/trunk/fs/xfs/xfs_dir2_priv.h
+++ b/trunk/fs/xfs/xfs_dir2_priv.h
@@ -33,8 +33,8 @@ extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
extern const struct xfs_buf_ops xfs_dir3_block_buf_ops;
extern int xfs_dir2_block_addname(struct xfs_da_args *args);
-extern int xfs_dir2_block_getdents(struct xfs_inode *dp,
- struct dir_context *ctx);
+extern int xfs_dir2_block_getdents(struct xfs_inode *dp, void *dirent,
+ xfs_off_t *offset, filldir_t filldir);
extern int xfs_dir2_block_lookup(struct xfs_da_args *args);
extern int xfs_dir2_block_removename(struct xfs_da_args *args);
extern int xfs_dir2_block_replace(struct xfs_da_args *args);
@@ -91,8 +91,8 @@ extern void xfs_dir3_leaf_compact(struct xfs_da_args *args,
extern void xfs_dir3_leaf_compact_x1(struct xfs_dir3_icleaf_hdr *leafhdr,
struct xfs_dir2_leaf_entry *ents, int *indexp,
int *lowstalep, int *highstalep, int *lowlogp, int *highlogp);
-extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, struct dir_context *ctx,
- size_t bufsize);
+extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, void *dirent,
+ size_t bufsize, xfs_off_t *offset, filldir_t filldir);
extern int xfs_dir3_leaf_get_buf(struct xfs_da_args *args, xfs_dir2_db_t bno,
struct xfs_buf **bpp, __uint16_t magic);
extern void xfs_dir3_leaf_log_ents(struct xfs_trans *tp, struct xfs_buf *bp,
@@ -153,7 +153,8 @@ extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_buf *bp,
int size, xfs_dir2_sf_hdr_t *sfhp);
extern int xfs_dir2_sf_addname(struct xfs_da_args *args);
extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
-extern int xfs_dir2_sf_getdents(struct xfs_inode *dp, struct dir_context *ctx);
+extern int xfs_dir2_sf_getdents(struct xfs_inode *dp, void *dirent,
+ xfs_off_t *offset, filldir_t filldir);
extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
diff --git a/trunk/fs/xfs/xfs_dir2_sf.c b/trunk/fs/xfs/xfs_dir2_sf.c
index 97676a347da1..6157424dbf8f 100644
--- a/trunk/fs/xfs/xfs_dir2_sf.c
+++ b/trunk/fs/xfs/xfs_dir2_sf.c
@@ -768,7 +768,9 @@ xfs_dir2_sf_create(
int /* error */
xfs_dir2_sf_getdents(
xfs_inode_t *dp, /* incore directory inode */
- struct dir_context *ctx)
+ void *dirent,
+ xfs_off_t *offset,
+ filldir_t filldir)
{
int i; /* shortform entry number */
xfs_mount_t *mp; /* filesystem mount point */
@@ -800,7 +802,7 @@ xfs_dir2_sf_getdents(
/*
* If the block number in the offset is out of range, we're done.
*/
- if (xfs_dir2_dataptr_to_db(mp, ctx->pos) > mp->m_dirdatablk)
+ if (xfs_dir2_dataptr_to_db(mp, *offset) > mp->m_dirdatablk)
return 0;
/*
@@ -817,20 +819,22 @@ xfs_dir2_sf_getdents(
/*
* Put . entry unless we're starting past it.
*/
- if (ctx->pos <= dot_offset) {
- ctx->pos = dot_offset & 0x7fffffff;
- if (!dir_emit(ctx, ".", 1, dp->i_ino, DT_DIR))
+ if (*offset <= dot_offset) {
+ if (filldir(dirent, ".", 1, dot_offset & 0x7fffffff, dp->i_ino, DT_DIR)) {
+ *offset = dot_offset & 0x7fffffff;
return 0;
+ }
}
/*
* Put .. entry unless we're starting past it.
*/
- if (ctx->pos <= dotdot_offset) {
+ if (*offset <= dotdot_offset) {
ino = xfs_dir2_sf_get_parent_ino(sfp);
- ctx->pos = dotdot_offset & 0x7fffffff;
- if (!dir_emit(ctx, "..", 2, ino, DT_DIR))
+ if (filldir(dirent, "..", 2, dotdot_offset & 0x7fffffff, ino, DT_DIR)) {
+ *offset = dotdot_offset & 0x7fffffff;
return 0;
+ }
}
/*
@@ -841,20 +845,21 @@ xfs_dir2_sf_getdents(
off = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
xfs_dir2_sf_get_offset(sfep));
- if (ctx->pos > off) {
+ if (*offset > off) {
sfep = xfs_dir2_sf_nextentry(sfp, sfep);
continue;
}
ino = xfs_dir2_sfe_get_ino(sfp, sfep);
- ctx->pos = off & 0x7fffffff;
- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen,
- ino, DT_UNKNOWN))
+ if (filldir(dirent, (char *)sfep->name, sfep->namelen,
+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
+ *offset = off & 0x7fffffff;
return 0;
+ }
sfep = xfs_dir2_sf_nextentry(sfp, sfep);
}
- ctx->pos = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
+ *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
0x7fffffff;
return 0;
}
diff --git a/trunk/fs/xfs/xfs_file.c b/trunk/fs/xfs/xfs_file.c
index 0ad2b95fca12..a5f2042aec8b 100644
--- a/trunk/fs/xfs/xfs_file.c
+++ b/trunk/fs/xfs/xfs_file.c
@@ -906,10 +906,11 @@ xfs_file_release(
STATIC int
xfs_file_readdir(
- struct file *file,
- struct dir_context *ctx)
+ struct file *filp,
+ void *dirent,
+ filldir_t filldir)
{
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(filp);
xfs_inode_t *ip = XFS_I(inode);
int error;
size_t bufsize;
@@ -928,7 +929,8 @@ xfs_file_readdir(
*/
bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);
- error = xfs_readdir(ip, ctx, bufsize);
+ error = xfs_readdir(ip, dirent, bufsize,
+ (xfs_off_t *)&filp->f_pos, filldir);
if (error)
return -error;
return 0;
@@ -1430,7 +1432,7 @@ const struct file_operations xfs_file_operations = {
const struct file_operations xfs_dir_file_operations = {
.open = xfs_dir_open,
.read = generic_read_dir,
- .iterate = xfs_file_readdir,
+ .readdir = xfs_file_readdir,
.llseek = generic_file_llseek,
.unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
diff --git a/trunk/fs/xfs/xfs_log_recover.c b/trunk/fs/xfs/xfs_log_recover.c
index 7cf5e4eafe28..45a85ff84da1 100644
--- a/trunk/fs/xfs/xfs_log_recover.c
+++ b/trunk/fs/xfs/xfs_log_recover.c
@@ -1845,13 +1845,7 @@ xlog_recover_do_inode_buffer(
xfs_agino_t *buffer_nextp;
trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
-
- /*
- * Post recovery validation only works properly on CRC enabled
- * filesystems.
- */
- if (xfs_sb_version_hascrc(&mp->m_sb))
- bp->b_ops = &xfs_inode_buf_ops;
+ bp->b_ops = &xfs_inode_buf_ops;
inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
for (i = 0; i < inodes_per_buf; i++) {
@@ -2211,16 +2205,7 @@ xlog_recover_do_reg_buffer(
/* Shouldn't be any more regions */
ASSERT(i == item->ri_total);
- /*
- * We can only do post recovery validation on items on CRC enabled
- * fielsystems as we need to know when the buffer was written to be able
- * to determine if we should have replayed the item. If we replay old
- * metadata over a newer buffer, then it will enter a temporarily
- * inconsistent state resulting in verification failures. Hence for now
- * just avoid the verification stage for non-crc filesystems
- */
- if (xfs_sb_version_hascrc(&mp->m_sb))
- xlog_recovery_validate_buf_type(mp, bp, buf_f);
+ xlog_recovery_validate_buf_type(mp, bp, buf_f);
}
/*
diff --git a/trunk/fs/xfs/xfs_mount.c b/trunk/fs/xfs/xfs_mount.c
index e8e310c05097..f6bfbd734669 100644
--- a/trunk/fs/xfs/xfs_mount.c
+++ b/trunk/fs/xfs/xfs_mount.c
@@ -314,8 +314,7 @@ STATIC int
xfs_mount_validate_sb(
xfs_mount_t *mp,
xfs_sb_t *sbp,
- bool check_inprogress,
- bool check_version)
+ bool check_inprogress)
{
/*
@@ -338,10 +337,9 @@ xfs_mount_validate_sb(
/*
* Version 5 superblock feature mask validation. Reject combinations the
- * kernel cannot support up front before checking anything else. For
- * write validation, we don't need to check feature masks.
+ * kernel cannot support up front before checking anything else.
*/
- if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
+ if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
xfs_alert(mp,
"Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n"
"Use of these features in this kernel is at your own risk!");
@@ -677,8 +675,7 @@ xfs_sb_to_disk(
static int
xfs_sb_verify(
- struct xfs_buf *bp,
- bool check_version)
+ struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_sb sb;
@@ -689,8 +686,7 @@ xfs_sb_verify(
* Only check the in progress field for the primary superblock as
* mkfs.xfs doesn't clear it from secondary superblocks.
*/
- return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
- check_version);
+ return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR);
}
/*
@@ -723,7 +719,7 @@ xfs_sb_read_verify(
goto out_error;
}
}
- error = xfs_sb_verify(bp, true);
+ error = xfs_sb_verify(bp);
out_error:
if (error) {
@@ -762,7 +758,7 @@ xfs_sb_write_verify(
struct xfs_buf_log_item *bip = bp->b_fspriv;
int error;
- error = xfs_sb_verify(bp, false);
+ error = xfs_sb_verify(bp);
if (error) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
xfs_buf_ioerror(bp, error);
diff --git a/trunk/fs/xfs/xfs_trace.h b/trunk/fs/xfs/xfs_trace.h
index a04701de6bbd..aa4db3307d36 100644
--- a/trunk/fs/xfs/xfs_trace.h
+++ b/trunk/fs/xfs/xfs_trace.h
@@ -974,16 +974,14 @@ DEFINE_RW_EVENT(xfs_file_splice_read);
DEFINE_RW_EVENT(xfs_file_splice_write);
DECLARE_EVENT_CLASS(xfs_page_class,
- TP_PROTO(struct inode *inode, struct page *page, unsigned long off,
- unsigned int len),
- TP_ARGS(inode, page, off, len),
+ TP_PROTO(struct inode *inode, struct page *page, unsigned long off),
+ TP_ARGS(inode, page, off),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_ino_t, ino)
__field(pgoff_t, pgoff)
__field(loff_t, size)
__field(unsigned long, offset)
- __field(unsigned int, length)
__field(int, delalloc)
__field(int, unwritten)
),
@@ -997,27 +995,24 @@ DECLARE_EVENT_CLASS(xfs_page_class,
__entry->pgoff = page_offset(page);
__entry->size = i_size_read(inode);
__entry->offset = off;
- __entry->length = len;
__entry->delalloc = delalloc;
__entry->unwritten = unwritten;
),
TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
- "length %x delalloc %d unwritten %d",
+ "delalloc %d unwritten %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->pgoff,
__entry->size,
__entry->offset,
- __entry->length,
__entry->delalloc,
__entry->unwritten)
)
#define DEFINE_PAGE_EVENT(name) \
DEFINE_EVENT(xfs_page_class, name, \
- TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \
- unsigned int len), \
- TP_ARGS(inode, page, off, len))
+ TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \
+ TP_ARGS(inode, page, off))
DEFINE_PAGE_EVENT(xfs_writepage);
DEFINE_PAGE_EVENT(xfs_releasepage);
DEFINE_PAGE_EVENT(xfs_invalidatepage);
diff --git a/trunk/fs/xfs/xfs_vnodeops.h b/trunk/fs/xfs/xfs_vnodeops.h
index 38c67c34d73f..5163022d9808 100644
--- a/trunk/fs/xfs/xfs_vnodeops.h
+++ b/trunk/fs/xfs/xfs_vnodeops.h
@@ -31,7 +31,8 @@ int xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
struct xfs_inode *ip);
int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
struct xfs_name *target_name);
-int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx, size_t bufsize);
+int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize,
+ xfs_off_t *offset, filldir_t filldir);
int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
const char *target_path, umode_t mode, struct xfs_inode **ipp);
int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state);
diff --git a/trunk/include/acpi/acpi_bus.h b/trunk/include/acpi/acpi_bus.h
index c13c919ab99e..636c59f2003a 100644
--- a/trunk/include/acpi/acpi_bus.h
+++ b/trunk/include/acpi/acpi_bus.h
@@ -382,7 +382,6 @@ const char *acpi_power_state_string(int state);
int acpi_device_get_power(struct acpi_device *device, int *state);
int acpi_device_set_power(struct acpi_device *device, int state);
int acpi_bus_init_power(struct acpi_device *device);
-int acpi_device_fix_up_power(struct acpi_device *device);
int acpi_bus_update_power(acpi_handle handle, int *state_p);
bool acpi_bus_power_manageable(acpi_handle handle);
diff --git a/trunk/include/acpi/acpi_drivers.h b/trunk/include/acpi/acpi_drivers.h
index b420939f5eb5..e6168a24b9f0 100644
--- a/trunk/include/acpi/acpi_drivers.h
+++ b/trunk/include/acpi/acpi_drivers.h
@@ -123,9 +123,7 @@ extern int register_dock_notifier(struct notifier_block *nb);
extern void unregister_dock_notifier(struct notifier_block *nb);
extern int register_hotplug_dock_device(acpi_handle handle,
const struct acpi_dock_ops *ops,
- void *context,
- void (*init)(void *),
- void (*release)(void *));
+ void *context);
extern void unregister_hotplug_dock_device(acpi_handle handle);
#else
static inline int is_dock_device(acpi_handle handle)
@@ -141,9 +139,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
}
static inline int register_hotplug_dock_device(acpi_handle handle,
const struct acpi_dock_ops *ops,
- void *context,
- void (*init)(void *),
- void (*release)(void *))
+ void *context)
{
return -ENODEV;
}
diff --git a/trunk/include/asm-generic/pgtable.h b/trunk/include/asm-generic/pgtable.h
index b1836987d506..a59ff51b0166 100644
--- a/trunk/include/asm-generic/pgtable.h
+++ b/trunk/include/asm-generic/pgtable.h
@@ -692,8 +692,4 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
#endif /* !__ASSEMBLY__ */
-#ifndef io_remap_pfn_range
-#define io_remap_pfn_range remap_pfn_range
-#endif
-
#endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/trunk/include/linux/buffer_head.h b/trunk/include/linux/buffer_head.h
index f5a3b838ddb0..9e52b0626b39 100644
--- a/trunk/include/linux/buffer_head.h
+++ b/trunk/include/linux/buffer_head.h
@@ -198,8 +198,7 @@ extern int buffer_heads_over_limit;
* Generic address_space_operations implementations for buffer_head-backed
* address_spaces.
*/
-void block_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
+void block_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block,
struct writeback_control *wbc);
int block_write_full_page_endio(struct page *page, get_block_t *get_block,
diff --git a/trunk/include/linux/context_tracking.h b/trunk/include/linux/context_tracking.h
index fc09d7b0dacf..365f4a61bf04 100644
--- a/trunk/include/linux/context_tracking.h
+++ b/trunk/include/linux/context_tracking.h
@@ -3,7 +3,6 @@
#include
#include
-#include
#include
struct context_tracking {
@@ -20,26 +19,6 @@ struct context_tracking {
} state;
};
-static inline void __guest_enter(void)
-{
- /*
- * This is running in ioctl context so we can avoid
- * the call to vtime_account() with its unnecessary idle check.
- */
- vtime_account_system(current);
- current->flags |= PF_VCPU;
-}
-
-static inline void __guest_exit(void)
-{
- /*
- * This is running in ioctl context so we can avoid
- * the call to vtime_account() with its unnecessary idle check.
- */
- vtime_account_system(current);
- current->flags &= ~PF_VCPU;
-}
-
#ifdef CONFIG_CONTEXT_TRACKING
DECLARE_PER_CPU(struct context_tracking, context_tracking);
@@ -56,9 +35,6 @@ static inline bool context_tracking_active(void)
extern void user_enter(void);
extern void user_exit(void);
-extern void guest_enter(void);
-extern void guest_exit(void);
-
static inline enum ctx_state exception_enter(void)
{
enum ctx_state prev_ctx;
@@ -81,17 +57,6 @@ extern void context_tracking_task_switch(struct task_struct *prev,
static inline bool context_tracking_in_user(void) { return false; }
static inline void user_enter(void) { }
static inline void user_exit(void) { }
-
-static inline void guest_enter(void)
-{
- __guest_enter();
-}
-
-static inline void guest_exit(void)
-{
- __guest_exit();
-}
-
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline void context_tracking_task_switch(struct task_struct *prev,
diff --git a/trunk/include/linux/f2fs_fs.h b/trunk/include/linux/f2fs_fs.h
index 383d5e39b280..df6fab82f87e 100644
--- a/trunk/include/linux/f2fs_fs.h
+++ b/trunk/include/linux/f2fs_fs.h
@@ -20,8 +20,8 @@
#define F2FS_BLKSIZE 4096 /* support only 4KB block */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
-#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
-#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
+#define NULL_ADDR 0x0U
+#define NEW_ADDR -1U
#define F2FS_ROOT_INO(sbi) (sbi->root_ino_num)
#define F2FS_NODE_INO(sbi) (sbi->node_ino_num)
diff --git a/trunk/include/linux/fs.h b/trunk/include/linux/fs.h
index f8a5240541b7..43db02e9c9fa 100644
--- a/trunk/include/linux/fs.h
+++ b/trunk/include/linux/fs.h
@@ -364,7 +364,7 @@ struct address_space_operations {
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t);
- void (*invalidatepage) (struct page *, unsigned int, unsigned int);
+ void (*invalidatepage) (struct page *, unsigned long);
int (*releasepage) (struct page *, gfp_t);
void (*freepage)(struct page *);
ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
@@ -1506,11 +1506,6 @@ int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
* to have different dirent layouts depending on the binary type.
*/
typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
-struct dir_context {
- const filldir_t actor;
- loff_t pos;
-};
-
struct block_device_operations;
/* These macros are for out of kernel modules to test that
@@ -1526,7 +1521,7 @@ struct file_operations {
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
- int (*iterate) (struct file *, struct dir_context *);
+ int (*readdir) (struct file *, void *, filldir_t);
unsigned int (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
@@ -2419,6 +2414,8 @@ extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
struct file *out, loff_t *, size_t len, unsigned int flags);
+extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
+ size_t len, unsigned int flags);
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
@@ -2499,7 +2496,6 @@ loff_t inode_get_bytes(struct inode *inode);
void inode_set_bytes(struct inode *inode, loff_t bytes);
extern int vfs_readdir(struct file *, filldir_t, void *);
-extern int iterate_dir(struct file *, struct dir_context *);
extern int vfs_stat(const char __user *, struct kstat *);
extern int vfs_lstat(const char __user *, struct kstat *);
@@ -2530,7 +2526,7 @@ extern void iterate_supers_type(struct file_system_type *,
extern int dcache_dir_open(struct inode *, struct file *);
extern int dcache_dir_close(struct inode *, struct file *);
extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
-extern int dcache_readdir(struct file *, struct dir_context *);
+extern int dcache_readdir(struct file *, void *, filldir_t);
extern int simple_setattr(struct dentry *, struct iattr *);
extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
extern int simple_statfs(struct dentry *, struct kstatfs *);
@@ -2694,41 +2690,4 @@ static inline void inode_has_no_xattr(struct inode *inode)
inode->i_flags |= S_NOSEC;
}
-static inline bool dir_emit(struct dir_context *ctx,
- const char *name, int namelen,
- u64 ino, unsigned type)
-{
- return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0;
-}
-static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
-{
- return ctx->actor(ctx, ".", 1, ctx->pos,
- file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0;
-}
-static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
-{
- return ctx->actor(ctx, "..", 2, ctx->pos,
- parent_ino(file->f_path.dentry), DT_DIR) == 0;
-}
-static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
-{
- if (ctx->pos == 0) {
- if (!dir_emit_dot(file, ctx))
- return false;
- ctx->pos = 1;
- }
- if (ctx->pos == 1) {
- if (!dir_emit_dotdot(file, ctx))
- return false;
- ctx->pos = 2;
- }
- return true;
-}
-static inline bool dir_relax(struct inode *inode)
-{
- mutex_unlock(&inode->i_mutex);
- mutex_lock(&inode->i_mutex);
- return !IS_DEADDIR(inode);
-}
-
#endif /* _LINUX_FS_H */
diff --git a/trunk/include/linux/if_vlan.h b/trunk/include/linux/if_vlan.h
index 637fa71de0c7..52bd03b38962 100644
--- a/trunk/include/linux/if_vlan.h
+++ b/trunk/include/linux/if_vlan.h
@@ -44,7 +44,7 @@ struct vlan_hdr {
* struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
* @h_dest: destination ethernet address
* @h_source: source ethernet address
- * @h_vlan_proto: ethernet protocol
+ * @h_vlan_proto: ethernet protocol (always 0x8100)
* @h_vlan_TCI: priority and VLAN ID
* @h_vlan_encapsulated_proto: packet type ID or len
*/
diff --git a/trunk/include/linux/jbd.h b/trunk/include/linux/jbd.h
index 8685d1be12c7..7e0b622503c4 100644
--- a/trunk/include/linux/jbd.h
+++ b/trunk/include/linux/jbd.h
@@ -27,6 +27,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -243,31 +244,6 @@ typedef struct journal_superblock_s
#include
#include
-
-enum jbd_state_bits {
- BH_JBD /* Has an attached ext3 journal_head */
- = BH_PrivateStart,
- BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
- BH_Freed, /* Has been freed (truncated) */
- BH_Revoked, /* Has been revoked from the log */
- BH_RevokeValid, /* Revoked flag is valid */
- BH_JBDDirty, /* Is dirty but journaled */
- BH_State, /* Pins most journal_head state */
- BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
- BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
- BH_JBDPrivateStart, /* First bit available for private use by FS */
-};
-
-BUFFER_FNS(JBD, jbd)
-BUFFER_FNS(JWrite, jwrite)
-BUFFER_FNS(JBDDirty, jbddirty)
-TAS_BUFFER_FNS(JBDDirty, jbddirty)
-BUFFER_FNS(Revoked, revoked)
-TAS_BUFFER_FNS(Revoked, revoked)
-BUFFER_FNS(RevokeValid, revokevalid)
-TAS_BUFFER_FNS(RevokeValid, revokevalid)
-BUFFER_FNS(Freed, freed)
-
#include
#define J_ASSERT(assert) BUG_ON(!(assert))
@@ -864,7 +840,7 @@ extern void journal_release_buffer (handle_t *, struct buffer_head *);
extern int journal_forget (handle_t *, struct buffer_head *);
extern void journal_sync_buffer (struct buffer_head *);
extern void journal_invalidatepage(journal_t *,
- struct page *, unsigned int, unsigned int);
+ struct page *, unsigned long);
extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
extern int journal_stop(handle_t *);
extern int journal_flush (journal_t *);
diff --git a/trunk/include/linux/jbd2.h b/trunk/include/linux/jbd2.h
index d5b50a19463c..6e051f472edb 100644
--- a/trunk/include/linux/jbd2.h
+++ b/trunk/include/linux/jbd2.h
@@ -26,6 +26,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -56,13 +57,17 @@
*/
#define JBD2_EXPENSIVE_CHECKING
extern ushort jbd2_journal_enable_debug;
-void __jbd2_debug(int level, const char *file, const char *func,
- unsigned int line, const char *fmt, ...);
-#define jbd_debug(n, fmt, a...) \
- __jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
+#define jbd_debug(n, f, a...) \
+ do { \
+ if ((n) <= jbd2_journal_enable_debug) { \
+ printk (KERN_DEBUG "(%s, %d): %s: ", \
+ __FILE__, __LINE__, __func__); \
+ printk (f, ## a); \
+ } \
+ } while (0)
#else
-#define jbd_debug(n, fmt, a...) /**/
+#define jbd_debug(f, a...) /**/
#endif
extern void *jbd2_alloc(size_t size, gfp_t flags);
@@ -297,34 +302,6 @@ typedef struct journal_superblock_s
#include
#include
-
-enum jbd_state_bits {
- BH_JBD /* Has an attached ext3 journal_head */
- = BH_PrivateStart,
- BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
- BH_Freed, /* Has been freed (truncated) */
- BH_Revoked, /* Has been revoked from the log */
- BH_RevokeValid, /* Revoked flag is valid */
- BH_JBDDirty, /* Is dirty but journaled */
- BH_State, /* Pins most journal_head state */
- BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
- BH_Shadow, /* IO on shadow buffer is running */
- BH_Verified, /* Metadata block has been verified ok */
- BH_JBDPrivateStart, /* First bit available for private use by FS */
-};
-
-BUFFER_FNS(JBD, jbd)
-BUFFER_FNS(JWrite, jwrite)
-BUFFER_FNS(JBDDirty, jbddirty)
-TAS_BUFFER_FNS(JBDDirty, jbddirty)
-BUFFER_FNS(Revoked, revoked)
-TAS_BUFFER_FNS(Revoked, revoked)
-BUFFER_FNS(RevokeValid, revokevalid)
-TAS_BUFFER_FNS(RevokeValid, revokevalid)
-BUFFER_FNS(Freed, freed)
-BUFFER_FNS(Shadow, shadow)
-BUFFER_FNS(Verified, verified)
-
#include
#define J_ASSERT(assert) BUG_ON(!(assert))
@@ -405,15 +382,8 @@ struct jbd2_revoke_table_s;
struct jbd2_journal_handle
{
- union {
- /* Which compound transaction is this update a part of? */
- transaction_t *h_transaction;
- /* Which journal handle belongs to - used iff h_reserved set */
- journal_t *h_journal;
- };
-
- /* Handle reserved for finishing the logical operation */
- handle_t *h_rsv_handle;
+ /* Which compound transaction is this update a part of? */
+ transaction_t *h_transaction;
/* Number of remaining buffers we are allowed to dirty: */
int h_buffer_credits;
@@ -428,7 +398,6 @@ struct jbd2_journal_handle
/* Flags [no locking] */
unsigned int h_sync: 1; /* sync-on-close */
unsigned int h_jdata: 1; /* force data journaling */
- unsigned int h_reserved: 1; /* handle with reserved credits */
unsigned int h_aborted: 1; /* fatal error on handle */
unsigned int h_type: 8; /* for handle statistics */
unsigned int h_line_no: 16; /* for handle statistics */
@@ -554,6 +523,12 @@ struct transaction_s
*/
struct journal_head *t_checkpoint_io_list;
+ /*
+ * Doubly-linked circular list of temporary buffers currently undergoing
+ * IO in the log [j_list_lock]
+ */
+ struct journal_head *t_iobuf_list;
+
/*
* Doubly-linked circular list of metadata buffers being shadowed by log
* IO. The IO buffers on the iobuf list and the shadow buffers on this
@@ -561,6 +536,12 @@ struct transaction_s
*/
struct journal_head *t_shadow_list;
+ /*
+ * Doubly-linked circular list of control buffers being written to the
+ * log. [j_list_lock]
+ */
+ struct journal_head *t_log_list;
+
/*
* List of inodes whose data we've modified in data=ordered mode.
* [j_list_lock]
@@ -690,10 +671,11 @@ jbd2_time_diff(unsigned long start, unsigned long end)
* waiting for checkpointing
* @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
* to start committing, or for a barrier lock to be released
+ * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
* @j_wait_done_commit: Wait queue for waiting for commit to complete
+ * @j_wait_checkpoint: Wait queue to trigger checkpointing
* @j_wait_commit: Wait queue to trigger commit
* @j_wait_updates: Wait queue to wait for updates to complete
- * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop
* @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
* @j_head: Journal head - identifies the first unused block in the journal
* @j_tail: Journal tail - identifies the oldest still-used block in the
@@ -707,7 +689,6 @@ jbd2_time_diff(unsigned long start, unsigned long end)
* journal
* @j_fs_dev: Device which holds the client fs. For internal journal this will
* be equal to j_dev
- * @j_reserved_credits: Number of buffers reserved from the running transaction
* @j_maxlen: Total maximum capacity of the journal region on disk.
* @j_list_lock: Protects the buffer lists and internal buffer state.
* @j_inode: Optional inode where we store the journal. If present, all journal
@@ -797,18 +778,21 @@ struct journal_s
*/
wait_queue_head_t j_wait_transaction_locked;
+ /* Wait queue for waiting for checkpointing to complete */
+ wait_queue_head_t j_wait_logspace;
+
/* Wait queue for waiting for commit to complete */
wait_queue_head_t j_wait_done_commit;
+ /* Wait queue to trigger checkpointing */
+ wait_queue_head_t j_wait_checkpoint;
+
/* Wait queue to trigger commit */
wait_queue_head_t j_wait_commit;
/* Wait queue to wait for updates to complete */
wait_queue_head_t j_wait_updates;
- /* Wait queue to wait for reserved buffer credits to drop */
- wait_queue_head_t j_wait_reserved;
-
/* Semaphore for locking against concurrent checkpoints */
struct mutex j_checkpoint_mutex;
@@ -863,9 +847,6 @@ struct journal_s
/* Total maximum capacity of the journal region on disk. */
unsigned int j_maxlen;
- /* Number of buffers reserved from the running transaction */
- atomic_t j_reserved_credits;
-
/*
* Protects the buffer lists and internal buffer state.
*/
@@ -1010,17 +991,9 @@ extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, i
extern void __journal_free_buffer(struct journal_head *bh);
extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
extern void __journal_clean_data_list(transaction_t *transaction);
-static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh)
-{
- list_add_tail(&bh->b_assoc_buffers, head);
-}
-static inline void jbd2_unfile_log_bh(struct buffer_head *bh)
-{
- list_del_init(&bh->b_assoc_buffers);
-}
/* Log buffer allocation */
-struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal);
+extern struct journal_head * jbd2_journal_get_descriptor_buffer(journal_t *);
int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
unsigned long *block);
@@ -1066,10 +1039,11 @@ extern void jbd2_buffer_abort_trigger(struct journal_head *jh,
struct jbd2_buffer_trigger_type *triggers);
/* Buffer IO */
-extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
- struct journal_head *jh_in,
- struct buffer_head **bh_out,
- sector_t blocknr);
+extern int
+jbd2_journal_write_metadata_buffer(transaction_t *transaction,
+ struct journal_head *jh_in,
+ struct journal_head **jh_out,
+ unsigned long long blocknr);
/* Transaction locking */
extern void __wait_on_journal (journal_t *);
@@ -1102,14 +1076,10 @@ static inline handle_t *journal_current_handle(void)
*/
extern handle_t *jbd2_journal_start(journal_t *, int nblocks);
-extern handle_t *jbd2__journal_start(journal_t *, int blocks, int rsv_blocks,
- gfp_t gfp_mask, unsigned int type,
- unsigned int line_no);
+extern handle_t *jbd2__journal_start(journal_t *, int nblocks, gfp_t gfp_mask,
+ unsigned int type, unsigned int line_no);
extern int jbd2_journal_restart(handle_t *, int nblocks);
extern int jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask);
-extern int jbd2_journal_start_reserved(handle_t *handle,
- unsigned int type, unsigned int line_no);
-extern void jbd2_journal_free_reserved(handle_t *handle);
extern int jbd2_journal_extend (handle_t *, int nblocks);
extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
@@ -1120,7 +1090,7 @@ extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
extern void journal_sync_buffer (struct buffer_head *);
extern int jbd2_journal_invalidatepage(journal_t *,
- struct page *, unsigned int, unsigned int);
+ struct page *, unsigned long);
extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
extern int jbd2_journal_stop(handle_t *);
extern int jbd2_journal_flush (journal_t *);
@@ -1155,7 +1125,6 @@ extern void jbd2_journal_ack_err (journal_t *);
extern int jbd2_journal_clear_err (journal_t *);
extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
extern int jbd2_journal_force_commit(journal_t *);
-extern int jbd2_journal_force_commit_nested(journal_t *);
extern int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode);
extern int jbd2_journal_begin_ordered_truncate(journal_t *journal,
struct jbd2_inode *inode, loff_t new_size);
@@ -1209,10 +1178,8 @@ extern int jbd2_journal_init_revoke_caches(void);
extern void jbd2_journal_destroy_revoke(journal_t *);
extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
-extern void jbd2_journal_write_revoke_records(journal_t *journal,
- transaction_t *transaction,
- struct list_head *log_bufs,
- int write_op);
+extern void jbd2_journal_write_revoke_records(journal_t *,
+ transaction_t *, int);
/* Recovery revoke support */
extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t);
@@ -1228,9 +1195,11 @@ extern void jbd2_clear_buffer_revoked_flags(journal_t *journal);
* transitions on demand.
*/
+int __jbd2_log_space_left(journal_t *); /* Called with journal locked */
int jbd2_log_start_commit(journal_t *journal, tid_t tid);
int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
+int jbd2_journal_force_commit_nested(journal_t *journal);
int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
int jbd2_complete_transaction(journal_t *journal, tid_t tid);
int jbd2_log_do_checkpoint(journal_t *journal);
@@ -1266,7 +1235,7 @@ static inline int is_journal_aborted(journal_t *journal)
static inline int is_handle_aborted(handle_t *handle)
{
- if (handle->h_aborted || !handle->h_transaction)
+ if (handle->h_aborted)
return 1;
return is_journal_aborted(handle->h_transaction->t_journal);
}
@@ -1296,38 +1265,17 @@ static inline int tid_geq(tid_t x, tid_t y)
extern int jbd2_journal_blocks_per_page(struct inode *inode);
extern size_t journal_tag_bytes(journal_t *journal);
-/*
- * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
- * transaction control blocks.
- */
-#define JBD2_CONTROL_BLOCKS_SHIFT 5
-
/*
* Return the minimum number of blocks which must be free in the journal
* before a new transaction may be started. Must be called under j_state_lock.
*/
-static inline int jbd2_space_needed(journal_t *journal)
+static inline int jbd_space_needed(journal_t *journal)
{
int nblocks = journal->j_max_transaction_buffers;
- return nblocks + (nblocks >> JBD2_CONTROL_BLOCKS_SHIFT);
-}
-
-/*
- * Return number of free blocks in the log. Must be called under j_state_lock.
- */
-static inline unsigned long jbd2_log_space_left(journal_t *journal)
-{
- /* Allow for rounding errors */
- unsigned long free = journal->j_free - 32;
-
- if (journal->j_committing_transaction) {
- unsigned long committing = atomic_read(&journal->
- j_committing_transaction->t_outstanding_credits);
-
- /* Transaction + control blocks */
- free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT);
- }
- return free;
+ if (journal->j_committing_transaction)
+ nblocks += atomic_read(&journal->j_committing_transaction->
+ t_outstanding_credits);
+ return nblocks;
}
/*
@@ -1338,9 +1286,11 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal)
#define BJ_None 0 /* Not journaled */
#define BJ_Metadata 1 /* Normal journaled metadata */
#define BJ_Forget 2 /* Buffer superseded by this transaction */
-#define BJ_Shadow 3 /* Buffer contents being shadowed to the log */
-#define BJ_Reserved 4 /* Buffer is reserved for access by journal */
-#define BJ_Types 5
+#define BJ_IO 3 /* Buffer is for temporary IO use */
+#define BJ_Shadow 4 /* Buffer contents being shadowed to the log */
+#define BJ_LogCtl 5 /* Buffer contains log descriptors */
+#define BJ_Reserved 6 /* Buffer is reserved for access by journal */
+#define BJ_Types 7
extern int jbd_blocks_per_page(struct inode *inode);
@@ -1369,19 +1319,6 @@ static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
return *(u32 *)desc.ctx;
}
-/* Return most recent uncommitted transaction */
-static inline tid_t jbd2_get_latest_transaction(journal_t *journal)
-{
- tid_t tid;
-
- read_lock(&journal->j_state_lock);
- tid = journal->j_commit_request;
- if (journal->j_running_transaction)
- tid = journal->j_running_transaction->t_tid;
- read_unlock(&journal->j_state_lock);
- return tid;
-}
-
#ifdef __KERNEL__
#define buffer_trace_init(bh) do {} while (0)
diff --git a/trunk/include/linux/jbd_common.h b/trunk/include/linux/jbd_common.h
index 3dc53432355f..6133679bc4c0 100644
--- a/trunk/include/linux/jbd_common.h
+++ b/trunk/include/linux/jbd_common.h
@@ -1,7 +1,31 @@
#ifndef _LINUX_JBD_STATE_H
#define _LINUX_JBD_STATE_H
-#include
+enum jbd_state_bits {
+ BH_JBD /* Has an attached ext3 journal_head */
+ = BH_PrivateStart,
+ BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
+ BH_Freed, /* Has been freed (truncated) */
+ BH_Revoked, /* Has been revoked from the log */
+ BH_RevokeValid, /* Revoked flag is valid */
+ BH_JBDDirty, /* Is dirty but journaled */
+ BH_State, /* Pins most journal_head state */
+ BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
+ BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
+ BH_Verified, /* Metadata block has been verified ok */
+ BH_JBDPrivateStart, /* First bit available for private use by FS */
+};
+
+BUFFER_FNS(JBD, jbd)
+BUFFER_FNS(JWrite, jwrite)
+BUFFER_FNS(JBDDirty, jbddirty)
+TAS_BUFFER_FNS(JBDDirty, jbddirty)
+BUFFER_FNS(Revoked, revoked)
+TAS_BUFFER_FNS(Revoked, revoked)
+BUFFER_FNS(RevokeValid, revokevalid)
+TAS_BUFFER_FNS(RevokeValid, revokevalid)
+BUFFER_FNS(Freed, freed)
+BUFFER_FNS(Verified, verified)
static inline struct buffer_head *jh2bh(struct journal_head *jh)
{
diff --git a/trunk/include/linux/kvm_host.h b/trunk/include/linux/kvm_host.h
index 8db53cfaccdb..f0eea07d2c2b 100644
--- a/trunk/include/linux/kvm_host.h
+++ b/trunk/include/linux/kvm_host.h
@@ -23,7 +23,6 @@
#include
#include
#include
-#include
#include
#include
@@ -761,6 +760,42 @@ static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
}
#endif
+static inline void __guest_enter(void)
+{
+ /*
+ * This is running in ioctl context so we can avoid
+ * the call to vtime_account() with its unnecessary idle check.
+ */
+ vtime_account_system(current);
+ current->flags |= PF_VCPU;
+}
+
+static inline void __guest_exit(void)
+{
+ /*
+ * This is running in ioctl context so we can avoid
+ * the call to vtime_account() with its unnecessary idle check.
+ */
+ vtime_account_system(current);
+ current->flags &= ~PF_VCPU;
+}
+
+#ifdef CONFIG_CONTEXT_TRACKING
+extern void guest_enter(void);
+extern void guest_exit(void);
+
+#else /* !CONFIG_CONTEXT_TRACKING */
+static inline void guest_enter(void)
+{
+ __guest_enter();
+}
+
+static inline void guest_exit(void)
+{
+ __guest_exit();
+}
+#endif /* !CONFIG_CONTEXT_TRACKING */
+
static inline void kvm_guest_enter(void)
{
unsigned long flags;
diff --git a/trunk/drivers/block/loop.h b/trunk/include/linux/loop.h
similarity index 98%
rename from trunk/drivers/block/loop.h
rename to trunk/include/linux/loop.h
index 90df5d6485b6..460b60fa7adf 100644
--- a/trunk/drivers/block/loop.h
+++ b/trunk/include/linux/loop.h
@@ -1,5 +1,5 @@
/*
- * loop.h
+ * include/linux/loop.h
*
* Written by Theodore Ts'o, 3/29/93.
*
diff --git a/trunk/include/linux/mm.h b/trunk/include/linux/mm.h
index 66d881f1d576..e0c8528a41a4 100644
--- a/trunk/include/linux/mm.h
+++ b/trunk/include/linux/mm.h
@@ -1041,8 +1041,7 @@ int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);
extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
-extern void do_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
+extern void do_invalidatepage(struct page *page, unsigned long offset);
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
diff --git a/trunk/include/linux/netdevice.h b/trunk/include/linux/netdevice.h
index 96e4c21e15e0..60584b185a0c 100644
--- a/trunk/include/linux/netdevice.h
+++ b/trunk/include/linux/netdevice.h
@@ -1695,7 +1695,6 @@ extern int init_dummy_netdev(struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
-extern int netdev_get_name(struct net *net, char *name, int ifindex);
extern int dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int netpoll_trap(void);
diff --git a/trunk/include/linux/perf_event.h b/trunk/include/linux/perf_event.h
index c5b6dbf9c2fc..f463a46424e2 100644
--- a/trunk/include/linux/perf_event.h
+++ b/trunk/include/linux/perf_event.h
@@ -389,7 +389,8 @@ struct perf_event {
/* mmap bits */
struct mutex mmap_mutex;
atomic_t mmap_count;
-
+ int mmap_locked;
+ struct user_struct *mmap_user;
struct ring_buffer *rb;
struct list_head rb_entry;
diff --git a/trunk/include/linux/preempt.h b/trunk/include/linux/preempt.h
index f5d4723cdb3d..87a03c746f17 100644
--- a/trunk/include/linux/preempt.h
+++ b/trunk/include/linux/preempt.h
@@ -33,25 +33,9 @@ do { \
preempt_schedule(); \
} while (0)
-#ifdef CONFIG_CONTEXT_TRACKING
-
-void preempt_schedule_context(void);
-
-#define preempt_check_resched_context() \
-do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
- preempt_schedule_context(); \
-} while (0)
-#else
-
-#define preempt_check_resched_context() preempt_check_resched()
-
-#endif /* CONFIG_CONTEXT_TRACKING */
-
#else /* !CONFIG_PREEMPT */
#define preempt_check_resched() do { } while (0)
-#define preempt_check_resched_context() do { } while (0)
#endif /* CONFIG_PREEMPT */
@@ -104,7 +88,7 @@ do { \
do { \
preempt_enable_no_resched_notrace(); \
barrier(); \
- preempt_check_resched_context(); \
+ preempt_check_resched(); \
} while (0)
#else /* !CONFIG_PREEMPT_COUNT */
diff --git a/trunk/include/linux/skbuff.h b/trunk/include/linux/skbuff.h
index dec1748cd002..9c676eae3968 100644
--- a/trunk/include/linux/skbuff.h
+++ b/trunk/include/linux/skbuff.h
@@ -627,7 +627,6 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
}
extern void kfree_skb(struct sk_buff *skb);
-extern void kfree_skb_list(struct sk_buff *segs);
extern void skb_tx_error(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
diff --git a/trunk/include/linux/smp.h b/trunk/include/linux/smp.h
index c8488763277f..e6564c1dc552 100644
--- a/trunk/include/linux/smp.h
+++ b/trunk/include/linux/smp.h
@@ -11,7 +11,6 @@
#include
#include
#include
-#include
extern void cpu_idle(void);
@@ -140,17 +139,13 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
}
#define smp_call_function(func, info, wait) \
(up_smp_call_function(func, info))
-
-static inline int on_each_cpu(smp_call_func_t func, void *info, int wait)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- func(info);
- local_irq_restore(flags);
- return 0;
-}
-
+#define on_each_cpu(func,info,wait) \
+ ({ \
+ local_irq_disable(); \
+ func(info); \
+ local_irq_enable(); \
+ 0; \
+ })
/*
* Note we still need to test the mask even for UP
* because we actually can get an empty mask from
diff --git a/trunk/include/linux/splice.h b/trunk/include/linux/splice.h
index 74575cbf2d6f..09a545a7dfa3 100644
--- a/trunk/include/linux/splice.h
+++ b/trunk/include/linux/splice.h
@@ -35,7 +35,6 @@ struct splice_desc {
void *data; /* cookie */
} u;
loff_t pos; /* file position */
- loff_t *opos; /* sendfile: output position */
size_t num_spliced; /* number of bytes already spliced */
bool need_wakeup; /* need to wake up writer */
};
diff --git a/trunk/include/linux/vtime.h b/trunk/include/linux/vtime.h
index b1dd2db80076..71a5782d8c59 100644
--- a/trunk/include/linux/vtime.h
+++ b/trunk/include/linux/vtime.h
@@ -34,7 +34,7 @@ static inline void vtime_user_exit(struct task_struct *tsk)
}
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
-extern void vtime_init_idle(struct task_struct *tsk, int cpu);
+extern void vtime_init_idle(struct task_struct *tsk);
#else
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
@@ -45,7 +45,7 @@ static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
-static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
+static inline void vtime_init_idle(struct task_struct *tsk) { }
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
diff --git a/trunk/include/linux/writeback.h b/trunk/include/linux/writeback.h
index abfe11787af3..579a5007c696 100644
--- a/trunk/include/linux/writeback.h
+++ b/trunk/include/linux/writeback.h
@@ -78,7 +78,6 @@ struct writeback_control {
unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */
unsigned for_reclaim:1; /* Invoked from the page allocator */
unsigned range_cyclic:1; /* range_start is cyclic */
- unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
};
/*
diff --git a/trunk/include/media/v4l2-mem2mem.h b/trunk/include/media/v4l2-mem2mem.h
index 0f4555b2a31b..d3eef01da648 100644
--- a/trunk/include/media/v4l2-mem2mem.h
+++ b/trunk/include/media/v4l2-mem2mem.h
@@ -110,8 +110,6 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf);
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf);
-int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
- struct v4l2_create_buffers *create);
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_exportbuffer *eb);
diff --git a/trunk/include/net/ip_tunnels.h b/trunk/include/net/ip_tunnels.h
index 09b1360e10bf..4b6f0b28f41f 100644
--- a/trunk/include/net/ip_tunnels.h
+++ b/trunk/include/net/ip_tunnels.h
@@ -95,10 +95,10 @@ struct ip_tunnel_net {
int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
-int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
- struct rtnl_link_ops *ops, char *devname);
+int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
+ struct rtnl_link_ops *ops, char *devname);
-void ip_tunnel_delete_net(struct ip_tunnel_net *itn);
+void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params);
diff --git a/trunk/include/trace/events/ext3.h b/trunk/include/trace/events/ext3.h
index 6797b9de90ed..15d11a39be47 100644
--- a/trunk/include/trace/events/ext3.h
+++ b/trunk/include/trace/events/ext3.h
@@ -290,14 +290,13 @@ DEFINE_EVENT(ext3__page_op, ext3_releasepage,
);
TRACE_EVENT(ext3_invalidatepage,
- TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
+ TP_PROTO(struct page *page, unsigned long offset),
- TP_ARGS(page, offset, length),
+ TP_ARGS(page, offset),
TP_STRUCT__entry(
__field( pgoff_t, index )
- __field( unsigned int, offset )
- __field( unsigned int, length )
+ __field( unsigned long, offset )
__field( ino_t, ino )
__field( dev_t, dev )
@@ -306,15 +305,14 @@ TRACE_EVENT(ext3_invalidatepage,
TP_fast_assign(
__entry->index = page->index;
__entry->offset = offset;
- __entry->length = length;
__entry->ino = page->mapping->host->i_ino;
__entry->dev = page->mapping->host->i_sb->s_dev;
),
- TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
+ TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- __entry->index, __entry->offset, __entry->length)
+ __entry->index, __entry->offset)
);
TRACE_EVENT(ext3_discard_blocks,
diff --git a/trunk/include/trace/events/ext4.h b/trunk/include/trace/events/ext4.h
index 2068db241f22..8ee15b97cd38 100644
--- a/trunk/include/trace/events/ext4.h
+++ b/trunk/include/trace/events/ext4.h
@@ -19,57 +19,6 @@ struct extent_status;
#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
-#define show_mballoc_flags(flags) __print_flags(flags, "|", \
- { EXT4_MB_HINT_MERGE, "HINT_MERGE" }, \
- { EXT4_MB_HINT_RESERVED, "HINT_RESV" }, \
- { EXT4_MB_HINT_METADATA, "HINT_MDATA" }, \
- { EXT4_MB_HINT_FIRST, "HINT_FIRST" }, \
- { EXT4_MB_HINT_BEST, "HINT_BEST" }, \
- { EXT4_MB_HINT_DATA, "HINT_DATA" }, \
- { EXT4_MB_HINT_NOPREALLOC, "HINT_NOPREALLOC" }, \
- { EXT4_MB_HINT_GROUP_ALLOC, "HINT_GRP_ALLOC" }, \
- { EXT4_MB_HINT_GOAL_ONLY, "HINT_GOAL_ONLY" }, \
- { EXT4_MB_HINT_TRY_GOAL, "HINT_TRY_GOAL" }, \
- { EXT4_MB_DELALLOC_RESERVED, "DELALLOC_RESV" }, \
- { EXT4_MB_STREAM_ALLOC, "STREAM_ALLOC" }, \
- { EXT4_MB_USE_ROOT_BLOCKS, "USE_ROOT_BLKS" }, \
- { EXT4_MB_USE_RESERVED, "USE_RESV" })
-
-#define show_map_flags(flags) __print_flags(flags, "|", \
- { EXT4_GET_BLOCKS_CREATE, "CREATE" }, \
- { EXT4_GET_BLOCKS_UNINIT_EXT, "UNINIT" }, \
- { EXT4_GET_BLOCKS_DELALLOC_RESERVE, "DELALLOC" }, \
- { EXT4_GET_BLOCKS_PRE_IO, "PRE_IO" }, \
- { EXT4_GET_BLOCKS_CONVERT, "CONVERT" }, \
- { EXT4_GET_BLOCKS_METADATA_NOFAIL, "METADATA_NOFAIL" }, \
- { EXT4_GET_BLOCKS_NO_NORMALIZE, "NO_NORMALIZE" }, \
- { EXT4_GET_BLOCKS_KEEP_SIZE, "KEEP_SIZE" }, \
- { EXT4_GET_BLOCKS_NO_LOCK, "NO_LOCK" }, \
- { EXT4_GET_BLOCKS_NO_PUT_HOLE, "NO_PUT_HOLE" })
-
-#define show_mflags(flags) __print_flags(flags, "", \
- { EXT4_MAP_NEW, "N" }, \
- { EXT4_MAP_MAPPED, "M" }, \
- { EXT4_MAP_UNWRITTEN, "U" }, \
- { EXT4_MAP_BOUNDARY, "B" }, \
- { EXT4_MAP_UNINIT, "u" }, \
- { EXT4_MAP_FROM_CLUSTER, "C" })
-
-#define show_free_flags(flags) __print_flags(flags, "|", \
- { EXT4_FREE_BLOCKS_METADATA, "METADATA" }, \
- { EXT4_FREE_BLOCKS_FORGET, "FORGET" }, \
- { EXT4_FREE_BLOCKS_VALIDATED, "VALIDATED" }, \
- { EXT4_FREE_BLOCKS_NO_QUOT_UPDATE, "NO_QUOTA" }, \
- { EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER,"1ST_CLUSTER" },\
- { EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER, "LAST_CLUSTER" })
-
-#define show_extent_status(status) __print_flags(status, "", \
- { (1 << 3), "W" }, \
- { (1 << 2), "U" }, \
- { (1 << 1), "D" }, \
- { (1 << 0), "H" })
-
-
TRACE_EVENT(ext4_free_inode,
TP_PROTO(struct inode *inode),
@@ -332,7 +281,7 @@ DEFINE_EVENT(ext4__write_end, ext4_da_write_end,
TP_ARGS(inode, pos, len, copied)
);
-TRACE_EVENT(ext4_writepages,
+TRACE_EVENT(ext4_da_writepages,
TP_PROTO(struct inode *inode, struct writeback_control *wbc),
TP_ARGS(inode, wbc),
@@ -375,62 +324,46 @@ TRACE_EVENT(ext4_writepages,
);
TRACE_EVENT(ext4_da_write_pages,
- TP_PROTO(struct inode *inode, pgoff_t first_page,
- struct writeback_control *wbc),
+ TP_PROTO(struct inode *inode, struct mpage_da_data *mpd),
- TP_ARGS(inode, first_page, wbc),
+ TP_ARGS(inode, mpd),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( pgoff_t, first_page )
- __field( long, nr_to_write )
- __field( int, sync_mode )
+ __field( __u64, b_blocknr )
+ __field( __u32, b_size )
+ __field( __u32, b_state )
+ __field( unsigned long, first_page )
+ __field( int, io_done )
+ __field( int, pages_written )
+ __field( int, sync_mode )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->first_page = first_page;
- __entry->nr_to_write = wbc->nr_to_write;
- __entry->sync_mode = wbc->sync_mode;
+ __entry->b_blocknr = mpd->b_blocknr;
+ __entry->b_size = mpd->b_size;
+ __entry->b_state = mpd->b_state;
+ __entry->first_page = mpd->first_page;
+ __entry->io_done = mpd->io_done;
+ __entry->pages_written = mpd->pages_written;
+ __entry->sync_mode = mpd->wbc->sync_mode;
),
- TP_printk("dev %d,%d ino %lu first_page %lu nr_to_write %ld "
- "sync_mode %d",
+ TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x "
+ "first_page %lu io_done %d pages_written %d sync_mode %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->first_page,
- __entry->nr_to_write, __entry->sync_mode)
-);
-
-TRACE_EVENT(ext4_da_write_pages_extent,
- TP_PROTO(struct inode *inode, struct ext4_map_blocks *map),
-
- TP_ARGS(inode, map),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( __u64, lblk )
- __field( __u32, len )
- __field( __u32, flags )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->lblk = map->m_lblk;
- __entry->len = map->m_len;
- __entry->flags = map->m_flags;
- ),
-
- TP_printk("dev %d,%d ino %lu lblk %llu len %u flags %s",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->lblk, __entry->len,
- show_mflags(__entry->flags))
+ (unsigned long) __entry->ino,
+ __entry->b_blocknr, __entry->b_size,
+ __entry->b_state, __entry->first_page,
+ __entry->io_done, __entry->pages_written,
+ __entry->sync_mode
+ )
);
-TRACE_EVENT(ext4_writepages_result,
+TRACE_EVENT(ext4_da_writepages_result,
TP_PROTO(struct inode *inode, struct writeback_control *wbc,
int ret, int pages_written),
@@ -511,16 +444,16 @@ DEFINE_EVENT(ext4__page_op, ext4_releasepage,
);
DECLARE_EVENT_CLASS(ext4_invalidatepage_op,
- TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
+ TP_PROTO(struct page *page, unsigned long offset),
- TP_ARGS(page, offset, length),
+ TP_ARGS(page, offset),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( pgoff_t, index )
- __field( unsigned int, offset )
- __field( unsigned int, length )
+ __field( unsigned long, offset )
+
),
TP_fast_assign(
@@ -528,26 +461,24 @@ DECLARE_EVENT_CLASS(ext4_invalidatepage_op,
__entry->ino = page->mapping->host->i_ino;
__entry->index = page->index;
__entry->offset = offset;
- __entry->length = length;
),
- TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
+ TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- (unsigned long) __entry->index,
- __entry->offset, __entry->length)
+ (unsigned long) __entry->index, __entry->offset)
);
DEFINE_EVENT(ext4_invalidatepage_op, ext4_invalidatepage,
- TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
+ TP_PROTO(struct page *page, unsigned long offset),
- TP_ARGS(page, offset, length)
+ TP_ARGS(page, offset)
);
DEFINE_EVENT(ext4_invalidatepage_op, ext4_journalled_invalidatepage,
- TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
+ TP_PROTO(struct page *page, unsigned long offset),
- TP_ARGS(page, offset, length)
+ TP_ARGS(page, offset)
);
TRACE_EVENT(ext4_discard_blocks,
@@ -742,10 +673,10 @@ TRACE_EVENT(ext4_request_blocks,
__entry->flags = ar->flags;
),
- TP_printk("dev %d,%d ino %lu flags %s len %u lblk %u goal %llu "
+ TP_printk("dev %d,%d ino %lu flags %u len %u lblk %u goal %llu "
"lleft %u lright %u pleft %llu pright %llu ",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, show_mballoc_flags(__entry->flags),
+ (unsigned long) __entry->ino, __entry->flags,
__entry->len, __entry->logical, __entry->goal,
__entry->lleft, __entry->lright, __entry->pleft,
__entry->pright)
@@ -784,10 +715,10 @@ TRACE_EVENT(ext4_allocate_blocks,
__entry->flags = ar->flags;
),
- TP_printk("dev %d,%d ino %lu flags %s len %u block %llu lblk %u "
+ TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %u "
"goal %llu lleft %u lright %u pleft %llu pright %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, show_mballoc_flags(__entry->flags),
+ (unsigned long) __entry->ino, __entry->flags,
__entry->len, __entry->block, __entry->logical,
__entry->goal, __entry->lleft, __entry->lright,
__entry->pleft, __entry->pright)
@@ -817,11 +748,11 @@ TRACE_EVENT(ext4_free_blocks,
__entry->mode = inode->i_mode;
),
- TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %s",
+ TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->mode, __entry->block, __entry->count,
- show_free_flags(__entry->flags))
+ __entry->flags)
);
TRACE_EVENT(ext4_sync_file_enter,
@@ -972,7 +903,7 @@ TRACE_EVENT(ext4_mballoc_alloc,
),
TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
- "result %u/%d/%u@%u blks %u grps %u cr %u flags %s "
+ "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x "
"tail %u broken %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
@@ -983,7 +914,7 @@ TRACE_EVENT(ext4_mballoc_alloc,
__entry->result_group, __entry->result_start,
__entry->result_len, __entry->result_logical,
__entry->found, __entry->groups, __entry->cr,
- show_mballoc_flags(__entry->flags), __entry->tail,
+ __entry->flags, __entry->tail,
__entry->buddy ? 1 << __entry->buddy : 0)
);
@@ -1597,10 +1528,10 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
__entry->flags = flags;
),
- TP_printk("dev %d,%d ino %lu lblk %u len %u flags %s",
+ TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- __entry->lblk, __entry->len, show_map_flags(__entry->flags))
+ __entry->lblk, __entry->len, __entry->flags)
);
DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter,
@@ -1618,53 +1549,47 @@ DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter,
);
DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
- TP_PROTO(struct inode *inode, unsigned flags, struct ext4_map_blocks *map,
- int ret),
+ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
- TP_ARGS(inode, flags, map, ret),
+ TP_ARGS(inode, map, ret),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( unsigned int, flags )
__field( ext4_fsblk_t, pblk )
__field( ext4_lblk_t, lblk )
__field( unsigned int, len )
- __field( unsigned int, mflags )
+ __field( unsigned int, flags )
__field( int, ret )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->flags = flags;
__entry->pblk = map->m_pblk;
__entry->lblk = map->m_lblk;
__entry->len = map->m_len;
- __entry->mflags = map->m_flags;
+ __entry->flags = map->m_flags;
__entry->ret = ret;
),
- TP_printk("dev %d,%d ino %lu flags %s lblk %u pblk %llu len %u "
- "mflags %s ret %d",
+ TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u flags %x ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- show_map_flags(__entry->flags), __entry->lblk, __entry->pblk,
- __entry->len, show_mflags(__entry->mflags), __entry->ret)
+ __entry->lblk, __entry->pblk,
+ __entry->len, __entry->flags, __entry->ret)
);
DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit,
- TP_PROTO(struct inode *inode, unsigned flags,
- struct ext4_map_blocks *map, int ret),
+ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
- TP_ARGS(inode, flags, map, ret)
+ TP_ARGS(inode, map, ret)
);
DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit,
- TP_PROTO(struct inode *inode, unsigned flags,
- struct ext4_map_blocks *map, int ret),
+ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
- TP_ARGS(inode, flags, map, ret)
+ TP_ARGS(inode, map, ret)
);
TRACE_EVENT(ext4_ext_load_extent,
@@ -1713,50 +1638,25 @@ TRACE_EVENT(ext4_load_inode,
);
TRACE_EVENT(ext4_journal_start,
- TP_PROTO(struct super_block *sb, int blocks, int rsv_blocks,
- unsigned long IP),
+ TP_PROTO(struct super_block *sb, int nblocks, unsigned long IP),
- TP_ARGS(sb, blocks, rsv_blocks, IP),
+ TP_ARGS(sb, nblocks, IP),
TP_STRUCT__entry(
__field( dev_t, dev )
__field(unsigned long, ip )
- __field( int, blocks )
- __field( int, rsv_blocks )
+ __field( int, nblocks )
),
TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->ip = IP;
- __entry->blocks = blocks;
- __entry->rsv_blocks = rsv_blocks;
+ __entry->dev = sb->s_dev;
+ __entry->ip = IP;
+ __entry->nblocks = nblocks;
),
- TP_printk("dev %d,%d blocks, %d rsv_blocks, %d caller %pF",
+ TP_printk("dev %d,%d nblocks %d caller %pF",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->blocks, __entry->rsv_blocks, (void *)__entry->ip)
-);
-
-TRACE_EVENT(ext4_journal_start_reserved,
- TP_PROTO(struct super_block *sb, int blocks, unsigned long IP),
-
- TP_ARGS(sb, blocks, IP),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field(unsigned long, ip )
- __field( int, blocks )
- ),
-
- TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->ip = IP;
- __entry->blocks = blocks;
- ),
-
- TP_printk("dev %d,%d blocks, %d caller %pF",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->blocks, (void *)__entry->ip)
+ __entry->nblocks, (void *)__entry->ip)
);
DECLARE_EVENT_CLASS(ext4__trim,
@@ -1836,12 +1736,12 @@ TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
__entry->newblk = newblock;
),
- TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %s "
+ TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %x "
"allocated %d newblock %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned) __entry->lblk, (unsigned long long) __entry->pblk,
- __entry->len, show_map_flags(__entry->flags),
+ __entry->len, __entry->flags,
(unsigned int) __entry->allocated,
(unsigned long long) __entry->newblk)
);
@@ -1869,10 +1769,10 @@ TRACE_EVENT(ext4_get_implied_cluster_alloc_exit,
__entry->ret = ret;
),
- TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %s ret %d",
+ TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %u ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->lblk, (unsigned long long) __entry->pblk,
- __entry->len, show_mflags(__entry->flags), __entry->ret)
+ __entry->len, __entry->flags, __entry->ret)
);
TRACE_EVENT(ext4_ext_put_in_cache,
@@ -2026,7 +1926,7 @@ TRACE_EVENT(ext4_ext_show_extent,
TRACE_EVENT(ext4_remove_blocks,
TP_PROTO(struct inode *inode, struct ext4_extent *ex,
ext4_lblk_t from, ext4_fsblk_t to,
- long long partial_cluster),
+ ext4_fsblk_t partial_cluster),
TP_ARGS(inode, ex, from, to, partial_cluster),
@@ -2035,7 +1935,7 @@ TRACE_EVENT(ext4_remove_blocks,
__field( ino_t, ino )
__field( ext4_lblk_t, from )
__field( ext4_lblk_t, to )
- __field( long long, partial )
+ __field( ext4_fsblk_t, partial )
__field( ext4_fsblk_t, ee_pblk )
__field( ext4_lblk_t, ee_lblk )
__field( unsigned short, ee_len )
@@ -2053,7 +1953,7 @@ TRACE_EVENT(ext4_remove_blocks,
),
TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u]"
- "from %u to %u partial_cluster %lld",
+ "from %u to %u partial_cluster %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned) __entry->ee_lblk,
@@ -2061,20 +1961,19 @@ TRACE_EVENT(ext4_remove_blocks,
(unsigned short) __entry->ee_len,
(unsigned) __entry->from,
(unsigned) __entry->to,
- (long long) __entry->partial)
+ (unsigned) __entry->partial)
);
TRACE_EVENT(ext4_ext_rm_leaf,
TP_PROTO(struct inode *inode, ext4_lblk_t start,
- struct ext4_extent *ex,
- long long partial_cluster),
+ struct ext4_extent *ex, ext4_fsblk_t partial_cluster),
TP_ARGS(inode, start, ex, partial_cluster),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( long long, partial )
+ __field( ext4_fsblk_t, partial )
__field( ext4_lblk_t, start )
__field( ext4_lblk_t, ee_lblk )
__field( ext4_fsblk_t, ee_pblk )
@@ -2092,14 +1991,14 @@ TRACE_EVENT(ext4_ext_rm_leaf,
),
TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u]"
- "partial_cluster %lld",
+ "partial_cluster %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned) __entry->start,
(unsigned) __entry->ee_lblk,
(unsigned long long) __entry->ee_pblk,
(unsigned short) __entry->ee_len,
- (long long) __entry->partial)
+ (unsigned) __entry->partial)
);
TRACE_EVENT(ext4_ext_rm_idx,
@@ -2126,16 +2025,14 @@ TRACE_EVENT(ext4_ext_rm_idx,
);
TRACE_EVENT(ext4_ext_remove_space,
- TP_PROTO(struct inode *inode, ext4_lblk_t start,
- ext4_lblk_t end, int depth),
+ TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth),
- TP_ARGS(inode, start, end, depth),
+ TP_ARGS(inode, start, depth),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( ext4_lblk_t, start )
- __field( ext4_lblk_t, end )
__field( int, depth )
),
@@ -2143,31 +2040,28 @@ TRACE_EVENT(ext4_ext_remove_space,
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->start = start;
- __entry->end = end;
__entry->depth = depth;
),
- TP_printk("dev %d,%d ino %lu since %u end %u depth %d",
+ TP_printk("dev %d,%d ino %lu since %u depth %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned) __entry->start,
- (unsigned) __entry->end,
__entry->depth)
);
TRACE_EVENT(ext4_ext_remove_space_done,
- TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t end,
- int depth, long long partial, __le16 eh_entries),
+ TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth,
+ ext4_lblk_t partial, __le16 eh_entries),
- TP_ARGS(inode, start, end, depth, partial, eh_entries),
+ TP_ARGS(inode, start, depth, partial, eh_entries),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( ext4_lblk_t, start )
- __field( ext4_lblk_t, end )
__field( int, depth )
- __field( long long, partial )
+ __field( ext4_lblk_t, partial )
__field( unsigned short, eh_entries )
),
@@ -2175,20 +2069,18 @@ TRACE_EVENT(ext4_ext_remove_space_done,
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->start = start;
- __entry->end = end;
__entry->depth = depth;
__entry->partial = partial;
__entry->eh_entries = le16_to_cpu(eh_entries);
),
- TP_printk("dev %d,%d ino %lu since %u end %u depth %d partial %lld "
+ TP_printk("dev %d,%d ino %lu since %u depth %d partial %u "
"remaining_entries %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned) __entry->start,
- (unsigned) __entry->end,
__entry->depth,
- (long long) __entry->partial,
+ (unsigned) __entry->partial,
(unsigned short) __entry->eh_entries)
);
@@ -2203,7 +2095,7 @@ TRACE_EVENT(ext4_es_insert_extent,
__field( ext4_lblk_t, lblk )
__field( ext4_lblk_t, len )
__field( ext4_fsblk_t, pblk )
- __field( char, status )
+ __field( unsigned long long, status )
),
TP_fast_assign(
@@ -2212,14 +2104,14 @@ TRACE_EVENT(ext4_es_insert_extent,
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
__entry->pblk = ext4_es_pblock(es);
- __entry->status = ext4_es_status(es) >> 60;
+ __entry->status = ext4_es_status(es);
),
- TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
+ TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->lblk, __entry->len,
- __entry->pblk, show_extent_status(__entry->status))
+ __entry->pblk, __entry->status)
);
TRACE_EVENT(ext4_es_remove_extent,
@@ -2280,7 +2172,7 @@ TRACE_EVENT(ext4_es_find_delayed_extent_range_exit,
__field( ext4_lblk_t, lblk )
__field( ext4_lblk_t, len )
__field( ext4_fsblk_t, pblk )
- __field( char, status )
+ __field( unsigned long long, status )
),
TP_fast_assign(
@@ -2289,14 +2181,14 @@ TRACE_EVENT(ext4_es_find_delayed_extent_range_exit,
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
__entry->pblk = ext4_es_pblock(es);
- __entry->status = ext4_es_status(es) >> 60;
+ __entry->status = ext4_es_status(es);
),
- TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
+ TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->lblk, __entry->len,
- __entry->pblk, show_extent_status(__entry->status))
+ __entry->pblk, __entry->status)
);
TRACE_EVENT(ext4_es_lookup_extent_enter,
@@ -2333,7 +2225,7 @@ TRACE_EVENT(ext4_es_lookup_extent_exit,
__field( ext4_lblk_t, lblk )
__field( ext4_lblk_t, len )
__field( ext4_fsblk_t, pblk )
- __field( char, status )
+ __field( unsigned long long, status )
__field( int, found )
),
@@ -2343,16 +2235,16 @@ TRACE_EVENT(ext4_es_lookup_extent_exit,
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
__entry->pblk = ext4_es_pblock(es);
- __entry->status = ext4_es_status(es) >> 60;
+ __entry->status = ext4_es_status(es);
__entry->found = found;
),
- TP_printk("dev %d,%d ino %lu found %d [%u/%u) %llu %s",
+ TP_printk("dev %d,%d ino %lu found %d [%u/%u) %llu %llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino, __entry->found,
__entry->lblk, __entry->len,
__entry->found ? __entry->pblk : 0,
- show_extent_status(__entry->found ? __entry->status : 0))
+ __entry->found ? __entry->status : 0)
);
TRACE_EVENT(ext4_es_shrink_enter,
diff --git a/trunk/include/uapi/linux/Kbuild b/trunk/include/uapi/linux/Kbuild
index bdc6e87ff3eb..ab5d4992e568 100644
--- a/trunk/include/uapi/linux/Kbuild
+++ b/trunk/include/uapi/linux/Kbuild
@@ -261,7 +261,6 @@ header-y += net_dropmon.h
header-y += net_tstamp.h
header-y += netconf.h
header-y += netdevice.h
-header-y += netlink_diag.h
header-y += netfilter.h
header-y += netfilter_arp.h
header-y += netfilter_bridge.h
diff --git a/trunk/kernel/context_tracking.c b/trunk/kernel/context_tracking.c
index 383f8231e436..65349f07b878 100644
--- a/trunk/kernel/context_tracking.c
+++ b/trunk/kernel/context_tracking.c
@@ -15,6 +15,7 @@
*/
#include
+#include
#include
#include
#include
@@ -70,46 +71,6 @@ void user_enter(void)
local_irq_restore(flags);
}
-#ifdef CONFIG_PREEMPT
-/**
- * preempt_schedule_context - preempt_schedule called by tracing
- *
- * The tracing infrastructure uses preempt_enable_notrace to prevent
- * recursion and tracing preempt enabling caused by the tracing
- * infrastructure itself. But as tracing can happen in areas coming
- * from userspace or just about to enter userspace, a preempt enable
- * can occur before user_exit() is called. This will cause the scheduler
- * to be called when the system is still in usermode.
- *
- * To prevent this, the preempt_enable_notrace will use this function
- * instead of preempt_schedule() to exit user context if needed before
- * calling the scheduler.
- */
-void __sched notrace preempt_schedule_context(void)
-{
- struct thread_info *ti = current_thread_info();
- enum ctx_state prev_ctx;
-
- if (likely(ti->preempt_count || irqs_disabled()))
- return;
-
- /*
- * Need to disable preemption in case user_exit() is traced
- * and the tracer calls preempt_enable_notrace() causing
- * an infinite recursion.
- */
- preempt_disable_notrace();
- prev_ctx = exception_enter();
- preempt_enable_no_resched_notrace();
-
- preempt_schedule();
-
- preempt_disable_notrace();
- exception_exit(prev_ctx);
- preempt_enable_notrace();
-}
-EXPORT_SYMBOL_GPL(preempt_schedule_context);
-#endif /* CONFIG_PREEMPT */
/**
* user_exit - Inform the context tracking that the CPU is
diff --git a/trunk/kernel/cpu/idle.c b/trunk/kernel/cpu/idle.c
index e695c0a0bcb5..d5585f5e038e 100644
--- a/trunk/kernel/cpu/idle.c
+++ b/trunk/kernel/cpu/idle.c
@@ -5,7 +5,6 @@
#include
#include
#include
-#include
#include
@@ -59,7 +58,6 @@ void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
cpu_idle_force_poll = 1;
- local_irq_enable();
}
/*
@@ -114,21 +112,6 @@ static void cpu_idle_loop(void)
void cpu_startup_entry(enum cpuhp_state state)
{
- /*
- * This #ifdef needs to die, but it's too late in the cycle to
- * make this generic (arm and sh have never invoked the canary
- * init for the non boot cpus!). Will be fixed in 3.11
- */
-#ifdef CONFIG_X86
- /*
- * If we're the non-boot CPU, nothing set the stack canary up
- * for us. The boot CPU already has it initialized but no harm
- * in doing it again. This is a good place for updating it, as
- * we wont ever return from this function (so the invalid
- * canaries already on the stack wont ever trigger).
- */
- boot_init_stack_canary();
-#endif
current_set_polling();
arch_cpu_idle_prepare();
cpu_idle_loop();
diff --git a/trunk/kernel/events/core.c b/trunk/kernel/events/core.c
index b391907d5352..9dc297faf7c0 100644
--- a/trunk/kernel/events/core.c
+++ b/trunk/kernel/events/core.c
@@ -196,6 +196,9 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
+static void ring_buffer_attach(struct perf_event *event,
+ struct ring_buffer *rb);
+
void __weak perf_event_print_debug(void) { }
extern __weak const char *perf_pmu_name(void)
@@ -2915,7 +2918,6 @@ static void free_event_rcu(struct rcu_head *head)
}
static void ring_buffer_put(struct ring_buffer *rb);
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
static void free_event(struct perf_event *event)
{
@@ -2940,30 +2942,15 @@ static void free_event(struct perf_event *event)
if (has_branch_stack(event)) {
static_key_slow_dec_deferred(&perf_sched_events);
/* is system-wide event */
- if (!(event->attach_state & PERF_ATTACH_TASK)) {
+ if (!(event->attach_state & PERF_ATTACH_TASK))
atomic_dec(&per_cpu(perf_branch_stack_events,
event->cpu));
- }
}
}
if (event->rb) {
- struct ring_buffer *rb;
-
- /*
- * Can happen when we close an event with re-directed output.
- *
- * Since we have a 0 refcount, perf_mmap_close() will skip
- * over us; possibly making our ring_buffer_put() the last.
- */
- mutex_lock(&event->mmap_mutex);
- rb = event->rb;
- if (rb) {
- rcu_assign_pointer(event->rb, NULL);
- ring_buffer_detach(event, rb);
- ring_buffer_put(rb); /* could be last */
- }
- mutex_unlock(&event->mmap_mutex);
+ ring_buffer_put(event->rb);
+ event->rb = NULL;
}
if (is_cgroup_event(event))
@@ -3201,13 +3188,30 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
unsigned int events = POLL_HUP;
/*
- * Pin the event->rb by taking event->mmap_mutex; otherwise
- * perf_event_set_output() can swizzle our rb and make us miss wakeups.
+ * Race between perf_event_set_output() and perf_poll(): perf_poll()
+ * grabs the rb reference but perf_event_set_output() overrides it.
+ * Here is the timeline for two threads T1, T2:
+ * t0: T1, rb = rcu_dereference(event->rb)
+ * t1: T2, old_rb = event->rb
+ * t2: T2, event->rb = new rb
+ * t3: T2, ring_buffer_detach(old_rb)
+ * t4: T1, ring_buffer_attach(rb1)
+ * t5: T1, poll_wait(event->waitq)
+ *
+ * To avoid this problem, we grab mmap_mutex in perf_poll()
+ * thereby ensuring that the assignment of the new ring buffer
+ * and the detachment of the old buffer appear atomic to perf_poll()
*/
mutex_lock(&event->mmap_mutex);
- rb = event->rb;
- if (rb)
+
+ rcu_read_lock();
+ rb = rcu_dereference(event->rb);
+ if (rb) {
+ ring_buffer_attach(event, rb);
events = atomic_xchg(&rb->poll, 0);
+ }
+ rcu_read_unlock();
+
mutex_unlock(&event->mmap_mutex);
poll_wait(file, &event->waitq, wait);
@@ -3517,12 +3521,16 @@ static void ring_buffer_attach(struct perf_event *event,
return;
spin_lock_irqsave(&rb->event_lock, flags);
- if (list_empty(&event->rb_entry))
- list_add(&event->rb_entry, &rb->event_list);
+ if (!list_empty(&event->rb_entry))
+ goto unlock;
+
+ list_add(&event->rb_entry, &rb->event_list);
+unlock:
spin_unlock_irqrestore(&rb->event_lock, flags);
}
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
+static void ring_buffer_detach(struct perf_event *event,
+ struct ring_buffer *rb)
{
unsigned long flags;
@@ -3541,10 +3549,13 @@ static void ring_buffer_wakeup(struct perf_event *event)
rcu_read_lock();
rb = rcu_dereference(event->rb);
- if (rb) {
- list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
- wake_up_all(&event->waitq);
- }
+ if (!rb)
+ goto unlock;
+
+ list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+ wake_up_all(&event->waitq);
+
+unlock:
rcu_read_unlock();
}
@@ -3573,10 +3584,18 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
static void ring_buffer_put(struct ring_buffer *rb)
{
+ struct perf_event *event, *n;
+ unsigned long flags;
+
if (!atomic_dec_and_test(&rb->refcount))
return;
- WARN_ON_ONCE(!list_empty(&rb->event_list));
+ spin_lock_irqsave(&rb->event_lock, flags);
+ list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
+ list_del_init(&event->rb_entry);
+ wake_up_all(&event->waitq);
+ }
+ spin_unlock_irqrestore(&rb->event_lock, flags);
call_rcu(&rb->rcu_head, rb_free_rcu);
}
@@ -3586,100 +3605,26 @@ static void perf_mmap_open(struct vm_area_struct *vma)
struct perf_event *event = vma->vm_file->private_data;
atomic_inc(&event->mmap_count);
- atomic_inc(&event->rb->mmap_count);
}
-/*
- * A buffer can be mmap()ed multiple times; either directly through the same
- * event, or through other events by use of perf_event_set_output().
- *
- * In order to undo the VM accounting done by perf_mmap() we need to destroy
- * the buffer here, where we still have a VM context. This means we need
- * to detach all events redirecting to us.
- */
static void perf_mmap_close(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
- struct ring_buffer *rb = event->rb;
- struct user_struct *mmap_user = rb->mmap_user;
- int mmap_locked = rb->mmap_locked;
- unsigned long size = perf_data_size(rb);
-
- atomic_dec(&rb->mmap_count);
-
- if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
- return;
+ if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
+ unsigned long size = perf_data_size(event->rb);
+ struct user_struct *user = event->mmap_user;
+ struct ring_buffer *rb = event->rb;
- /* Detach current event from the buffer. */
- rcu_assign_pointer(event->rb, NULL);
- ring_buffer_detach(event, rb);
- mutex_unlock(&event->mmap_mutex);
-
- /* If there's still other mmap()s of this buffer, we're done. */
- if (atomic_read(&rb->mmap_count)) {
- ring_buffer_put(rb); /* can't be last */
- return;
- }
-
- /*
- * No other mmap()s, detach from all other events that might redirect
- * into the now unreachable buffer. Somewhat complicated by the
- * fact that rb::event_lock otherwise nests inside mmap_mutex.
- */
-again:
- rcu_read_lock();
- list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
- if (!atomic_long_inc_not_zero(&event->refcount)) {
- /*
- * This event is en-route to free_event() which will
- * detach it and remove it from the list.
- */
- continue;
- }
- rcu_read_unlock();
-
- mutex_lock(&event->mmap_mutex);
- /*
- * Check we didn't race with perf_event_set_output() which can
- * swizzle the rb from under us while we were waiting to
- * acquire mmap_mutex.
- *
- * If we find a different rb; ignore this event, a next
- * iteration will no longer find it on the list. We have to
- * still restart the iteration to make sure we're not now
- * iterating the wrong list.
- */
- if (event->rb == rb) {
- rcu_assign_pointer(event->rb, NULL);
- ring_buffer_detach(event, rb);
- ring_buffer_put(rb); /* can't be last, we still have one */
- }
+ atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
+ vma->vm_mm->pinned_vm -= event->mmap_locked;
+ rcu_assign_pointer(event->rb, NULL);
+ ring_buffer_detach(event, rb);
mutex_unlock(&event->mmap_mutex);
- put_event(event);
- /*
- * Restart the iteration; either we're on the wrong list or
- * destroyed its integrity by doing a deletion.
- */
- goto again;
+ ring_buffer_put(rb);
+ free_uid(user);
}
- rcu_read_unlock();
-
- /*
- * It could be there's still a few 0-ref events on the list; they'll
- * get cleaned up by free_event() -- they'll also still have their
- * ref on the rb and will free it whenever they are done with it.
- *
- * Aside from that, this buffer is 'fully' detached and unmapped,
- * undo the VM accounting.
- */
-
- atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
- vma->vm_mm->pinned_vm -= mmap_locked;
- free_uid(mmap_user);
-
- ring_buffer_put(rb); /* could be last */
}
static const struct vm_operations_struct perf_mmap_vmops = {
@@ -3729,24 +3674,12 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
WARN_ON_ONCE(event->ctx->parent_ctx);
-again:
mutex_lock(&event->mmap_mutex);
if (event->rb) {
- if (event->rb->nr_pages != nr_pages) {
+ if (event->rb->nr_pages == nr_pages)
+ atomic_inc(&event->rb->refcount);
+ else
ret = -EINVAL;
- goto unlock;
- }
-
- if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
- /*
- * Raced against perf_mmap_close() through
- * perf_event_set_output(). Try again, hope for better
- * luck.
- */
- mutex_unlock(&event->mmap_mutex);
- goto again;
- }
-
goto unlock;
}
@@ -3787,16 +3720,12 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
ret = -ENOMEM;
goto unlock;
}
-
- atomic_set(&rb->mmap_count, 1);
- rb->mmap_locked = extra;
- rb->mmap_user = get_current_user();
+ rcu_assign_pointer(event->rb, rb);
atomic_long_add(user_extra, &user->locked_vm);
- vma->vm_mm->pinned_vm += extra;
-
- ring_buffer_attach(event, rb);
- rcu_assign_pointer(event->rb, rb);
+ event->mmap_locked = extra;
+ event->mmap_user = get_current_user();
+ vma->vm_mm->pinned_vm += event->mmap_locked;
perf_event_update_userpage(event);
@@ -3805,11 +3734,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
atomic_inc(&event->mmap_count);
mutex_unlock(&event->mmap_mutex);
- /*
- * Since pinned accounting is per vm we cannot allow fork() to copy our
- * vma.
- */
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &perf_mmap_vmops;
return ret;
@@ -6487,8 +6412,6 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
if (atomic_read(&event->mmap_count))
goto unlock;
- old_rb = event->rb;
-
if (output_event) {
/* get the rb we want to redirect to */
rb = ring_buffer_get(output_event);
@@ -6496,28 +6419,16 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
goto unlock;
}
+ old_rb = event->rb;
+ rcu_assign_pointer(event->rb, rb);
if (old_rb)
ring_buffer_detach(event, old_rb);
-
- if (rb)
- ring_buffer_attach(event, rb);
-
- rcu_assign_pointer(event->rb, rb);
-
- if (old_rb) {
- ring_buffer_put(old_rb);
- /*
- * Since we detached before setting the new rb, so that we
- * could attach the new rb, we could have missed a wakeup.
- * Provide it now.
- */
- wake_up_all(&event->waitq);
- }
-
ret = 0;
unlock:
mutex_unlock(&event->mmap_mutex);
+ if (old_rb)
+ ring_buffer_put(old_rb);
out:
return ret;
}
diff --git a/trunk/kernel/events/hw_breakpoint.c b/trunk/kernel/events/hw_breakpoint.c
index 20185ea64aa6..a64f8aeb5c1f 100644
--- a/trunk/kernel/events/hw_breakpoint.c
+++ b/trunk/kernel/events/hw_breakpoint.c
@@ -120,7 +120,7 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
if (iter->hw.bp_target == tsk &&
find_slot_idx(iter) == type &&
- (iter->cpu < 0 || cpu == iter->cpu))
+ cpu == iter->cpu)
count += hw_breakpoint_weight(iter);
}
@@ -149,7 +149,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
return;
}
- for_each_possible_cpu(cpu) {
+ for_each_online_cpu(cpu) {
unsigned int nr;
nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -235,7 +235,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
if (cpu >= 0) {
toggle_bp_task_slot(bp, cpu, enable, type, weight);
} else {
- for_each_possible_cpu(cpu)
+ for_each_online_cpu(cpu)
toggle_bp_task_slot(bp, cpu, enable, type, weight);
}
diff --git a/trunk/kernel/events/internal.h b/trunk/kernel/events/internal.h
index ca6599723be5..eb675c4d59df 100644
--- a/trunk/kernel/events/internal.h
+++ b/trunk/kernel/events/internal.h
@@ -31,10 +31,6 @@ struct ring_buffer {
spinlock_t event_lock;
struct list_head event_list;
- atomic_t mmap_count;
- unsigned long mmap_locked;
- struct user_struct *mmap_user;
-
struct perf_event_mmap_page *user_page;
void *data_pages[0];
};
diff --git a/trunk/kernel/exit.c b/trunk/kernel/exit.c
index 7bb73f9d09db..af2eb3cbd499 100644
--- a/trunk/kernel/exit.c
+++ b/trunk/kernel/exit.c
@@ -649,6 +649,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
* jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
*/
forget_original_parent(tsk);
+ exit_task_namespaces(tsk);
write_lock_irq(&tasklist_lock);
if (group_dead)
@@ -794,7 +795,6 @@ void do_exit(long code)
exit_shm(tsk);
exit_files(tsk);
exit_fs(tsk);
- exit_task_namespaces(tsk);
exit_task_work(tsk);
check_stack_usage();
exit_thread();
diff --git a/trunk/kernel/kprobes.c b/trunk/kernel/kprobes.c
index bddf3b201a48..3fed7f0cbcdf 100644
--- a/trunk/kernel/kprobes.c
+++ b/trunk/kernel/kprobes.c
@@ -467,7 +467,6 @@ static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
-static LIST_HEAD(freeing_list);
static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
@@ -505,7 +504,7 @@ static __kprobes void do_optimize_kprobes(void)
* Unoptimize (replace a jump with a breakpoint and remove the breakpoint
* if need) kprobes listed on unoptimizing_list.
*/
-static __kprobes void do_unoptimize_kprobes(void)
+static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
{
struct optimized_kprobe *op, *tmp;
@@ -516,9 +515,9 @@ static __kprobes void do_unoptimize_kprobes(void)
/* Ditto to do_optimize_kprobes */
get_online_cpus();
mutex_lock(&text_mutex);
- arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
+ arch_unoptimize_kprobes(&unoptimizing_list, free_list);
/* Loop free_list for disarming */
- list_for_each_entry_safe(op, tmp, &freeing_list, list) {
+ list_for_each_entry_safe(op, tmp, free_list, list) {
/* Disarm probes if marked disabled */
if (kprobe_disabled(&op->kp))
arch_disarm_kprobe(&op->kp);
@@ -537,11 +536,11 @@ static __kprobes void do_unoptimize_kprobes(void)
}
/* Reclaim all kprobes on the free_list */
-static __kprobes void do_free_cleaned_kprobes(void)
+static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
{
struct optimized_kprobe *op, *tmp;
- list_for_each_entry_safe(op, tmp, &freeing_list, list) {
+ list_for_each_entry_safe(op, tmp, free_list, list) {
BUG_ON(!kprobe_unused(&op->kp));
list_del_init(&op->list);
free_aggr_kprobe(&op->kp);
@@ -557,6 +556,8 @@ static __kprobes void kick_kprobe_optimizer(void)
/* Kprobe jump optimizer */
static __kprobes void kprobe_optimizer(struct work_struct *work)
{
+ LIST_HEAD(free_list);
+
mutex_lock(&kprobe_mutex);
/* Lock modules while optimizing kprobes */
mutex_lock(&module_mutex);
@@ -565,7 +566,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
* Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
* kprobes before waiting for quiesence period.
*/
- do_unoptimize_kprobes();
+ do_unoptimize_kprobes(&free_list);
/*
* Step 2: Wait for quiesence period to ensure all running interrupts
@@ -580,7 +581,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
do_optimize_kprobes();
/* Step 4: Free cleaned kprobes after quiesence period */
- do_free_cleaned_kprobes();
+ do_free_cleaned_kprobes(&free_list);
mutex_unlock(&module_mutex);
mutex_unlock(&kprobe_mutex);
@@ -722,19 +723,8 @@ static void __kprobes kill_optimized_kprobe(struct kprobe *p)
if (!list_empty(&op->list))
/* Dequeue from the (un)optimization queue */
list_del_init(&op->list);
- op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
-
- if (kprobe_unused(p)) {
- /* Enqueue if it is unused */
- list_add(&op->list, &freeing_list);
- /*
- * Remove unused probes from the hash list. After waiting
- * for synchronization, this probe is reclaimed.
- * (reclaiming is done by do_free_cleaned_kprobes().)
- */
- hlist_del_rcu(&op->kp.hlist);
- }
+ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
/* Don't touch the code, because it is already freed. */
arch_remove_optimized_kprobe(op);
}
diff --git a/trunk/kernel/ptrace.c b/trunk/kernel/ptrace.c
index 335a7ae697f5..aed981a3f69c 100644
--- a/trunk/kernel/ptrace.c
+++ b/trunk/kernel/ptrace.c
@@ -665,22 +665,20 @@ static int ptrace_peek_siginfo(struct task_struct *child,
if (unlikely(is_compat_task())) {
compat_siginfo_t __user *uinfo = compat_ptr(data);
- if (copy_siginfo_to_user32(uinfo, &info) ||
- __put_user(info.si_code, &uinfo->si_code)) {
- ret = -EFAULT;
- break;
- }
-
+ ret = copy_siginfo_to_user32(uinfo, &info);
+ ret |= __put_user(info.si_code, &uinfo->si_code);
} else
#endif
{
siginfo_t __user *uinfo = (siginfo_t __user *) data;
- if (copy_siginfo_to_user(uinfo, &info) ||
- __put_user(info.si_code, &uinfo->si_code)) {
- ret = -EFAULT;
- break;
- }
+ ret = copy_siginfo_to_user(uinfo, &info);
+ ret |= __put_user(info.si_code, &uinfo->si_code);
+ }
+
+ if (ret) {
+ ret = -EFAULT;
+ break;
}
data += sizeof(siginfo_t);
diff --git a/trunk/kernel/range.c b/trunk/kernel/range.c
index 322ea8e93e4b..eb911dbce267 100644
--- a/trunk/kernel/range.c
+++ b/trunk/kernel/range.c
@@ -4,7 +4,7 @@
#include
#include
#include
-#include
+
#include
int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)
@@ -32,8 +32,9 @@ int add_range_with_merge(struct range *range, int az, int nr_range,
if (start >= end)
return nr_range;
- /* get new start/end: */
+ /* Try to merge it with old one: */
for (i = 0; i < nr_range; i++) {
+ u64 final_start, final_end;
u64 common_start, common_end;
if (!range[i].end)
@@ -44,16 +45,14 @@ int add_range_with_merge(struct range *range, int az, int nr_range,
if (common_start > common_end)
continue;
- /* new start/end, will add it back at last */
- start = min(range[i].start, start);
- end = max(range[i].end, end);
+ final_start = min(range[i].start, start);
+ final_end = max(range[i].end, end);
- memmove(&range[i], &range[i + 1],
- (nr_range - (i + 1)) * sizeof(range[i]));
- range[nr_range - 1].start = 0;
- range[nr_range - 1].end = 0;
- nr_range--;
- i--;
+ /* clear it and add it back for further merge */
+ range[i].start = 0;
+ range[i].end = 0;
+ return add_range_with_merge(range, az, nr_range,
+ final_start, final_end);
}
/* Need to add it: */
diff --git a/trunk/kernel/sched/core.c b/trunk/kernel/sched/core.c
index e8b335016c52..58453b8272fd 100644
--- a/trunk/kernel/sched/core.c
+++ b/trunk/kernel/sched/core.c
@@ -633,19 +633,7 @@ void wake_up_nohz_cpu(int cpu)
static inline bool got_nohz_idle_kick(void)
{
int cpu = smp_processor_id();
-
- if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
- return false;
-
- if (idle_cpu(cpu) && !need_resched())
- return true;
-
- /*
- * We can't run Idle Load Balance on this CPU for this time so we
- * cancel it and clear NOHZ_BALANCE_KICK
- */
- clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
- return false;
+ return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
}
#else /* CONFIG_NO_HZ_COMMON */
@@ -1405,9 +1393,8 @@ static void sched_ttwu_pending(void)
void scheduler_ipi(void)
{
- if (llist_empty(&this_rq()->wake_list)
- && !tick_nohz_full_cpu(smp_processor_id())
- && !got_nohz_idle_kick())
+ if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()
+ && !tick_nohz_full_cpu(smp_processor_id()))
return;
/*
@@ -1430,7 +1417,7 @@ void scheduler_ipi(void)
/*
* Check if someone kicked us for doing the nohz idle load balance.
*/
- if (unlikely(got_nohz_idle_kick())) {
+ if (unlikely(got_nohz_idle_kick() && !need_resched())) {
this_rq()->idle_balance = 1;
raise_softirq_irqoff(SCHED_SOFTIRQ);
}
@@ -4758,7 +4745,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
*/
idle->sched_class = &idle_sched_class;
ftrace_graph_init_idle_task(idle, cpu);
- vtime_init_idle(idle, cpu);
+ vtime_init_idle(idle);
#if defined(CONFIG_SMP)
sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
#endif
diff --git a/trunk/kernel/sched/cputime.c b/trunk/kernel/sched/cputime.c
index b5ccba22603b..cc2dc3eea8a3 100644
--- a/trunk/kernel/sched/cputime.c
+++ b/trunk/kernel/sched/cputime.c
@@ -747,17 +747,17 @@ void arch_vtime_task_switch(struct task_struct *prev)
write_seqlock(¤t->vtime_seqlock);
current->vtime_snap_whence = VTIME_SYS;
- current->vtime_snap = sched_clock_cpu(smp_processor_id());
+ current->vtime_snap = sched_clock();
write_sequnlock(¤t->vtime_seqlock);
}
-void vtime_init_idle(struct task_struct *t, int cpu)
+void vtime_init_idle(struct task_struct *t)
{
unsigned long flags;
write_seqlock_irqsave(&t->vtime_seqlock, flags);
t->vtime_snap_whence = VTIME_SYS;
- t->vtime_snap = sched_clock_cpu(cpu);
+ t->vtime_snap = sched_clock();
write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
}
diff --git a/trunk/kernel/time/tick-broadcast.c b/trunk/kernel/time/tick-broadcast.c
index 20d6fba70652..0c739423b0f9 100644
--- a/trunk/kernel/time/tick-broadcast.c
+++ b/trunk/kernel/time/tick-broadcast.c
@@ -599,6 +599,8 @@ void tick_broadcast_oneshot_control(unsigned long reason)
} else {
if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
+ if (dev->next_event.tv64 == KTIME_MAX)
+ goto out;
/*
* The cpu which was handling the broadcast
* timer marked this cpu in the broadcast
@@ -612,11 +614,6 @@ void tick_broadcast_oneshot_control(unsigned long reason)
tick_broadcast_pending_mask))
goto out;
- /*
- * Bail out if there is no next event.
- */
- if (dev->next_event.tv64 == KTIME_MAX)
- goto out;
/*
* If the pending bit is not set, then we are
* either the CPU handling the broadcast
@@ -701,6 +698,10 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
bc->event_handler = tick_handle_oneshot_broadcast;
+ /* Take the do_timer update */
+ if (!tick_nohz_full_cpu(cpu))
+ tick_do_timer_cpu = cpu;
+
/*
* We must be careful here. There might be other CPUs
* waiting for periodic broadcast. We need to set the
diff --git a/trunk/kernel/time/tick-sched.c b/trunk/kernel/time/tick-sched.c
index 0cf1c1453181..f4208138fbf4 100644
--- a/trunk/kernel/time/tick-sched.c
+++ b/trunk/kernel/time/tick-sched.c
@@ -306,7 +306,7 @@ static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb,
* we can't safely shutdown that CPU.
*/
if (have_nohz_full_mask && tick_do_timer_cpu == cpu)
- return NOTIFY_BAD;
+ return -EINVAL;
break;
}
return NOTIFY_OK;
diff --git a/trunk/mm/readahead.c b/trunk/mm/readahead.c
index 829a77c62834..daed28dd5830 100644
--- a/trunk/mm/readahead.c
+++ b/trunk/mm/readahead.c
@@ -48,7 +48,7 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping,
if (!trylock_page(page))
BUG();
page->mapping = mapping;
- do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ do_invalidatepage(page, 0);
page->mapping = NULL;
unlock_page(page);
}
diff --git a/trunk/mm/slab_common.c b/trunk/mm/slab_common.c
index 2d414508e9ec..ff3218a0f5e1 100644
--- a/trunk/mm/slab_common.c
+++ b/trunk/mm/slab_common.c
@@ -373,10 +373,8 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
int index;
- if (size > KMALLOC_MAX_SIZE) {
- WARN_ON_ONCE(!(flags & __GFP_NOWARN));
+ if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
return NULL;
- }
if (size <= 192) {
if (!size)
diff --git a/trunk/mm/truncate.c b/trunk/mm/truncate.c
index e2e8a8a7eb9d..c75b736e54b7 100644
--- a/trunk/mm/truncate.c
+++ b/trunk/mm/truncate.c
@@ -26,8 +26,7 @@
/**
* do_invalidatepage - invalidate part or all of a page
* @page: the page which is affected
- * @offset: start of the range to invalidate
- * @length: length of the range to invalidate
+ * @offset: the index of the truncation point
*
* do_invalidatepage() is called when all or part of the page has become
* invalidated by a truncate operation.
@@ -38,18 +37,24 @@
* point. Because the caller is about to free (and possibly reuse) those
* blocks on-disk.
*/
-void do_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+void do_invalidatepage(struct page *page, unsigned long offset)
{
- void (*invalidatepage)(struct page *, unsigned int, unsigned int);
-
+ void (*invalidatepage)(struct page *, unsigned long);
invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
if (!invalidatepage)
invalidatepage = block_invalidatepage;
#endif
if (invalidatepage)
- (*invalidatepage)(page, offset, length);
+ (*invalidatepage)(page, offset);
+}
+
+static inline void truncate_partial_page(struct page *page, unsigned partial)
+{
+ zero_user_segment(page, partial, PAGE_CACHE_SIZE);
+ cleancache_invalidate_page(page->mapping, page);
+ if (page_has_private(page))
+ do_invalidatepage(page, partial);
}
/*
@@ -98,7 +103,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
return -EIO;
if (page_has_private(page))
- do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ do_invalidatepage(page, 0);
cancel_dirty_page(page, PAGE_CACHE_SIZE);
@@ -180,11 +185,11 @@ int invalidate_inode_page(struct page *page)
* truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
* @mapping: mapping to truncate
* @lstart: offset from which to truncate
- * @lend: offset to which to truncate (inclusive)
+ * @lend: offset to which to truncate
*
* Truncate the page cache, removing the pages that are between
- * specified offsets (and zeroing out partial pages
- * if lstart or lend + 1 is not page aligned).
+ * specified offsets (and zeroing out partial page
+ * (if lstart is not page aligned)).
*
* Truncate takes two passes - the first pass is nonblocking. It will not
* block on page locks and it will not block on writeback. The second pass
@@ -195,58 +200,35 @@ int invalidate_inode_page(struct page *page)
* We pass down the cache-hot hint to the page freeing code. Even if the
* mapping is large, it is probably the case that the final pages are the most
* recently touched, and freeing happens in ascending file offset order.
- *
- * Note that since ->invalidatepage() accepts range to invalidate
- * truncate_inode_pages_range is able to handle cases where lend + 1 is not
- * page aligned properly.
*/
void truncate_inode_pages_range(struct address_space *mapping,
loff_t lstart, loff_t lend)
{
- pgoff_t start; /* inclusive */
- pgoff_t end; /* exclusive */
- unsigned int partial_start; /* inclusive */
- unsigned int partial_end; /* exclusive */
- struct pagevec pvec;
- pgoff_t index;
- int i;
+ const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
+ const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
+ struct pagevec pvec;
+ pgoff_t index;
+ pgoff_t end;
+ int i;
cleancache_invalidate_inode(mapping);
if (mapping->nrpages == 0)
return;
- /* Offsets within partial pages */
- partial_start = lstart & (PAGE_CACHE_SIZE - 1);
- partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
-
- /*
- * 'start' and 'end' always covers the range of pages to be fully
- * truncated. Partial pages are covered with 'partial_start' at the
- * start of the range and 'partial_end' at the end of the range.
- * Note that 'end' is exclusive while 'lend' is inclusive.
- */
- start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (lend == -1)
- /*
- * lend == -1 indicates end-of-file so we have to set 'end'
- * to the highest possible pgoff_t and since the type is
- * unsigned we're using -1.
- */
- end = -1;
- else
- end = (lend + 1) >> PAGE_CACHE_SHIFT;
+ BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
+ end = (lend >> PAGE_CACHE_SHIFT);
pagevec_init(&pvec, 0);
index = start;
- while (index < end && pagevec_lookup(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
+ while (index <= end && pagevec_lookup(&pvec, mapping, index,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
/* We rely upon deletion not changing page->index */
index = page->index;
- if (index >= end)
+ if (index > end)
break;
if (!trylock_page(page))
@@ -265,56 +247,27 @@ void truncate_inode_pages_range(struct address_space *mapping,
index++;
}
- if (partial_start) {
+ if (partial) {
struct page *page = find_lock_page(mapping, start - 1);
if (page) {
- unsigned int top = PAGE_CACHE_SIZE;
- if (start > end) {
- /* Truncation within a single page */
- top = partial_end;
- partial_end = 0;
- }
wait_on_page_writeback(page);
- zero_user_segment(page, partial_start, top);
- cleancache_invalidate_page(mapping, page);
- if (page_has_private(page))
- do_invalidatepage(page, partial_start,
- top - partial_start);
+ truncate_partial_page(page, partial);
unlock_page(page);
page_cache_release(page);
}
}
- if (partial_end) {
- struct page *page = find_lock_page(mapping, end);
- if (page) {
- wait_on_page_writeback(page);
- zero_user_segment(page, 0, partial_end);
- cleancache_invalidate_page(mapping, page);
- if (page_has_private(page))
- do_invalidatepage(page, 0,
- partial_end);
- unlock_page(page);
- page_cache_release(page);
- }
- }
- /*
- * If the truncation happened within a single page no pages
- * will be released, just zeroed, so we can bail out now.
- */
- if (start >= end)
- return;
index = start;
for ( ; ; ) {
cond_resched();
if (!pagevec_lookup(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
+ min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
if (index == start)
break;
index = start;
continue;
}
- if (index == start && pvec.pages[0]->index >= end) {
+ if (index == start && pvec.pages[0]->index > end) {
pagevec_release(&pvec);
break;
}
@@ -324,7 +277,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
/* We rely upon deletion not changing page->index */
index = page->index;
- if (index >= end)
+ if (index > end)
break;
lock_page(page);
@@ -645,8 +598,10 @@ void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
* This rounding is currently just for example: unmap_mapping_range
* expands its hole outwards, whereas we want it to contract the hole
* inwards. However, existing callers of truncate_pagecache_range are
- * doing their own page rounding first. Note that unmap_mapping_range
- * allows holelen 0 for all, and we allow lend -1 for end of file.
+ * doing their own page rounding first; and truncate_inode_pages_range
+ * currently BUGs if lend is not pagealigned-1 (it handles partial
+ * page at start of hole, but not partial page at end of hole). Note
+ * unmap_mapping_range allows holelen 0 for all, and we allow lend -1.
*/
/*
diff --git a/trunk/net/batman-adv/bat_iv_ogm.c b/trunk/net/batman-adv/bat_iv_ogm.c
index f680ee101878..071f288b77a8 100644
--- a/trunk/net/batman-adv/bat_iv_ogm.c
+++ b/trunk/net/batman-adv/bat_iv_ogm.c
@@ -29,21 +29,6 @@
#include "bat_algo.h"
#include "network-coding.h"
-/**
- * batadv_dup_status - duplicate status
- * @BATADV_NO_DUP: the packet is a duplicate
- * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
- * neighbor)
- * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
- * @BATADV_PROTECTED: originator is currently protected (after reboot)
- */
-enum batadv_dup_status {
- BATADV_NO_DUP = 0,
- BATADV_ORIG_DUP,
- BATADV_NEIGH_DUP,
- BATADV_PROTECTED,
-};
-
static struct batadv_neigh_node *
batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
const uint8_t *neigh_addr,
@@ -665,7 +650,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
const struct batadv_ogm_packet *batadv_ogm_packet,
struct batadv_hard_iface *if_incoming,
const unsigned char *tt_buff,
- enum batadv_dup_status dup_status)
+ int is_duplicate)
{
struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
struct batadv_neigh_node *router = NULL;
@@ -691,7 +676,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
continue;
}
- if (dup_status != BATADV_NO_DUP)
+ if (is_duplicate)
continue;
spin_lock_bh(&tmp_neigh_node->lq_update_lock);
@@ -733,7 +718,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv);
spin_unlock_bh(&neigh_node->lq_update_lock);
- if (dup_status == BATADV_NO_DUP) {
+ if (!is_duplicate) {
orig_node->last_ttl = batadv_ogm_packet->header.ttl;
neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
}
@@ -917,16 +902,15 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
return ret;
}
-/**
- * batadv_iv_ogm_update_seqnos - process a batman packet for all interfaces,
- * adjust the sequence number and find out whether it is a duplicate
- * @ethhdr: ethernet header of the packet
- * @batadv_ogm_packet: OGM packet to be considered
- * @if_incoming: interface on which the OGM packet was received
- *
- * Returns duplicate status as enum batadv_dup_status
+/* processes a batman packet for all interfaces, adjusts the sequence number and
+ * finds out whether it is a duplicate.
+ * returns:
+ * 1 the packet is a duplicate
+ * 0 the packet has not yet been received
+ * -1 the packet is old and has been received while the seqno window
+ * was protected. Caller should drop it.
*/
-static enum batadv_dup_status
+static int
batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
const struct batadv_ogm_packet *batadv_ogm_packet,
const struct batadv_hard_iface *if_incoming)
@@ -934,18 +918,17 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_orig_node *orig_node;
struct batadv_neigh_node *tmp_neigh_node;
- int is_dup;
+ int is_duplicate = 0;
int32_t seq_diff;
int need_update = 0;
- int set_mark;
- enum batadv_dup_status ret = BATADV_NO_DUP;
+ int set_mark, ret = -1;
uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
uint8_t *neigh_addr;
uint8_t packet_count;
orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
if (!orig_node)
- return BATADV_NO_DUP;
+ return 0;
spin_lock_bh(&orig_node->ogm_cnt_lock);
seq_diff = seqno - orig_node->last_real_seqno;
@@ -953,29 +936,22 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
/* signalize caller that the packet is to be dropped. */
if (!hlist_empty(&orig_node->neigh_list) &&
batadv_window_protected(bat_priv, seq_diff,
- &orig_node->batman_seqno_reset)) {
- ret = BATADV_PROTECTED;
+ &orig_node->batman_seqno_reset))
goto out;
- }
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node,
&orig_node->neigh_list, list) {
- neigh_addr = tmp_neigh_node->addr;
- is_dup = batadv_test_bit(tmp_neigh_node->real_bits,
- orig_node->last_real_seqno,
- seqno);
+ is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits,
+ orig_node->last_real_seqno,
+ seqno);
+ neigh_addr = tmp_neigh_node->addr;
if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
- tmp_neigh_node->if_incoming == if_incoming) {
+ tmp_neigh_node->if_incoming == if_incoming)
set_mark = 1;
- if (is_dup)
- ret = BATADV_NEIGH_DUP;
- } else {
+ else
set_mark = 0;
- if (is_dup && (ret != BATADV_NEIGH_DUP))
- ret = BATADV_ORIG_DUP;
- }
/* if the window moved, set the update flag. */
need_update |= batadv_bit_get_packet(bat_priv,
@@ -995,6 +971,8 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
orig_node->last_real_seqno = seqno;
}
+ ret = is_duplicate;
+
out:
spin_unlock_bh(&orig_node->ogm_cnt_lock);
batadv_orig_node_free_ref(orig_node);
@@ -1016,8 +994,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
int is_broadcast = 0, is_bidirect;
bool is_single_hop_neigh = false;
bool is_from_best_next_hop = false;
- int sameseq, similar_ttl;
- enum batadv_dup_status dup_status;
+ int is_duplicate, sameseq, simlar_ttl;
uint32_t if_incoming_seqno;
uint8_t *prev_sender;
@@ -1161,10 +1138,10 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
if (!orig_node)
return;
- dup_status = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet,
- if_incoming);
+ is_duplicate = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet,
+ if_incoming);
- if (dup_status == BATADV_PROTECTED) {
+ if (is_duplicate == -1) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: packet within seqno protection time (sender: %pM)\n",
ethhdr->h_source);
@@ -1234,12 +1211,11 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
* seqno and similar ttl as the non-duplicate
*/
sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
- similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
- if (is_bidirect && ((dup_status == BATADV_NO_DUP) ||
- (sameseq && similar_ttl)))
+ simlar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
+ if (is_bidirect && (!is_duplicate || (sameseq && simlar_ttl)))
batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
batadv_ogm_packet, if_incoming,
- tt_buff, dup_status);
+ tt_buff, is_duplicate);
/* is single hop (direct) neighbor */
if (is_single_hop_neigh) {
@@ -1260,7 +1236,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
goto out_neigh;
}
- if (dup_status == BATADV_NEIGH_DUP) {
+ if (is_duplicate) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: duplicate packet received\n");
goto out_neigh;
diff --git a/trunk/net/batman-adv/bridge_loop_avoidance.c b/trunk/net/batman-adv/bridge_loop_avoidance.c
index de27b3175cfd..379061c72549 100644
--- a/trunk/net/batman-adv/bridge_loop_avoidance.c
+++ b/trunk/net/batman-adv/bridge_loop_avoidance.c
@@ -1067,10 +1067,6 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
bat_priv->bla.claim_dest.group = group;
- /* purge everything when bridge loop avoidance is turned off */
- if (!atomic_read(&bat_priv->bridge_loop_avoidance))
- oldif = NULL;
-
if (!oldif) {
batadv_bla_purge_claims(bat_priv, NULL, 1);
batadv_bla_purge_backbone_gw(bat_priv, 1);
diff --git a/trunk/net/batman-adv/sysfs.c b/trunk/net/batman-adv/sysfs.c
index 929e304dacb2..15a22efa9a67 100644
--- a/trunk/net/batman-adv/sysfs.c
+++ b/trunk/net/batman-adv/sysfs.c
@@ -582,7 +582,10 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
(strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
goto out;
- rtnl_lock();
+ if (!rtnl_trylock()) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
if (status_tmp == BATADV_IF_NOT_IN_USE) {
batadv_hardif_disable_interface(hard_iface,
diff --git a/trunk/net/bluetooth/hci_core.c b/trunk/net/bluetooth/hci_core.c
index ace5e55fe5a3..d817c932d634 100644
--- a/trunk/net/bluetooth/hci_core.c
+++ b/trunk/net/bluetooth/hci_core.c
@@ -341,6 +341,7 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)
static void bredr_setup(struct hci_request *req)
{
+ struct hci_cp_delete_stored_link_key cp;
__le16 param;
__u8 flt_type;
@@ -364,6 +365,10 @@ static void bredr_setup(struct hci_request *req)
param = __constant_cpu_to_le16(0x7d00);
hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
+ bacpy(&cp.bdaddr, BDADDR_ANY);
+ cp.delete_all = 0x01;
+ hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
+
/* Read page scan parameters */
if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
@@ -597,16 +602,6 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
struct hci_dev *hdev = req->hdev;
u8 p;
- /* Only send HCI_Delete_Stored_Link_Key if it is supported */
- if (hdev->commands[6] & 0x80) {
- struct hci_cp_delete_stored_link_key cp;
-
- bacpy(&cp.bdaddr, BDADDR_ANY);
- cp.delete_all = 0x01;
- hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
- sizeof(cp), &cp);
- }
-
if (hdev->commands[5] & 0x10)
hci_setup_link_policy(req);
diff --git a/trunk/net/bluetooth/l2cap_core.c b/trunk/net/bluetooth/l2cap_core.c
index 68843a28a7af..24bee07ee4ce 100644
--- a/trunk/net/bluetooth/l2cap_core.c
+++ b/trunk/net/bluetooth/l2cap_core.c
@@ -2852,9 +2852,6 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
conn, code, ident, dlen);
- if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
- return NULL;
-
len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
count = min_t(unsigned int, conn->mtu, len);
@@ -4333,7 +4330,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
u16 type, result;
- if (cmd_len < sizeof(*rsp))
+ if (cmd_len != sizeof(*rsp))
return -EPROTO;
type = __le16_to_cpu(rsp->type);
diff --git a/trunk/net/bridge/br_multicast.c b/trunk/net/bridge/br_multicast.c
index d6448e35e027..81f2389f78eb 100644
--- a/trunk/net/bridge/br_multicast.c
+++ b/trunk/net/bridge/br_multicast.c
@@ -465,9 +465,8 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
skb_set_transport_header(skb, skb->len);
mldq = (struct mld_msg *) icmp6_hdr(skb);
- interval = ipv6_addr_any(group) ?
- br->multicast_query_response_interval :
- br->multicast_last_member_interval;
+ interval = ipv6_addr_any(group) ? br->multicast_last_member_interval :
+ br->multicast_query_response_interval;
mldq->mld_type = ICMPV6_MGM_QUERY;
mldq->mld_code = 0;
diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c
index faebb398fb46..fc1e289397f5 100644
--- a/trunk/net/core/dev.c
+++ b/trunk/net/core/dev.c
@@ -791,40 +791,6 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex)
}
EXPORT_SYMBOL(dev_get_by_index);
-/**
- * netdev_get_name - get a netdevice name, knowing its ifindex.
- * @net: network namespace
- * @name: a pointer to the buffer where the name will be stored.
- * @ifindex: the ifindex of the interface to get the name from.
- *
- * The use of raw_seqcount_begin() and cond_resched() before
- * retrying is required as we want to give the writers a chance
- * to complete when CONFIG_PREEMPT is not set.
- */
-int netdev_get_name(struct net *net, char *name, int ifindex)
-{
- struct net_device *dev;
- unsigned int seq;
-
-retry:
- seq = raw_seqcount_begin(&devnet_rename_seq);
- rcu_read_lock();
- dev = dev_get_by_index_rcu(net, ifindex);
- if (!dev) {
- rcu_read_unlock();
- return -ENODEV;
- }
-
- strcpy(name, dev->name);
- rcu_read_unlock();
- if (read_seqcount_retry(&devnet_rename_seq, seq)) {
- cond_resched();
- goto retry;
- }
-
- return 0;
-}
-
/**
* dev_getbyhwaddr_rcu - find a device by its hardware address
* @net: the applicable net namespace
diff --git a/trunk/net/core/dev_ioctl.c b/trunk/net/core/dev_ioctl.c
index 5b7d0e1d0664..6cc0481faade 100644
--- a/trunk/net/core/dev_ioctl.c
+++ b/trunk/net/core/dev_ioctl.c
@@ -19,8 +19,9 @@
static int dev_ifname(struct net *net, struct ifreq __user *arg)
{
+ struct net_device *dev;
struct ifreq ifr;
- int error;
+ unsigned seq;
/*
* Fetch the caller's info block.
@@ -29,9 +30,19 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT;
- error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
- if (error)
- return error;
+retry:
+ seq = read_seqcount_begin(&devnet_rename_seq);
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
+ if (!dev) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
+
+ strcpy(ifr.ifr_name, dev->name);
+ rcu_read_unlock();
+ if (read_seqcount_retry(&devnet_rename_seq, seq))
+ goto retry;
if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
return -EFAULT;
diff --git a/trunk/net/core/ethtool.c b/trunk/net/core/ethtool.c
index ce91766eeca9..22efdaa76ebf 100644
--- a/trunk/net/core/ethtool.c
+++ b/trunk/net/core/ethtool.c
@@ -60,10 +60,10 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
[NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6",
[NETIF_F_HIGHDMA_BIT] = "highdma",
[NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist",
- [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-hw-insert",
+ [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-ctag-hw-insert",
- [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-hw-parse",
- [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-filter",
+ [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-ctag-hw-parse",
+ [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-ctag-filter",
[NETIF_F_HW_VLAN_STAG_TX_BIT] = "tx-vlan-stag-hw-insert",
[NETIF_F_HW_VLAN_STAG_RX_BIT] = "rx-vlan-stag-hw-parse",
[NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter",
diff --git a/trunk/net/core/skbuff.c b/trunk/net/core/skbuff.c
index 1c1738cc4538..cfd777bd6bd0 100644
--- a/trunk/net/core/skbuff.c
+++ b/trunk/net/core/skbuff.c
@@ -483,8 +483,15 @@ EXPORT_SYMBOL(skb_add_rx_frag);
static void skb_drop_list(struct sk_buff **listp)
{
- kfree_skb_list(*listp);
+ struct sk_buff *list = *listp;
+
*listp = NULL;
+
+ do {
+ struct sk_buff *this = list;
+ list = list->next;
+ kfree_skb(this);
+ } while (list);
}
static inline void skb_drop_fraglist(struct sk_buff *skb)
@@ -644,17 +651,6 @@ void kfree_skb(struct sk_buff *skb)
}
EXPORT_SYMBOL(kfree_skb);
-void kfree_skb_list(struct sk_buff *segs)
-{
- while (segs) {
- struct sk_buff *next = segs->next;
-
- kfree_skb(segs);
- segs = next;
- }
-}
-EXPORT_SYMBOL(kfree_skb_list);
-
/**
* skb_tx_error - report an sk_buff xmit error
* @skb: buffer that triggered an error
diff --git a/trunk/net/core/sock.c b/trunk/net/core/sock.c
index d6d024cfaaaf..88868a9d21da 100644
--- a/trunk/net/core/sock.c
+++ b/trunk/net/core/sock.c
@@ -571,7 +571,9 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
struct net *net = sock_net(sk);
+ struct net_device *dev;
char devname[IFNAMSIZ];
+ unsigned seq;
if (sk->sk_bound_dev_if == 0) {
len = 0;
@@ -582,9 +584,20 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
if (len < IFNAMSIZ)
goto out;
- ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
- if (ret)
+retry:
+ seq = read_seqcount_begin(&devnet_rename_seq);
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
+ ret = -ENODEV;
+ if (!dev) {
+ rcu_read_unlock();
goto out;
+ }
+
+ strcpy(devname, dev->name);
+ rcu_read_unlock();
+ if (read_seqcount_retry(&devnet_rename_seq, seq))
+ goto retry;
len = strlen(devname) + 1;
diff --git a/trunk/net/ipv4/gre.c b/trunk/net/ipv4/gre.c
index 7856d1651d05..b2e805af9b87 100644
--- a/trunk/net/ipv4/gre.c
+++ b/trunk/net/ipv4/gre.c
@@ -178,7 +178,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
err = __skb_linearize(skb);
if (err) {
- kfree_skb_list(segs);
+ kfree_skb(segs);
segs = ERR_PTR(err);
goto out;
}
diff --git a/trunk/net/ipv4/ip_tunnel.c b/trunk/net/ipv4/ip_tunnel.c
index 7fa8f08fa7ae..be2f8da0ae8e 100644
--- a/trunk/net/ipv4/ip_tunnel.c
+++ b/trunk/net/ipv4/ip_tunnel.c
@@ -853,7 +853,7 @@ void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
-int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
+int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname)
{
struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
@@ -899,7 +899,7 @@ static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head)
unregister_netdevice_queue(itn->fb_tunnel_dev, head);
}
-void ip_tunnel_delete_net(struct ip_tunnel_net *itn)
+void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn)
{
LIST_HEAD(list);
diff --git a/trunk/net/ipv4/ip_vti.c b/trunk/net/ipv4/ip_vti.c
index c118f6b576bb..9d2bdb2c1d3f 100644
--- a/trunk/net/ipv4/ip_vti.c
+++ b/trunk/net/ipv4/ip_vti.c
@@ -361,7 +361,8 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
tunnel->err_count = 0;
}
- memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
nf_reset(skb);
diff --git a/trunk/net/ipv4/netfilter/ipt_ULOG.c b/trunk/net/ipv4/netfilter/ipt_ULOG.c
index 32b0e978c8e0..ff4b781b1056 100644
--- a/trunk/net/ipv4/netfilter/ipt_ULOG.c
+++ b/trunk/net/ipv4/netfilter/ipt_ULOG.c
@@ -125,16 +125,15 @@ static void ulog_send(struct ulog_net *ulog, unsigned int nlgroupnum)
/* timer function to flush queue in flushtimeout time */
static void ulog_timer(unsigned long data)
{
- unsigned int groupnum = *((unsigned int *)data);
struct ulog_net *ulog = container_of((void *)data,
struct ulog_net,
- nlgroup[groupnum]);
+ nlgroup[*(unsigned int *)data]);
pr_debug("timer function called, calling ulog_send\n");
/* lock to protect against somebody modifying our structure
* from ipt_ulog_target at the same time */
spin_lock_bh(&ulog->lock);
- ulog_send(ulog, groupnum);
+ ulog_send(ulog, data);
spin_unlock_bh(&ulog->lock);
}
@@ -408,11 +407,8 @@ static int __net_init ulog_tg_net_init(struct net *net)
spin_lock_init(&ulog->lock);
/* initialize ulog_buffers */
- for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
- ulog->nlgroup[i] = i;
- setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer,
- (unsigned long)&ulog->nlgroup[i]);
- }
+ for (i = 0; i < ULOG_MAXNLGROUPS; i++)
+ setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer, i);
ulog->nflognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg);
if (!ulog->nflognl)
diff --git a/trunk/net/ipv4/tcp_ipv4.c b/trunk/net/ipv4/tcp_ipv4.c
index 7999fc55c83b..719652305a29 100644
--- a/trunk/net/ipv4/tcp_ipv4.c
+++ b/trunk/net/ipv4/tcp_ipv4.c
@@ -1003,7 +1003,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_info *md5sig;
- key = tcp_md5_do_lookup(sk, addr, family);
+ key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
if (key) {
/* Pre-existing entry - just update that one. */
memcpy(key->key, newkey, newkeylen);
@@ -1048,7 +1048,7 @@ int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
struct tcp_md5sig_key *key;
struct tcp_md5sig_info *md5sig;
- key = tcp_md5_do_lookup(sk, addr, family);
+ key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
if (!key)
return -ENOENT;
hlist_del_rcu(&key->node);
diff --git a/trunk/net/ipv6/addrconf.c b/trunk/net/ipv6/addrconf.c
index 4ab4c38958c6..1bbf744c2cc3 100644
--- a/trunk/net/ipv6/addrconf.c
+++ b/trunk/net/ipv6/addrconf.c
@@ -2655,9 +2655,6 @@ static void init_loopback(struct net_device *dev)
if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
continue;
- if (sp_ifa->rt)
- continue;
-
sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
/* Failure cases are ignored */
@@ -4306,7 +4303,6 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
struct inet6_ifaddr *ifp;
struct net_device *dev = idev->dev;
bool update_rs = false;
- struct in6_addr ll_addr;
if (token == NULL)
return -EINVAL;
@@ -4326,9 +4322,11 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
write_unlock_bh(&idev->lock);
- if (!idev->dead && (idev->if_flags & IF_READY) &&
- !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
- IFA_F_OPTIMISTIC)) {
+ if (!idev->dead && (idev->if_flags & IF_READY)) {
+ struct in6_addr ll_addr;
+
+ ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
+ IFA_F_OPTIMISTIC);
/* If we're not ready, then normal ifup will take care
* of this. Otherwise, we need to request our rs here.
diff --git a/trunk/net/ipv6/ip6_output.c b/trunk/net/ipv6/ip6_output.c
index d5d20cde8d92..dae1949019d7 100644
--- a/trunk/net/ipv6/ip6_output.c
+++ b/trunk/net/ipv6/ip6_output.c
@@ -381,8 +381,9 @@ int ip6_forward(struct sk_buff *skb)
* cannot be fragmented, because there is no warranty
* that different fragments will go along one path. --ANK
*/
- if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
- if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
+ if (opt->ra) {
+ u8 *ptr = skb_network_header(skb) + opt->ra;
+ if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
return 0;
}
@@ -821,17 +822,11 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
const struct flowi6 *fl6)
{
struct ipv6_pinfo *np = inet6_sk(sk);
- struct rt6_info *rt;
+ struct rt6_info *rt = (struct rt6_info *)dst;
if (!dst)
goto out;
- if (dst->ops->family != AF_INET6) {
- dst_release(dst);
- return NULL;
- }
-
- rt = (struct rt6_info *)dst;
/* Yes, checking route validity in not connected
* case is not very simple. Take into account,
* that we do not support routing by source, TOS,
diff --git a/trunk/net/ipv6/ndisc.c b/trunk/net/ipv6/ndisc.c
index ca4ffcc287f1..2712ab22a174 100644
--- a/trunk/net/ipv6/ndisc.c
+++ b/trunk/net/ipv6/ndisc.c
@@ -1493,7 +1493,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
*/
if (ha)
- ndisc_fill_addr_option(buff, ND_OPT_TARGET_LL_ADDR, ha);
+ ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR, ha);
/*
* build redirect option and copy skb over to the new packet.
diff --git a/trunk/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/trunk/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index c9b6a6e6a1e8..97bcf2bae857 100644
--- a/trunk/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/trunk/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -204,7 +204,7 @@ static unsigned int __ipv6_conntrack_in(struct net *net,
if (ct != NULL && !nf_ct_is_untracked(ct)) {
help = nfct_help(ct);
if ((help && help->helper) || !nf_ct_is_confirmed(ct)) {
- nf_conntrack_get_reasm(reasm);
+ nf_conntrack_get_reasm(skb);
NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
(struct net_device *)in,
(struct net_device *)out,
diff --git a/trunk/net/key/af_key.c b/trunk/net/key/af_key.c
index 9da862070dd8..c5fbd7589681 100644
--- a/trunk/net/key/af_key.c
+++ b/trunk/net/key/af_key.c
@@ -1710,7 +1710,6 @@ static int key_notify_sa_flush(const struct km_event *c)
hdr->sadb_msg_version = PF_KEY_V2;
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
- hdr->sadb_msg_reserved = 0;
pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
@@ -2700,7 +2699,6 @@ static int key_notify_policy_flush(const struct km_event *c)
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
- hdr->sadb_msg_reserved = 0;
pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;
diff --git a/trunk/net/l2tp/l2tp_ppp.c b/trunk/net/l2tp/l2tp_ppp.c
index 8dec6876dc50..637a341c1e2d 100644
--- a/trunk/net/l2tp/l2tp_ppp.c
+++ b/trunk/net/l2tp/l2tp_ppp.c
@@ -346,19 +346,19 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
skb_put(skb, 2);
/* Copy user data into skb */
- error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov,
- total_len);
+ error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
if (error < 0) {
kfree_skb(skb);
goto error_put_sess_tun;
}
+ skb_put(skb, total_len);
l2tp_xmit_skb(session, skb, session->hdr_len);
sock_put(ps->tunnel_sock);
sock_put(sk);
- return total_len;
+ return error;
error_put_sess_tun:
sock_put(ps->tunnel_sock);
diff --git a/trunk/net/mac80211/cfg.c b/trunk/net/mac80211/cfg.c
index 4fdb306e42e0..1a89c80e6407 100644
--- a/trunk/net/mac80211/cfg.c
+++ b/trunk/net/mac80211/cfg.c
@@ -1057,12 +1057,6 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
- if (sdata->wdev.cac_started) {
- cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
- cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_ABORTED,
- GFP_KERNEL);
- }
-
drv_stop_ap(sdata->local, sdata);
/* free all potentially still buffered bcast frames */
diff --git a/trunk/net/mac80211/ieee80211_i.h b/trunk/net/mac80211/ieee80211_i.h
index 9ca8e3278cc0..44be28cfc6c4 100644
--- a/trunk/net/mac80211/ieee80211_i.h
+++ b/trunk/net/mac80211/ieee80211_i.h
@@ -1497,11 +1497,10 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
ieee80211_tx_skb_tid(sdata, skb, 7);
}
-u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
+u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action,
struct ieee802_11_elems *elems,
u64 filter, u32 crc);
-static inline void ieee802_11_parse_elems(const u8 *start, size_t len,
- bool action,
+static inline void ieee802_11_parse_elems(u8 *start, size_t len, bool action,
struct ieee802_11_elems *elems)
{
ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0);
diff --git a/trunk/net/mac80211/mlme.c b/trunk/net/mac80211/mlme.c
index 741448b30825..a8c2130c8ba4 100644
--- a/trunk/net/mac80211/mlme.c
+++ b/trunk/net/mac80211/mlme.c
@@ -2522,11 +2522,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
u16 capab_info, aid;
struct ieee802_11_elems elems;
struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
- const struct cfg80211_bss_ies *bss_ies = NULL;
- struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
u32 changed = 0;
int err;
- bool ret;
/* AssocResp and ReassocResp have identical structure */
@@ -2557,69 +2554,6 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
ifmgd->aid = aid;
- /*
- * Some APs are erroneously not including some information in their
- * (re)association response frames. Try to recover by using the data
- * from the beacon or probe response. This seems to afflict mobile
- * 2G/3G/4G wifi routers, reported models include the "Onda PN51T",
- * "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device.
- */
- if ((assoc_data->wmm && !elems.wmm_param) ||
- (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
- (!elems.ht_cap_elem || !elems.ht_operation)) ||
- (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
- (!elems.vht_cap_elem || !elems.vht_operation))) {
- const struct cfg80211_bss_ies *ies;
- struct ieee802_11_elems bss_elems;
-
- rcu_read_lock();
- ies = rcu_dereference(cbss->ies);
- if (ies)
- bss_ies = kmemdup(ies, sizeof(*ies) + ies->len,
- GFP_ATOMIC);
- rcu_read_unlock();
- if (!bss_ies)
- return false;
-
- ieee802_11_parse_elems(bss_ies->data, bss_ies->len,
- false, &bss_elems);
- if (assoc_data->wmm &&
- !elems.wmm_param && bss_elems.wmm_param) {
- elems.wmm_param = bss_elems.wmm_param;
- sdata_info(sdata,
- "AP bug: WMM param missing from AssocResp\n");
- }
-
- /*
- * Also check if we requested HT/VHT, otherwise the AP doesn't
- * have to include the IEs in the (re)association response.
- */
- if (!elems.ht_cap_elem && bss_elems.ht_cap_elem &&
- !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
- elems.ht_cap_elem = bss_elems.ht_cap_elem;
- sdata_info(sdata,
- "AP bug: HT capability missing from AssocResp\n");
- }
- if (!elems.ht_operation && bss_elems.ht_operation &&
- !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
- elems.ht_operation = bss_elems.ht_operation;
- sdata_info(sdata,
- "AP bug: HT operation missing from AssocResp\n");
- }
- if (!elems.vht_cap_elem && bss_elems.vht_cap_elem &&
- !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
- elems.vht_cap_elem = bss_elems.vht_cap_elem;
- sdata_info(sdata,
- "AP bug: VHT capa missing from AssocResp\n");
- }
- if (!elems.vht_operation && bss_elems.vht_operation &&
- !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
- elems.vht_operation = bss_elems.vht_operation;
- sdata_info(sdata,
- "AP bug: VHT operation missing from AssocResp\n");
- }
- }
-
/*
* We previously checked these in the beacon/probe response, so
* they should be present here. This is just a safety net.
@@ -2627,17 +2561,15 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
(!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) {
sdata_info(sdata,
- "HT AP is missing WMM params or HT capability/operation\n");
- ret = false;
- goto out;
+ "HT AP is missing WMM params or HT capability/operation in AssocResp\n");
+ return false;
}
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
(!elems.vht_cap_elem || !elems.vht_operation)) {
sdata_info(sdata,
- "VHT AP is missing VHT capability/operation\n");
- ret = false;
- goto out;
+ "VHT AP is missing VHT capability/operation in AssocResp\n");
+ return false;
}
mutex_lock(&sdata->local->sta_mtx);
@@ -2648,8 +2580,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
sta = sta_info_get(sdata, cbss->bssid);
if (WARN_ON(!sta)) {
mutex_unlock(&sdata->local->sta_mtx);
- ret = false;
- goto out;
+ return false;
}
sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)];
@@ -2702,8 +2633,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
sta->sta.addr);
WARN_ON(__sta_info_destroy(sta));
mutex_unlock(&sdata->local->sta_mtx);
- ret = false;
- goto out;
+ return false;
}
mutex_unlock(&sdata->local->sta_mtx);
@@ -2743,10 +2673,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
ieee80211_sta_reset_beacon_monitor(sdata);
- ret = true;
- out:
- kfree(bss_ies);
- return ret;
+ return true;
}
static enum rx_mgmt_action __must_check
diff --git a/trunk/net/mac80211/rate.c b/trunk/net/mac80211/rate.c
index a02bef35b134..d3f414fe67e0 100644
--- a/trunk/net/mac80211/rate.c
+++ b/trunk/net/mac80211/rate.c
@@ -615,7 +615,7 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
if (rates[i].idx < 0)
break;
- rate_idx_match_mask(&rates[i], sband, chan_width, mask,
+ rate_idx_match_mask(&rates[i], sband, mask, chan_width,
mcs_mask);
}
}
diff --git a/trunk/net/mac80211/util.c b/trunk/net/mac80211/util.c
index 72e6292955bb..27e07150eb46 100644
--- a/trunk/net/mac80211/util.c
+++ b/trunk/net/mac80211/util.c
@@ -661,12 +661,12 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_queue_delayed_work);
-u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
+u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action,
struct ieee802_11_elems *elems,
u64 filter, u32 crc)
{
size_t left = len;
- const u8 *pos = start;
+ u8 *pos = start;
bool calc_crc = filter != 0;
DECLARE_BITMAP(seen_elems, 256);
const u8 *ie;
diff --git a/trunk/net/netfilter/ipvs/ip_vs_core.c b/trunk/net/netfilter/ipvs/ip_vs_core.c
index 23b8eb53a569..05565d2b3a61 100644
--- a/trunk/net/netfilter/ipvs/ip_vs_core.c
+++ b/trunk/net/netfilter/ipvs/ip_vs_core.c
@@ -1442,8 +1442,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
/* do the statistics and put it back */
ip_vs_in_stats(cp, skb);
- if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol ||
- IPPROTO_SCTP == cih->protocol)
+ if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
offset += 2 * sizeof(__u16);
verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
diff --git a/trunk/net/netfilter/nf_conntrack_labels.c b/trunk/net/netfilter/nf_conntrack_labels.c
index 355d2ef08094..8fe2e99428b7 100644
--- a/trunk/net/netfilter/nf_conntrack_labels.c
+++ b/trunk/net/netfilter/nf_conntrack_labels.c
@@ -45,7 +45,7 @@ int nf_connlabel_set(struct nf_conn *ct, u16 bit)
if (test_bit(bit, labels->bits))
return 0;
- if (!test_and_set_bit(bit, labels->bits))
+ if (test_and_set_bit(bit, labels->bits))
nf_conntrack_event_cache(IPCT_LABEL, ct);
return 0;
diff --git a/trunk/net/netfilter/nf_conntrack_netlink.c b/trunk/net/netfilter/nf_conntrack_netlink.c
index ecf065f94032..6d0f8a17c5b7 100644
--- a/trunk/net/netfilter/nf_conntrack_netlink.c
+++ b/trunk/net/netfilter/nf_conntrack_netlink.c
@@ -1825,7 +1825,6 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
(1 << IPCT_ASSURED) |
(1 << IPCT_HELPER) |
- (1 << IPCT_LABEL) |
(1 << IPCT_PROTOINFO) |
(1 << IPCT_NATSEQADJ) |
(1 << IPCT_MARK),
diff --git a/trunk/net/netfilter/nf_nat_sip.c b/trunk/net/netfilter/nf_nat_sip.c
index dac11f73868e..96ccdf78a29f 100644
--- a/trunk/net/netfilter/nf_nat_sip.c
+++ b/trunk/net/netfilter/nf_nat_sip.c
@@ -230,10 +230,9 @@ static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
&ct->tuplehash[!dir].tuple.src.u3,
false);
if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
- poff, plen, buffer, buflen)) {
+ poff, plen, buffer, buflen))
nf_ct_helper_log(skb, ct, "cannot mangle received");
return NF_DROP;
- }
}
/* The rport= parameter (RFC 3581) contains the port number
diff --git a/trunk/net/netfilter/xt_TCPMSS.c b/trunk/net/netfilter/xt_TCPMSS.c
index 7011c71646f0..afaebc766933 100644
--- a/trunk/net/netfilter/xt_TCPMSS.c
+++ b/trunk/net/netfilter/xt_TCPMSS.c
@@ -45,22 +45,17 @@ optlen(const u_int8_t *opt, unsigned int offset)
static int
tcpmss_mangle_packet(struct sk_buff *skb,
- const struct xt_action_param *par,
+ const struct xt_tcpmss_info *info,
unsigned int in_mtu,
unsigned int tcphoff,
unsigned int minlen)
{
- const struct xt_tcpmss_info *info = par->targinfo;
struct tcphdr *tcph;
unsigned int tcplen, i;
__be16 oldval;
u16 newmss;
u8 *opt;
- /* This is a fragment, no TCP header is available */
- if (par->fragoff != 0)
- return XT_CONTINUE;
-
if (!skb_make_writable(skb, skb->len))
return -1;
@@ -130,17 +125,11 @@ tcpmss_mangle_packet(struct sk_buff *skb,
skb_put(skb, TCPOLEN_MSS);
- /*
- * IPv4: RFC 1122 states "If an MSS option is not received at
- * connection setup, TCP MUST assume a default send MSS of 536".
- * IPv6: RFC 2460 states IPv6 has a minimum MTU of 1280 and a minimum
- * length IPv6 header of 60, ergo the default MSS value is 1220
- * Since no MSS was provided, we must use the default values
+ /* RFC 879 states that the default MSS is 536 without specific
+ * knowledge that the destination host is prepared to accept larger.
+ * Since no MSS was provided, we MUST NOT set a value > 536.
*/
- if (par->family == NFPROTO_IPV4)
- newmss = min(newmss, (u16)536);
- else
- newmss = min(newmss, (u16)1220);
+ newmss = min(newmss, (u16)536);
opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
@@ -199,7 +188,7 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
__be16 newlen;
int ret;
- ret = tcpmss_mangle_packet(skb, par,
+ ret = tcpmss_mangle_packet(skb, par->targinfo,
tcpmss_reverse_mtu(skb, PF_INET),
iph->ihl * 4,
sizeof(*iph) + sizeof(struct tcphdr));
@@ -228,7 +217,7 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
if (tcphoff < 0)
return NF_DROP;
- ret = tcpmss_mangle_packet(skb, par,
+ ret = tcpmss_mangle_packet(skb, par->targinfo,
tcpmss_reverse_mtu(skb, PF_INET6),
tcphoff,
sizeof(*ipv6h) + sizeof(struct tcphdr));
diff --git a/trunk/net/netfilter/xt_TCPOPTSTRIP.c b/trunk/net/netfilter/xt_TCPOPTSTRIP.c
index b68fa191710f..1eb1a44bfd3d 100644
--- a/trunk/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/trunk/net/netfilter/xt_TCPOPTSTRIP.c
@@ -48,13 +48,11 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
return NF_DROP;
len = skb->len - tcphoff;
- if (len < (int)sizeof(struct tcphdr))
+ if (len < (int)sizeof(struct tcphdr) ||
+ tcp_hdr(skb)->doff * 4 > len)
return NF_DROP;
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
- if (tcph->doff * 4 > len)
- return NF_DROP;
-
opt = (u_int8_t *)tcph;
/*
diff --git a/trunk/net/packet/af_packet.c b/trunk/net/packet/af_packet.c
index 20a1bd0e6549..8ec1bca7f859 100644
--- a/trunk/net/packet/af_packet.c
+++ b/trunk/net/packet/af_packet.c
@@ -2851,11 +2851,12 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
return -EOPNOTSUPP;
uaddr->sa_family = AF_PACKET;
- memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
if (dev)
- strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
+ strncpy(uaddr->sa_data, dev->name, 14);
+ else
+ memset(uaddr->sa_data, 0, 14);
rcu_read_unlock();
*uaddr_len = sizeof(*uaddr);
diff --git a/trunk/net/sctp/outqueue.c b/trunk/net/sctp/outqueue.c
index be35e2dbcc9a..32a4625fef77 100644
--- a/trunk/net/sctp/outqueue.c
+++ b/trunk/net/sctp/outqueue.c
@@ -206,8 +206,6 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary,
*/
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
- memset(q, 0, sizeof(struct sctp_outq));
-
q->asoc = asoc;
INIT_LIST_HEAD(&q->out_chunk_list);
INIT_LIST_HEAD(&q->control_chunk_list);
@@ -215,7 +213,11 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
INIT_LIST_HEAD(&q->sacked);
INIT_LIST_HEAD(&q->abandoned);
+ q->fast_rtx = 0;
+ q->outstanding_bytes = 0;
q->empty = 1;
+ q->cork = 0;
+ q->out_qlen = 0;
}
/* Free the outqueue structure and any related pending chunks.
diff --git a/trunk/net/wireless/nl80211.c b/trunk/net/wireless/nl80211.c
index b14b7e3cb6e6..d5aed3bb3945 100644
--- a/trunk/net/wireless/nl80211.c
+++ b/trunk/net/wireless/nl80211.c
@@ -1564,17 +1564,12 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
struct cfg80211_registered_device *dev;
s64 filter_wiphy = -1;
bool split = false;
- struct nlattr **tb;
+ struct nlattr **tb = nl80211_fam.attrbuf;
int res;
- /* will be zeroed in nlmsg_parse() */
- tb = kmalloc(sizeof(*tb) * (NL80211_ATTR_MAX + 1), GFP_KERNEL);
- if (!tb)
- return -ENOMEM;
-
mutex_lock(&cfg80211_mutex);
res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
- tb, NL80211_ATTR_MAX, nl80211_policy);
+ tb, nl80211_fam.maxattr, nl80211_policy);
if (res == 0) {
split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP];
if (tb[NL80211_ATTR_WIPHY])
@@ -1588,7 +1583,6 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
netdev = dev_get_by_index(sock_net(skb->sk), ifidx);
if (!netdev) {
mutex_unlock(&cfg80211_mutex);
- kfree(tb);
return -ENODEV;
}
if (netdev->ieee80211_ptr) {
@@ -1599,7 +1593,6 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
dev_put(netdev);
}
}
- kfree(tb);
list_for_each_entry(dev, &cfg80211_rdev_list, list) {
if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk)))
diff --git a/trunk/sound/core/pcm_native.c b/trunk/sound/core/pcm_native.c
index f92818155958..ccfa383f1fda 100644
--- a/trunk/sound/core/pcm_native.c
+++ b/trunk/sound/core/pcm_native.c
@@ -1649,7 +1649,6 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
}
if (!snd_pcm_stream_linked(substream)) {
substream->group = group;
- group = NULL;
spin_lock_init(&substream->group->lock);
INIT_LIST_HEAD(&substream->group->substreams);
list_add_tail(&substream->link_list, &substream->group->substreams);
@@ -1664,7 +1663,8 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
_nolock:
snd_card_unref(substream1->pcm->card);
fput_light(file, fput_needed);
- kfree(group);
+ if (res < 0)
+ kfree(group);
return res;
}
diff --git a/trunk/sound/pci/hda/patch_cirrus.c b/trunk/sound/pci/hda/patch_cirrus.c
index cccaf9c7a7bb..bd8d46cca2b3 100644
--- a/trunk/sound/pci/hda/patch_cirrus.c
+++ b/trunk/sound/pci/hda/patch_cirrus.c
@@ -58,7 +58,6 @@ enum {
CS420X_GPIO_23,
CS420X_MBP101,
CS420X_MBP81,
- CS420X_MBA42,
CS420X_AUTO,
/* aliases */
CS420X_IMAC27_122 = CS420X_GPIO_23,
@@ -347,7 +346,6 @@ static const struct hda_model_fixup cs420x_models[] = {
{ .id = CS420X_APPLE, .name = "apple" },
{ .id = CS420X_MBP101, .name = "mbp101" },
{ .id = CS420X_MBP81, .name = "mbp81" },
- { .id = CS420X_MBA42, .name = "mba42" },
{}
};
@@ -363,7 +361,6 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
- SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42),
SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
{} /* terminator */
};
@@ -417,20 +414,6 @@ static const struct hda_pintbl mbp101_pincfgs[] = {
{} /* terminator */
};
-static const struct hda_pintbl mba42_pincfgs[] = {
- { 0x09, 0x012b4030 }, /* HP */
- { 0x0a, 0x400000f0 },
- { 0x0b, 0x90100120 }, /* speaker */
- { 0x0c, 0x400000f0 },
- { 0x0d, 0x90a00110 }, /* mic */
- { 0x0e, 0x400000f0 },
- { 0x0f, 0x400000f0 },
- { 0x10, 0x400000f0 },
- { 0x12, 0x400000f0 },
- { 0x15, 0x400000f0 },
- {} /* terminator */
-};
-
static void cs420x_fixup_gpio_13(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -499,12 +482,6 @@ static const struct hda_fixup cs420x_fixups[] = {
.chained = true,
.chain_id = CS420X_GPIO_13,
},
- [CS420X_MBA42] = {
- .type = HDA_FIXUP_PINS,
- .v.pins = mba42_pincfgs,
- .chained = true,
- .chain_id = CS420X_GPIO_13,
- },
};
static struct cs_spec *cs_alloc_spec(struct hda_codec *codec, int vendor_nid)
diff --git a/trunk/sound/pci/hda/patch_realtek.c b/trunk/sound/pci/hda/patch_realtek.c
index 403010c9e82e..02e22b4458d2 100644
--- a/trunk/sound/pci/hda/patch_realtek.c
+++ b/trunk/sound/pci/hda/patch_realtek.c
@@ -3483,7 +3483,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x05ca, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05cb, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05de, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05e0, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05e9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05ea, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05eb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -3495,8 +3494,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05f8, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
@@ -3599,8 +3596,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC269_FIXUP_INV_DMIC, .name = "inv-dmic"},
{.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
{.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
- {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
- {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
{}
};
@@ -4280,7 +4275,6 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
{.id = ALC662_FIXUP_ASUS_MODE7, .name = "asus-mode7"},
{.id = ALC662_FIXUP_ASUS_MODE8, .name = "asus-mode8"},
{.id = ALC662_FIXUP_INV_DMIC, .name = "inv-dmic"},
- {.id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
{}
};
diff --git a/trunk/sound/soc/samsung/idma.c b/trunk/sound/soc/samsung/idma.c
index ce1e1e16f250..6e5fed30aa27 100644
--- a/trunk/sound/soc/samsung/idma.c
+++ b/trunk/sound/soc/samsung/idma.c
@@ -257,6 +257,7 @@ static int idma_mmap(struct snd_pcm_substream *substream,
/* From snd_pcm_lib_mmap_iomem */
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_IO;
size = vma->vm_end - vma->vm_start;
offset = vma->vm_pgoff << PAGE_SHIFT;
ret = io_remap_pfn_range(vma, vma->vm_start,
diff --git a/trunk/sound/usb/card.c b/trunk/sound/usb/card.c
index 64952e2d3ed1..1a033177b83f 100644
--- a/trunk/sound/usb/card.c
+++ b/trunk/sound/usb/card.c
@@ -147,32 +147,14 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
return -EINVAL;
}
- alts = &iface->altsetting[0];
- altsd = get_iface_desc(alts);
-
- /*
- * Android with both accessory and audio interfaces enabled gets the
- * interface numbers wrong.
- */
- if ((chip->usb_id == USB_ID(0x18d1, 0x2d04) ||
- chip->usb_id == USB_ID(0x18d1, 0x2d05)) &&
- interface == 0 &&
- altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
- altsd->bInterfaceSubClass == USB_SUBCLASS_VENDOR_SPEC) {
- interface = 2;
- iface = usb_ifnum_to_if(dev, interface);
- if (!iface)
- return -EINVAL;
- alts = &iface->altsetting[0];
- altsd = get_iface_desc(alts);
- }
-
if (usb_interface_claimed(iface)) {
snd_printdd(KERN_INFO "%d:%d:%d: skipping, already claimed\n",
dev->devnum, ctrlif, interface);
return -EINVAL;
}
+ alts = &iface->altsetting[0];
+ altsd = get_iface_desc(alts);
if ((altsd->bInterfaceClass == USB_CLASS_AUDIO ||
altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) &&
altsd->bInterfaceSubClass == USB_SUBCLASS_MIDISTREAMING) {
diff --git a/trunk/sound/usb/mixer.c b/trunk/sound/usb/mixer.c
index d5438083fd6a..e5c7f9f20fdd 100644
--- a/trunk/sound/usb/mixer.c
+++ b/trunk/sound/usb/mixer.c
@@ -885,7 +885,6 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
case USB_ID(0x046d, 0x0808):
case USB_ID(0x046d, 0x0809):
- case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
case USB_ID(0x046d, 0x0991):