From 08ce45938c08a9e4303ff32d0df1aa23dbfcd5f1 Mon Sep 17 00:00:00 2001
From: Stephen Warren
Date: Tue, 31 May 2011 15:14:07 -0600
Subject: [PATCH]

--- yaml ---
r: 253103
b: refs/heads/master
c: f2a4d8ae4d40f6f5482d207f47fd4d53b3ae0ed4
h: refs/heads/master
i:
  253101: 20b681cb4842f9e91bf1f35a67451f69746e1631
  253099: 52a6990dbc9ef0e76bdebf82dbfd25f921b783e9
  253095: 4d968860c7cd0c9800cbb96aa500d0cfb5d003d3
  253087: 7e2269509d928bb77888844ac078c8333f78d14f
v: v3
---
 [refs] | 2 +-
 trunk/Documentation/kernel-parameters.txt | 5 +-
 trunk/Documentation/virtual/lguest/Makefile | 2 +-
 trunk/Documentation/virtual/lguest/lguest.c | 22 +-
 trunk/arch/arm/mach-shmobile/board-ap4evb.c | 56 ----
 trunk/arch/arm/mach-shmobile/board-mackerel.c | 78 -----
 trunk/arch/arm/mach-shmobile/clock-sh7372.c | 7 -
 .../arch/arm/mach-tegra/board-harmony-power.c | 4 +-
 trunk/arch/arm/mach-tegra/board-harmony.h | 3 +-
 trunk/arch/blackfin/lib/strncpy.S | 2 +-
 trunk/arch/ia64/include/asm/unistd.h | 3 +-
 trunk/arch/ia64/kernel/entry.S | 1 -
 trunk/arch/powerpc/platforms/powermac/pic.c | 3 +-
 trunk/arch/sh/Kconfig | 2 +-
 trunk/arch/sh/boards/mach-ap325rxa/setup.c | 32 +-
 trunk/arch/sh/boards/mach-ecovec24/setup.c | 3 -
 trunk/arch/sh/include/asm/pgtable.h | 1 -
 trunk/arch/sh/include/asm/ptrace.h | 6 +-
 trunk/arch/sh/include/asm/tlb.h | 1 -
 trunk/arch/sh/include/cpu-sh4/cpu/sh7722.h | 1 -
 trunk/arch/sh/include/cpu-sh4/cpu/sh7724.h | 1 -
 trunk/arch/sh/include/cpu-sh4/cpu/sh7757.h | 1 -
 trunk/arch/sh/kernel/process_32.c | 1 -
 trunk/arch/sh/mm/consistent.c | 2 +-
 trunk/arch/x86/kernel/Makefile | 2 -
 trunk/arch/x86/kernel/process.c | 2 +-
 trunk/arch/x86/kernel/smpboot.c | 2 +-
 trunk/arch/x86/lguest/boot.c | 1 -
 trunk/block/blk-ioc.c | 4 +-
 trunk/block/cfq-iosched.c | 11 +-
 trunk/drivers/block/nbd.c | 22 +-
 trunk/drivers/block/paride/pcd.c | 1 +
 trunk/drivers/block/virtio_blk.c | 91 +-----
 trunk/drivers/block/xen-blkback/blkback.c | 10 +-
 trunk/drivers/block/xen-blkback/xenbus.c | 3 +-
 trunk/drivers/bluetooth/hci_ldisc.c | 17 +-
 trunk/drivers/cdrom/viocd.c | 1 +
 trunk/drivers/char/virtio_console.c | 5 +
 trunk/drivers/clocksource/sh_cmt.c | 12 +-
 trunk/drivers/clocksource/sh_tmu.c | 12 +-
 trunk/drivers/dma/shdma.c | 9 +-
 trunk/drivers/ide/ide-cd.c | 1 +
 trunk/drivers/input/serio/serport.c | 10 +-
 trunk/drivers/isdn/gigaset/ser-gigaset.c | 8 +-
 trunk/drivers/misc/kgdbts.c | 5 +-
 trunk/drivers/misc/ti-st/st_core.c | 6 +-
 trunk/drivers/net/3c509.c | 14 +-
 trunk/drivers/net/3c59x.c | 4 +-
 trunk/drivers/net/caif/caif_serial.c | 6 +-
 trunk/drivers/net/can/flexcan.c | 5 +-
 trunk/drivers/net/can/slcan.c | 9 +-
 trunk/drivers/net/davinci_emac.c | 10 +-
 trunk/drivers/net/depca.c | 35 +-
 trunk/drivers/net/dm9000.c | 6 +-
 trunk/drivers/net/hamradio/6pack.c | 8 +-
 trunk/drivers/net/hamradio/mkiss.c | 11 +-
 trunk/drivers/net/hp100.c | 12 +-
 trunk/drivers/net/ibmlana.c | 4 +-
 trunk/drivers/net/irda/irtty-sir.c | 16 +-
 trunk/drivers/net/irda/smsc-ircc2.c | 44 +--
 trunk/drivers/net/ks8842.c | 2 +-
 trunk/drivers/net/ne3210.c | 15 +-
 trunk/drivers/net/ppp_async.c | 6 +-
 trunk/drivers/net/ppp_synctty.c | 6 +-
 trunk/drivers/net/slip.c | 11 +-
 trunk/drivers/net/smc-mca.c | 6 +-
 trunk/drivers/net/tg3.c | 2 +-
 trunk/drivers/net/tokenring/madgemc.c | 2 +-
 trunk/drivers/net/tulip/de4x5.c | 4 +-
 trunk/drivers/net/usb/catc.c | 2 +-
 trunk/drivers/net/usb/cdc_ncm.c | 3 +-
 trunk/drivers/net/virtio_net.c | 2 +-
 trunk/drivers/net/wan/x25_asy.c | 7 +-
 trunk/drivers/net/wireless/ath/ath9k/Kconfig | 1 +
 .../net/wireless/ath/ath9k/ar9002_calib.c | 2 +-
 .../net/wireless/ath/ath9k/ar9003_eeprom.c | 10 +-
 .../net/wireless/ath/ath9k/ar9003_phy.c | 22 --
 .../net/wireless/ath/ath9k/eeprom_9287.c | 10 +-
 trunk/drivers/net/wireless/ath/ath9k/hw.c | 5 +-
 trunk/drivers/net/wireless/ath/ath9k/hw.h | 2 -
 trunk/drivers/net/wireless/ath/ath9k/main.c | 4 +-
 trunk/drivers/net/wireless/ath/ath9k/rc.c | 3 +-
 trunk/drivers/net/wireless/b43/phy_n.c | 2 +-
 .../net/wireless/iwlegacy/iwl-4965-lib.c | 4 +-
 .../drivers/net/wireless/iwlegacy/iwl-4965.c | 2 +-
 trunk/drivers/net/wireless/iwlwifi/iwl-6000.c | 28 +-
 trunk/drivers/net/wireless/iwlwifi/iwl-agn.c | 6 +-
 trunk/drivers/net/wireless/iwlwifi/iwl-agn.h | 1 -
 trunk/drivers/net/wireless/libertas/cmd.c | 6 +-
 trunk/drivers/net/wireless/mwifiex/sdio.h | 4 +-
 trunk/drivers/net/wireless/rt2x00/Kconfig | 1 +
 trunk/drivers/net/wireless/rtlwifi/pci.c | 35 +-
 trunk/drivers/net/wireless/wl12xx/conf.h | 3 -
 trunk/drivers/net/wireless/wl12xx/main.c | 1 -
 trunk/drivers/net/wireless/wl12xx/scan.c | 49 +--
 trunk/drivers/net/wireless/wl12xx/scan.h | 3 -
 trunk/drivers/net/wireless/zd1211rw/zd_usb.c | 53 +--
 trunk/drivers/pci/dmar.c | 7 +-
 trunk/drivers/pci/intel-iommu.c | 240 +++-----------
 trunk/drivers/pci/iova.c | 12 +-
 trunk/drivers/scsi/scsi_scan.c | 2 +-
 trunk/drivers/scsi/scsi_sysfs.c | 1 -
 trunk/drivers/tty/n_gsm.c | 6 +-
 trunk/drivers/tty/n_hdlc.c | 18 +-
 trunk/drivers/tty/n_r3964.c | 10 +-
 trunk/drivers/tty/n_tty.c | 61 +---
 trunk/drivers/tty/tty_buffer.c | 15 +-
 trunk/drivers/tty/vt/selection.c | 3 +-
 trunk/drivers/vhost/net.c | 12 +-
 trunk/drivers/vhost/test.c | 6 +-
 trunk/drivers/vhost/vhost.c | 138 ++------
 trunk/drivers/vhost/vhost.h | 21 +-
 trunk/drivers/virtio/virtio_balloon.c | 21 +-
 trunk/drivers/virtio/virtio_ring.c | 53 +--
 trunk/fs/autofs4/root.c | 2 +
 trunk/fs/block_dev.c | 4 +-
 trunk/fs/btrfs/btrfs_inode.h | 3 +
 trunk/fs/btrfs/ctree.c | 28 +-
 trunk/fs/btrfs/ctree.h | 22 +-
 trunk/fs/btrfs/delayed-inode.c | 8 +-
 trunk/fs/btrfs/disk-io.c | 36 +--
 trunk/fs/btrfs/extent-tree.c | 103 ++----
 trunk/fs/btrfs/extent_io.c | 2 +-
 trunk/fs/btrfs/file.c | 10 +-
 trunk/fs/btrfs/free-space-cache.c | 70 +---
 trunk/fs/btrfs/inode-map.c | 34 +-
 trunk/fs/btrfs/inode.c | 261 +++++++--------
 trunk/fs/btrfs/ioctl.c | 26 +-
 trunk/fs/btrfs/relocation.c | 34 +-
 trunk/fs/btrfs/scrub.c | 123 +++----
 trunk/fs/btrfs/super.c | 8 +-
 trunk/fs/btrfs/transaction.c | 302 ++++++++----------
 trunk/fs/btrfs/transaction.h | 29 +-
 trunk/fs/btrfs/volumes.c | 2 +-
 trunk/fs/btrfs/xattr.c | 2 +
 trunk/fs/namei.c | 3 -
 trunk/fs/partitions/check.c | 10 +-
 trunk/fs/ubifs/io.c | 2 -
 trunk/fs/ubifs/journal.c | 1 -
 trunk/fs/ubifs/orphan.c | 2 +-
 trunk/fs/ubifs/recovery.c | 164 ++++------
 trunk/fs/ubifs/replay.c | 3 +-
 trunk/fs/ubifs/shrinker.c | 6 +-
 trunk/fs/ubifs/super.c | 42 ++-
 trunk/fs/ubifs/tnc.c | 9 +-
 trunk/fs/ubifs/ubifs.h | 4 +-
 trunk/include/asm-generic/unistd.h | 4 +-
 trunk/include/linux/blkdev.h | 4 +-
 trunk/include/linux/dma_remapping.h | 4 -
 trunk/include/linux/genhd.h | 1 -
 trunk/include/linux/ieee80211.h | 8 +-
 trunk/include/linux/if_packet.h | 1 -
 trunk/include/linux/mtd/physmap.h | 1 -
 trunk/include/linux/tty_ldisc.h | 9 +-
 trunk/include/linux/virtio.h | 9 -
 trunk/include/linux/virtio_9p.h | 25 +-
 trunk/include/linux/virtio_balloon.h | 25 +-
 trunk/include/linux/virtio_blk.h | 25 +-
 trunk/include/linux/virtio_config.h | 25 +-
 trunk/include/linux/virtio_console.h | 26 +-
 trunk/include/linux/virtio_ids.h | 24 +-
 trunk/include/linux/virtio_net.h | 25 +-
 trunk/include/linux/virtio_pci.h | 23 --
 trunk/include/linux/virtio_ring.h | 52 +-
trunk/include/net/sctp/command.h | 1 - trunk/include/net/sctp/structs.h | 2 +- trunk/include/trace/events/net.h | 12 +- trunk/kernel/rcutree.c | 54 +--- trunk/kernel/rcutree_plugin.h | 11 +- trunk/lib/Kconfig.debug | 2 +- trunk/mm/page_alloc.c | 4 + trunk/net/8021q/vlan_dev.c | 2 +- trunk/net/bluetooth/l2cap_core.c | 2 +- trunk/net/caif/chnl_net.c | 9 +- trunk/net/core/dev.c | 7 +- trunk/net/ipv4/af_inet.c | 3 - trunk/net/ipv4/ip_options.c | 15 +- trunk/net/mac80211/mlme.c | 7 - trunk/net/mac80211/scan.c | 1 + trunk/net/packet/af_packet.c | 15 +- trunk/net/sctp/associola.c | 23 +- trunk/net/sctp/sm_sideeffect.c | 3 - trunk/net/sctp/sm_statefuns.c | 14 +- trunk/net/wireless/nl80211.c | 4 +- trunk/net/wireless/scan.c | 43 ++- trunk/security/apparmor/lsm.c | 3 +- trunk/sound/soc/codecs/cx20442.c | 8 +- trunk/tools/testing/ktest/ktest.pl | 8 +- trunk/tools/virtio/virtio_test.c | 19 +- 189 files changed, 1189 insertions(+), 2338 deletions(-) diff --git a/[refs] b/[refs] index 6653b5bbfe63..0e40c17e151c 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: e6ece70732b905742ad91a7b5489e0ca1362c0cd +refs/heads/master: f2a4d8ae4d40f6f5482d207f47fd4d53b3ae0ed4 diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index d9a203b058f1..5438a2d7907f 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -999,10 +999,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. With this option on every unmap_single operation will result in a hardware IOTLB flush operation as opposed to batching them for performance. - sp_off [Default Off] - By default, super page will be supported if Intel IOMMU - has the capability. With this option, super page will - not be supported. + intremap= [X86-64, Intel-IOMMU] Format: { on (default) | off | nosid } on enable Interrupt Remapping (default) diff --git a/trunk/Documentation/virtual/lguest/Makefile b/trunk/Documentation/virtual/lguest/Makefile index 0ac34206f7a7..bebac6b4f332 100644 --- a/trunk/Documentation/virtual/lguest/Makefile +++ b/trunk/Documentation/virtual/lguest/Makefile @@ -1,5 +1,5 @@ # This creates the demonstration utility "lguest" which runs a Linux guest. -# Missing headers? Add "-I../../../include -I../../../arch/x86/include" +# Missing headers? Add "-I../../include -I../../arch/x86/include" CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE all: lguest diff --git a/trunk/Documentation/virtual/lguest/lguest.c b/trunk/Documentation/virtual/lguest/lguest.c index cd9d6af61d07..d9da7e148538 100644 --- a/trunk/Documentation/virtual/lguest/lguest.c +++ b/trunk/Documentation/virtual/lguest/lguest.c @@ -49,7 +49,7 @@ #include #include #include -#include "../../../include/linux/lguest_launcher.h" +#include "../../include/linux/lguest_launcher.h" /*L:110 * We can ignore the 42 include files we need for this program, but I do want * to draw attention to the use of kernel-style types. @@ -135,6 +135,9 @@ struct device { /* Is it operational */ bool running; + /* Does Guest want an intrrupt on empty? */ + bool irq_on_empty; + /* Device-specific data. */ void *priv; }; @@ -634,7 +637,10 @@ static void trigger_irq(struct virtqueue *vq) /* If they don't want an interrupt, don't send one... */ if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) { - return; + /* ... unless they've asked us to force one on empty. 
*/ + if (!vq->dev->irq_on_empty + || lg_last_avail(vq) != vq->vring.avail->idx) + return; } /* Send the Guest an interrupt tell them we used something up. */ @@ -1051,6 +1057,15 @@ static void create_thread(struct virtqueue *vq) close(vq->eventfd); } +static bool accepted_feature(struct device *dev, unsigned int bit) +{ + const u8 *features = get_feature_bits(dev) + dev->feature_len; + + if (dev->feature_len < bit / CHAR_BIT) + return false; + return features[bit / CHAR_BIT] & (1 << (bit % CHAR_BIT)); +} + static void start_device(struct device *dev) { unsigned int i; @@ -1064,6 +1079,8 @@ static void start_device(struct device *dev) verbose(" %02x", get_feature_bits(dev) [dev->feature_len+i]); + dev->irq_on_empty = accepted_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY); + for (vq = dev->vq; vq; vq = vq->next) { if (vq->service) create_thread(vq); @@ -1547,6 +1564,7 @@ static void setup_tun_net(char *arg) /* Set up the tun device. */ configure_device(ipfd, tapif, ip); + add_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY); /* Expect Guest to handle everything except UFO */ add_feature(dev, VIRTIO_NET_F_CSUM); add_feature(dev, VIRTIO_NET_F_GUEST_CSUM); diff --git a/trunk/arch/arm/mach-shmobile/board-ap4evb.c b/trunk/arch/arm/mach-shmobile/board-ap4evb.c index f6b687f61c28..08acb6ec8139 100644 --- a/trunk/arch/arm/mach-shmobile/board-ap4evb.c +++ b/trunk/arch/arm/mach-shmobile/board-ap4evb.c @@ -249,29 +249,6 @@ static int slot_cn7_get_cd(struct platform_device *pdev) { return !gpio_get_value(GPIO_PORT41); } -/* MERAM */ -static struct sh_mobile_meram_info meram_info = { - .addr_mode = SH_MOBILE_MERAM_MODE1, -}; - -static struct resource meram_resources[] = { - [0] = { - .name = "MERAM", - .start = 0xe8000000, - .end = 0xe81fffff, - .flags = IORESOURCE_MEM, - }, -}; - -static struct platform_device meram_device = { - .name = "sh_mobile_meram", - .id = 0, - .num_resources = ARRAY_SIZE(meram_resources), - .resource = meram_resources, - .dev = { - .platform_data = &meram_info, - }, -}; /* SH_MMCIF */ static struct resource sh_mmcif_resources[] = { @@ -470,29 +447,13 @@ const static struct fb_videomode ap4evb_lcdc_modes[] = { #endif }, }; -static struct sh_mobile_meram_cfg lcd_meram_cfg = { - .icb[0] = { - .marker_icb = 28, - .cache_icb = 24, - .meram_offset = 0x0, - .meram_size = 0x40, - }, - .icb[1] = { - .marker_icb = 29, - .cache_icb = 25, - .meram_offset = 0x40, - .meram_size = 0x40, - }, -}; static struct sh_mobile_lcdc_info lcdc_info = { - .meram_dev = &meram_info, .ch[0] = { .chan = LCDC_CHAN_MAINLCD, .bpp = 16, .lcd_cfg = ap4evb_lcdc_modes, .num_cfg = ARRAY_SIZE(ap4evb_lcdc_modes), - .meram_cfg = &lcd_meram_cfg, } }; @@ -763,31 +724,15 @@ static struct platform_device fsi_device = { static struct platform_device fsi_ak4643_device = { .name = "sh_fsi2_a_ak4643", }; -static struct sh_mobile_meram_cfg hdmi_meram_cfg = { - .icb[0] = { - .marker_icb = 30, - .cache_icb = 26, - .meram_offset = 0x80, - .meram_size = 0x100, - }, - .icb[1] = { - .marker_icb = 31, - .cache_icb = 27, - .meram_offset = 0x180, - .meram_size = 0x100, - }, -}; static struct sh_mobile_lcdc_info sh_mobile_lcdc1_info = { .clock_source = LCDC_CLK_EXTERNAL, - .meram_dev = &meram_info, .ch[0] = { .chan = LCDC_CHAN_MAINLCD, .bpp = 16, .interface_type = RGB24, .clock_divider = 1, .flags = LCDC_FLAGS_DWPOL, - .meram_cfg = &hdmi_meram_cfg, } }; @@ -1016,7 +961,6 @@ static struct platform_device *ap4evb_devices[] __initdata = { &csi2_device, &ceu_device, &ap4evb_camera, - &meram_device, }; static void __init hdmi_init_pm_clock(void) diff --git 
a/trunk/arch/arm/mach-shmobile/board-mackerel.c b/trunk/arch/arm/mach-shmobile/board-mackerel.c index 776f20560e72..448ddbe43335 100644 --- a/trunk/arch/arm/mach-shmobile/board-mackerel.c +++ b/trunk/arch/arm/mach-shmobile/board-mackerel.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include @@ -315,30 +314,6 @@ static struct platform_device smc911x_device = { }, }; -/* MERAM */ -static struct sh_mobile_meram_info mackerel_meram_info = { - .addr_mode = SH_MOBILE_MERAM_MODE1, -}; - -static struct resource meram_resources[] = { - [0] = { - .name = "MERAM", - .start = 0xe8000000, - .end = 0xe81fffff, - .flags = IORESOURCE_MEM, - }, -}; - -static struct platform_device meram_device = { - .name = "sh_mobile_meram", - .id = 0, - .num_resources = ARRAY_SIZE(meram_resources), - .resource = meram_resources, - .dev = { - .platform_data = &mackerel_meram_info, - }, -}; - /* LCDC */ static struct fb_videomode mackerel_lcdc_modes[] = { { @@ -367,23 +342,7 @@ static int mackerel_get_brightness(void *board_data) return gpio_get_value(GPIO_PORT31); } -static struct sh_mobile_meram_cfg lcd_meram_cfg = { - .icb[0] = { - .marker_icb = 28, - .cache_icb = 24, - .meram_offset = 0x0, - .meram_size = 0x40, - }, - .icb[1] = { - .marker_icb = 29, - .cache_icb = 25, - .meram_offset = 0x40, - .meram_size = 0x40, - }, -}; - static struct sh_mobile_lcdc_info lcdc_info = { - .meram_dev = &mackerel_meram_info, .clock_source = LCDC_CLK_BUS, .ch[0] = { .chan = LCDC_CHAN_MAINLCD, @@ -403,7 +362,6 @@ static struct sh_mobile_lcdc_info lcdc_info = { .name = "sh_mobile_lcdc_bl", .max_brightness = 1, }, - .meram_cfg = &lcd_meram_cfg, } }; @@ -430,23 +388,8 @@ static struct platform_device lcdc_device = { }, }; -static struct sh_mobile_meram_cfg hdmi_meram_cfg = { - .icb[0] = { - .marker_icb = 30, - .cache_icb = 26, - .meram_offset = 0x80, - .meram_size = 0x100, - }, - .icb[1] = { - .marker_icb = 31, - .cache_icb = 27, - .meram_offset = 0x180, - .meram_size = 0x100, - }, -}; /* HDMI */ static struct sh_mobile_lcdc_info hdmi_lcdc_info = { - .meram_dev = &mackerel_meram_info, .clock_source = LCDC_CLK_EXTERNAL, .ch[0] = { .chan = LCDC_CHAN_MAINLCD, @@ -454,7 +397,6 @@ static struct sh_mobile_lcdc_info hdmi_lcdc_info = { .interface_type = RGB24, .clock_divider = 1, .flags = LCDC_FLAGS_DWPOL, - .meram_cfg = &hdmi_meram_cfg, } }; @@ -914,17 +856,6 @@ static int slot_cn7_get_cd(struct platform_device *pdev) } /* SDHI0 */ -static irqreturn_t mackerel_sdhi0_gpio_cd(int irq, void *arg) -{ - struct device *dev = arg; - struct sh_mobile_sdhi_info *info = dev->platform_data; - struct tmio_mmc_data *pdata = info->pdata; - - tmio_mmc_cd_wakeup(pdata); - - return IRQ_HANDLED; -} - static struct sh_mobile_sdhi_info sdhi0_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, @@ -1219,7 +1150,6 @@ static struct platform_device *mackerel_devices[] __initdata = { &mackerel_camera, &hdmi_lcdc_device, &hdmi_device, - &meram_device, }; /* Keypad Initialization */ @@ -1308,7 +1238,6 @@ static void __init mackerel_init(void) { u32 srcr4; struct clk *clk; - int ret; sh7372_pinmux_init(); @@ -1414,13 +1343,6 @@ static void __init mackerel_init(void) gpio_request(GPIO_FN_SDHID0_1, NULL); gpio_request(GPIO_FN_SDHID0_0, NULL); - ret = request_irq(evt2irq(0x3340), mackerel_sdhi0_gpio_cd, - IRQF_TRIGGER_FALLING, "sdhi0 cd", &sdhi0_device.dev); - if (!ret) - sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD; - else - pr_err("Cannot get IRQ #%d: %d\n", evt2irq(0x3340), ret); - #if 
!defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) /* enable SDHI1 */ gpio_request(GPIO_FN_SDHICMD1, NULL); diff --git a/trunk/arch/arm/mach-shmobile/clock-sh7372.c b/trunk/arch/arm/mach-shmobile/clock-sh7372.c index c0800d83971e..d17eb66f4ac2 100644 --- a/trunk/arch/arm/mach-shmobile/clock-sh7372.c +++ b/trunk/arch/arm/mach-shmobile/clock-sh7372.c @@ -509,7 +509,6 @@ enum { MSTP001, MSTP118, MSTP117, MSTP116, MSTP113, MSTP106, MSTP101, MSTP100, MSTP223, - MSTP218, MSTP217, MSTP216, MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, MSTP329, MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312, MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP406, MSTP403, @@ -535,9 +534,6 @@ static struct clk mstp_clks[MSTP_NR] = { [MSTP101] = MSTP(&div4_clks[DIV4_M1], SMSTPCR1, 1, 0), /* VPU */ [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */ [MSTP223] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR2, 23, 0), /* SPU2 */ - [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */ - [MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */ - [MSTP216] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */ [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */ [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */ [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */ @@ -630,9 +626,6 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ CLKDEV_DEV_ID("uio_pdrv_genirq.6", &mstp_clks[MSTP223]), /* SPU2DSP0 */ CLKDEV_DEV_ID("uio_pdrv_genirq.7", &mstp_clks[MSTP223]), /* SPU2DSP1 */ - CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* DMAC1 */ - CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* DMAC2 */ - CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), /* DMAC3 */ CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */ CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */ diff --git a/trunk/arch/arm/mach-tegra/board-harmony-power.c b/trunk/arch/arm/mach-tegra/board-harmony-power.c index c84442cabe07..5ad8b2f94f8d 100644 --- a/trunk/arch/arm/mach-tegra/board-harmony-power.c +++ b/trunk/arch/arm/mach-tegra/board-harmony-power.c @@ -24,6 +24,8 @@ #include +#include "board-harmony.h" + #define PMC_CTRL 0x0 #define PMC_CTRL_INTR_LOW (1 << 17) @@ -98,7 +100,7 @@ static struct tps6586x_platform_data tps_platform = { .irq_base = TEGRA_NR_IRQS, .num_subdevs = ARRAY_SIZE(tps_devs), .subdevs = tps_devs, - .gpio_base = TEGRA_NR_GPIOS, + .gpio_base = HARMONY_GPIO_TPS6586X(0), }; static struct i2c_board_info __initdata harmony_regulators[] = { diff --git a/trunk/arch/arm/mach-tegra/board-harmony.h b/trunk/arch/arm/mach-tegra/board-harmony.h index 1e57b071f52d..d85142edaf6b 100644 --- a/trunk/arch/arm/mach-tegra/board-harmony.h +++ b/trunk/arch/arm/mach-tegra/board-harmony.h @@ -17,7 +17,8 @@ #ifndef _MACH_TEGRA_BOARD_HARMONY_H #define _MACH_TEGRA_BOARD_HARMONY_H -#define HARMONY_GPIO_WM8903(_x_) (TEGRA_NR_GPIOS + (_x_)) +#define HARMONY_GPIO_TPS6586X(_x_) (TEGRA_NR_GPIOS + (_x_)) +#define HARMONY_GPIO_WM8903(_x_) (HARMONY_GPIO_TPS6586X(4) + (_x_)) #define TEGRA_GPIO_SD2_CD TEGRA_GPIO_PI5 #define TEGRA_GPIO_SD2_WP TEGRA_GPIO_PH1 diff --git a/trunk/arch/blackfin/lib/strncpy.S b/trunk/arch/blackfin/lib/strncpy.S index 2c07dddac995..f3931d50b4a7 100644 --- a/trunk/arch/blackfin/lib/strncpy.S +++ b/trunk/arch/blackfin/lib/strncpy.S @@ -25,7 +25,7 @@ ENTRY(_strncpy) CC = R2 == 0; - 
if CC JUMP 6f; + if CC JUMP 4f; P2 = R2 ; /* size */ P0 = R0 ; /* dst*/ diff --git a/trunk/arch/ia64/include/asm/unistd.h b/trunk/arch/ia64/include/asm/unistd.h index 7c928da35b17..1cf0f496f744 100644 --- a/trunk/arch/ia64/include/asm/unistd.h +++ b/trunk/arch/ia64/include/asm/unistd.h @@ -320,12 +320,11 @@ #define __NR_clock_adjtime 1328 #define __NR_syncfs 1329 #define __NR_setns 1330 -#define __NR_sendmmsg 1331 #ifdef __KERNEL__ -#define NR_syscalls 308 /* length of syscall table */ +#define NR_syscalls 307 /* length of syscall table */ /* * The following defines stop scripts/checksyscalls.sh from complaining about diff --git a/trunk/arch/ia64/kernel/entry.S b/trunk/arch/ia64/kernel/entry.S index 97dd2abdeb1a..9ca80193cd4e 100644 --- a/trunk/arch/ia64/kernel/entry.S +++ b/trunk/arch/ia64/kernel/entry.S @@ -1776,7 +1776,6 @@ sys_call_table: data8 sys_clock_adjtime data8 sys_syncfs data8 sys_setns // 1330 - data8 sys_sendmmsg .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ diff --git a/trunk/arch/powerpc/platforms/powermac/pic.c b/trunk/arch/powerpc/platforms/powermac/pic.c index 7667db448aa7..9089b0421191 100644 --- a/trunk/arch/powerpc/platforms/powermac/pic.c +++ b/trunk/arch/powerpc/platforms/powermac/pic.c @@ -715,8 +715,7 @@ static struct syscore_ops pmacpic_syscore_ops = { static int __init init_pmacpic_syscore(void) { - if (pmac_irq_hw[0]) - register_syscore_ops(&pmacpic_syscore_ops); + register_syscore_ops(&pmacpic_syscore_ops); return 0; } diff --git a/trunk/arch/sh/Kconfig b/trunk/arch/sh/Kconfig index f03338c2f088..74495a5ea027 100644 --- a/trunk/arch/sh/Kconfig +++ b/trunk/arch/sh/Kconfig @@ -161,7 +161,7 @@ config ARCH_HAS_CPU_IDLE_WAIT config NO_IOPORT def_bool !PCI - depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN + depends on !SH_CAYMAN && !SH_SH4202_MICRODEV config IO_TRAPPED bool diff --git a/trunk/arch/sh/boards/mach-ap325rxa/setup.c b/trunk/arch/sh/boards/mach-ap325rxa/setup.c index 969421f64a15..618bd566cf53 100644 --- a/trunk/arch/sh/boards/mach-ap325rxa/setup.c +++ b/trunk/arch/sh/boards/mach-ap325rxa/setup.c @@ -359,31 +359,37 @@ static struct soc_camera_link camera_link = { .priv = &camera_info, }; -static struct platform_device *camera_device; - -static void ap325rxa_camera_release(struct device *dev) +static void dummy_release(struct device *dev) { - soc_camera_platform_release(&camera_device); } +static struct platform_device camera_device = { + .name = "soc_camera_platform", + .dev = { + .platform_data = &camera_info, + .release = dummy_release, + }, +}; + static int ap325rxa_camera_add(struct soc_camera_link *icl, struct device *dev) { - int ret = soc_camera_platform_add(icl, dev, &camera_device, &camera_link, - ap325rxa_camera_release, 0); - if (ret < 0) - return ret; + if (icl != &camera_link || camera_probe() <= 0) + return -ENODEV; - ret = camera_probe(); - if (ret < 0) - soc_camera_platform_del(icl, camera_device, &camera_link); + camera_info.dev = dev; - return ret; + return platform_device_register(&camera_device); } static void ap325rxa_camera_del(struct soc_camera_link *icl) { - soc_camera_platform_del(icl, camera_device, &camera_link); + if (icl != &camera_link) + return; + + platform_device_unregister(&camera_device); + memset(&camera_device.dev.kobj, 0, + sizeof(camera_device.dev.kobj)); } #endif /* CONFIG_I2C */ diff --git a/trunk/arch/sh/boards/mach-ecovec24/setup.c b/trunk/arch/sh/boards/mach-ecovec24/setup.c index 3a32741cc0ac..bb13d0e1b964 100644 
--- a/trunk/arch/sh/boards/mach-ecovec24/setup.c +++ b/trunk/arch/sh/boards/mach-ecovec24/setup.c @@ -885,9 +885,6 @@ static struct platform_device sh_mmcif_device = { }, .num_resources = ARRAY_SIZE(sh_mmcif_resources), .resource = sh_mmcif_resources, - .archdata = { - .hwblk_id = HWBLK_MMC, - }, }; #endif diff --git a/trunk/arch/sh/include/asm/pgtable.h b/trunk/arch/sh/include/asm/pgtable.h index 9210e93a92c3..db85916b9e95 100644 --- a/trunk/arch/sh/include/asm/pgtable.h +++ b/trunk/arch/sh/include/asm/pgtable.h @@ -18,7 +18,6 @@ #include #endif #include -#include #ifndef __ASSEMBLY__ #include diff --git a/trunk/arch/sh/include/asm/ptrace.h b/trunk/arch/sh/include/asm/ptrace.h index 88bd6be168a9..40725b4a8018 100644 --- a/trunk/arch/sh/include/asm/ptrace.h +++ b/trunk/arch/sh/include/asm/ptrace.h @@ -41,9 +41,7 @@ #define user_mode(regs) (((regs)->sr & 0x40000000)==0) #define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15]) - -#define GET_FP(regs) ((regs)->regs[14]) -#define GET_USP(regs) ((regs)->regs[15]) +#define GET_USP(regs) ((regs)->regs[15]) extern void show_regs(struct pt_regs *); @@ -133,7 +131,7 @@ extern void ptrace_triggered(struct perf_event *bp, int nmi, static inline unsigned long profile_pc(struct pt_regs *regs) { - unsigned long pc = regs->pc; + unsigned long pc = instruction_pointer(regs); if (virt_addr_uncached(pc)) return CAC_ADDR(pc); diff --git a/trunk/arch/sh/include/asm/tlb.h b/trunk/arch/sh/include/asm/tlb.h index ec88bfcdf7ce..6c308d8b9a50 100644 --- a/trunk/arch/sh/include/asm/tlb.h +++ b/trunk/arch/sh/include/asm/tlb.h @@ -9,7 +9,6 @@ #include #ifdef CONFIG_MMU -#include #include #include #include diff --git a/trunk/arch/sh/include/cpu-sh4/cpu/sh7722.h b/trunk/arch/sh/include/cpu-sh4/cpu/sh7722.h index bd0622788d64..7a5b8a331b4a 100644 --- a/trunk/arch/sh/include/cpu-sh4/cpu/sh7722.h +++ b/trunk/arch/sh/include/cpu-sh4/cpu/sh7722.h @@ -236,7 +236,6 @@ enum { }; enum { - SHDMA_SLAVE_INVALID, SHDMA_SLAVE_SCIF0_TX, SHDMA_SLAVE_SCIF0_RX, SHDMA_SLAVE_SCIF1_TX, diff --git a/trunk/arch/sh/include/cpu-sh4/cpu/sh7724.h b/trunk/arch/sh/include/cpu-sh4/cpu/sh7724.h index 3daef8ecbc63..7eb435999426 100644 --- a/trunk/arch/sh/include/cpu-sh4/cpu/sh7724.h +++ b/trunk/arch/sh/include/cpu-sh4/cpu/sh7724.h @@ -285,7 +285,6 @@ enum { }; enum { - SHDMA_SLAVE_INVALID, SHDMA_SLAVE_SCIF0_TX, SHDMA_SLAVE_SCIF0_RX, SHDMA_SLAVE_SCIF1_TX, diff --git a/trunk/arch/sh/include/cpu-sh4/cpu/sh7757.h b/trunk/arch/sh/include/cpu-sh4/cpu/sh7757.h index 41f9f8b9db73..05b8196c7753 100644 --- a/trunk/arch/sh/include/cpu-sh4/cpu/sh7757.h +++ b/trunk/arch/sh/include/cpu-sh4/cpu/sh7757.h @@ -252,7 +252,6 @@ enum { }; enum { - SHDMA_SLAVE_INVALID, SHDMA_SLAVE_SDHI_TX, SHDMA_SLAVE_SDHI_RX, SHDMA_SLAVE_MMCIF_TX, diff --git a/trunk/arch/sh/kernel/process_32.c b/trunk/arch/sh/kernel/process_32.c index b473f0c06fbc..762a13984bbd 100644 --- a/trunk/arch/sh/kernel/process_32.c +++ b/trunk/arch/sh/kernel/process_32.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/arch/sh/mm/consistent.c b/trunk/arch/sh/mm/consistent.c index f251b5f27652..40733a952402 100644 --- a/trunk/arch/sh/mm/consistent.c +++ b/trunk/arch/sh/mm/consistent.c @@ -82,7 +82,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, void *addr; addr = __in_29bit_mode() ? 
- (void *)CAC_ADDR((unsigned long)vaddr) : vaddr; + (void *)P1SEGADDR((unsigned long)vaddr) : vaddr; switch (direction) { case DMA_FROM_DEVICE: /* invalidate only */ diff --git a/trunk/arch/x86/kernel/Makefile b/trunk/arch/x86/kernel/Makefile index 90b06d4daee2..f5abe3a245b8 100644 --- a/trunk/arch/x86/kernel/Makefile +++ b/trunk/arch/x86/kernel/Makefile @@ -8,7 +8,6 @@ CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE) ifdef CONFIG_FUNCTION_TRACER # Do not profile debug and lowlevel utilities -CFLAGS_REMOVE_tsc.o = -pg CFLAGS_REMOVE_rtc.o = -pg CFLAGS_REMOVE_paravirt-spinlocks.o = -pg CFLAGS_REMOVE_pvclock.o = -pg @@ -29,7 +28,6 @@ CFLAGS_paravirt.o := $(nostackp) GCOV_PROFILE_vsyscall_64.o := n GCOV_PROFILE_hpet.o := n GCOV_PROFILE_tsc.o := n -GCOV_PROFILE_vread_tsc_64.o := n GCOV_PROFILE_paravirt.o := n # vread_tsc_64 is hot and should be fully optimized: diff --git a/trunk/arch/x86/kernel/process.c b/trunk/arch/x86/kernel/process.c index 2e4928d45a2d..426a5b66f7e4 100644 --- a/trunk/arch/x86/kernel/process.c +++ b/trunk/arch/x86/kernel/process.c @@ -642,7 +642,7 @@ static int __init idle_setup(char *str) boot_option_idle_override = IDLE_POLL; } else if (!strcmp(str, "mwait")) { boot_option_idle_override = IDLE_FORCE_MWAIT; - WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n"); + WARN_ONCE(1, "\idle=mwait\" will be removed in 2012\"\n"); } else if (!strcmp(str, "halt")) { /* * When the boot option of idle=halt is added, halt is diff --git a/trunk/arch/x86/kernel/smpboot.c b/trunk/arch/x86/kernel/smpboot.c index 33a0c11797de..eefd96765e79 100644 --- a/trunk/arch/x86/kernel/smpboot.c +++ b/trunk/arch/x86/kernel/smpboot.c @@ -1332,7 +1332,7 @@ static inline void mwait_play_dead(void) void *mwait_ptr; struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info); - if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c))) + if (!this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)) return; if (!this_cpu_has(X86_FEATURE_CLFLSH)) return; diff --git a/trunk/arch/x86/lguest/boot.c b/trunk/arch/x86/lguest/boot.c index db832fd65ecb..e191c096ab90 100644 --- a/trunk/arch/x86/lguest/boot.c +++ b/trunk/arch/x86/lguest/boot.c @@ -993,7 +993,6 @@ static void lguest_time_irq(unsigned int irq, struct irq_desc *desc) static void lguest_time_init(void) { /* Set up the timer interrupt (0) to go to our simple timer routine */ - lguest_setup_irq(0); irq_set_handler(0, lguest_time_irq); clocksource_register_hz(&lguest_clock, NSEC_PER_SEC); diff --git a/trunk/block/blk-ioc.c b/trunk/block/blk-ioc.c index 342eae9b0d3c..c898049dafd5 100644 --- a/trunk/block/blk-ioc.c +++ b/trunk/block/blk-ioc.c @@ -21,7 +21,7 @@ static void cfq_dtor(struct io_context *ioc) if (!hlist_empty(&ioc->cic_list)) { struct cfq_io_context *cic; - cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context, + cic = list_entry(ioc->cic_list.first, struct cfq_io_context, cic_list); cic->dtor(ioc); } @@ -57,7 +57,7 @@ static void cfq_exit(struct io_context *ioc) if (!hlist_empty(&ioc->cic_list)) { struct cfq_io_context *cic; - cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context, + cic = list_entry(ioc->cic_list.first, struct cfq_io_context, cic_list); cic->exit(ioc); } diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c index 3c7b537bf908..7c52d6888924 100644 --- a/trunk/block/cfq-iosched.c +++ b/trunk/block/cfq-iosched.c @@ -185,7 +185,7 @@ struct cfq_group { int nr_cfqq; /* - * Per group busy queues average. Useful for workload slice calc. We + * Per group busy queus average. Useful for workload slice calc. 
We * create the array for each prio class but at run time it is used * only for RT and BE class and slot for IDLE class remains unused. * This is primarily done to avoid confusion and a gcc warning. @@ -369,16 +369,16 @@ CFQ_CFQQ_FNS(wait_busy); #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \ cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \ - blkg_path(&(cfqq)->cfqg->blkg), ##args) + blkg_path(&(cfqq)->cfqg->blkg), ##args); #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \ blk_add_trace_msg((cfqd)->queue, "%s " fmt, \ - blkg_path(&(cfqg)->blkg), ##args) \ + blkg_path(&(cfqg)->blkg), ##args); \ #else #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args) -#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0) +#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0); #endif #define cfq_log(cfqd, fmt, args...) \ blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args) @@ -3786,6 +3786,9 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) return 0; queue_fail: + if (cic) + put_io_context(cic->ioc); + cfq_schedule_dispatch(cfqd); spin_unlock_irqrestore(q->queue_lock, flags); cfq_log(cfqd, "set_request fail"); diff --git a/trunk/drivers/block/nbd.c b/trunk/drivers/block/nbd.c index f533f3375e24..e6fc716aca45 100644 --- a/trunk/drivers/block/nbd.c +++ b/trunk/drivers/block/nbd.c @@ -192,8 +192,7 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size, if (lo->xmit_timeout) del_timer_sync(&ti); } else - result = kernel_recvmsg(sock, &msg, &iov, 1, size, - msg.msg_flags); + result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0); if (signal_pending(current)) { siginfo_t info; @@ -754,26 +753,9 @@ static int __init nbd_init(void) return -ENOMEM; part_shift = 0; - if (max_part > 0) { + if (max_part > 0) part_shift = fls(max_part); - /* - * Adjust max_part according to part_shift as it is exported - * to user space so that user can know the max number of - * partition kernel should be able to manage. - * - * Note that -1 is required because partition 0 is reserved - * for the whole disk. - */ - max_part = (1UL << part_shift) - 1; - } - - if ((1UL << part_shift) > DISK_MAX_PARTS) - return -EINVAL; - - if (nbds_max > 1UL << (MINORBITS - part_shift)) - return -EINVAL; - for (i = 0; i < nbds_max; i++) { struct gendisk *disk = alloc_disk(1 << part_shift); if (!disk) diff --git a/trunk/drivers/block/paride/pcd.c b/trunk/drivers/block/paride/pcd.c index 46b8136c31bb..a0aabd904a51 100644 --- a/trunk/drivers/block/paride/pcd.c +++ b/trunk/drivers/block/paride/pcd.c @@ -321,6 +321,7 @@ static void pcd_init_units(void) strcpy(disk->disk_name, cd->name); /* umm... */ disk->fops = &pcd_bdops; disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; + disk->events = DISK_EVENT_MEDIA_CHANGE; } } diff --git a/trunk/drivers/block/virtio_blk.c b/trunk/drivers/block/virtio_blk.c index 079c08808d8a..6ecf89cdf006 100644 --- a/trunk/drivers/block/virtio_blk.c +++ b/trunk/drivers/block/virtio_blk.c @@ -6,13 +6,10 @@ #include #include #include -#include -#include #define PART_BITS 4 static int major, index; -struct workqueue_struct *virtblk_wq; struct virtio_blk { @@ -29,9 +26,6 @@ struct virtio_blk mempool_t *pool; - /* Process context for config space updates */ - struct work_struct config_work; - /* What host tells us, plus 2 for header & tailer. 
*/ unsigned int sg_elems; @@ -147,7 +141,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, num = blk_rq_map_sg(q, vbr->req, vblk->sg + out); if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) { - sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, SCSI_SENSE_BUFFERSIZE); + sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96); sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr, sizeof(vbr->in_hdr)); } @@ -297,46 +291,6 @@ static ssize_t virtblk_serial_show(struct device *dev, } DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL); -static void virtblk_config_changed_work(struct work_struct *work) -{ - struct virtio_blk *vblk = - container_of(work, struct virtio_blk, config_work); - struct virtio_device *vdev = vblk->vdev; - struct request_queue *q = vblk->disk->queue; - char cap_str_2[10], cap_str_10[10]; - u64 capacity, size; - - /* Host must always specify the capacity. */ - vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity), - &capacity, sizeof(capacity)); - - /* If capacity is too big, truncate with warning. */ - if ((sector_t)capacity != capacity) { - dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n", - (unsigned long long)capacity); - capacity = (sector_t)-1; - } - - size = capacity * queue_logical_block_size(q); - string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); - string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); - - dev_notice(&vdev->dev, - "new size: %llu %d-byte logical blocks (%s/%s)\n", - (unsigned long long)capacity, - queue_logical_block_size(q), - cap_str_10, cap_str_2); - - set_capacity(vblk->disk, capacity); -} - -static void virtblk_config_changed(struct virtio_device *vdev) -{ - struct virtio_blk *vblk = vdev->priv; - - queue_work(virtblk_wq, &vblk->config_work); -} - static int __devinit virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; @@ -373,7 +327,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) vblk->vdev = vdev; vblk->sg_elems = sg_elems; sg_init_table(vblk->sg, vblk->sg_elems); - INIT_WORK(&vblk->config_work, virtblk_config_changed_work); /* We expect one virtqueue, for output. */ vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests"); @@ -524,8 +477,6 @@ static void __devexit virtblk_remove(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; - flush_work(&vblk->config_work); - /* Nothing should be pending. */ BUG_ON(!list_empty(&vblk->reqs)); @@ -557,47 +508,27 @@ static unsigned int features[] = { * Use __refdata to avoid this warning. 
*/ static struct virtio_driver __refdata virtio_blk = { - .feature_table = features, - .feature_table_size = ARRAY_SIZE(features), - .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, - .id_table = id_table, - .probe = virtblk_probe, - .remove = __devexit_p(virtblk_remove), - .config_changed = virtblk_config_changed, + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtblk_probe, + .remove = __devexit_p(virtblk_remove), }; static int __init init(void) { - int error; - - virtblk_wq = alloc_workqueue("virtio-blk", 0, 0); - if (!virtblk_wq) - return -ENOMEM; - major = register_blkdev(0, "virtblk"); - if (major < 0) { - error = major; - goto out_destroy_workqueue; - } - - error = register_virtio_driver(&virtio_blk); - if (error) - goto out_unregister_blkdev; - return 0; - -out_unregister_blkdev: - unregister_blkdev(major, "virtblk"); -out_destroy_workqueue: - destroy_workqueue(virtblk_wq); - return error; + if (major < 0) + return major; + return register_virtio_driver(&virtio_blk); } static void __exit fini(void) { unregister_blkdev(major, "virtblk"); unregister_virtio_driver(&virtio_blk); - destroy_workqueue(virtblk_wq); } module_init(init); module_exit(fini); diff --git a/trunk/drivers/block/xen-blkback/blkback.c b/trunk/drivers/block/xen-blkback/blkback.c index 5cf2993a8338..c73910cc28c9 100644 --- a/trunk/drivers/block/xen-blkback/blkback.c +++ b/trunk/drivers/block/xen-blkback/blkback.c @@ -809,13 +809,11 @@ static int __init xen_blkif_init(void) failed_init: kfree(blkbk->pending_reqs); kfree(blkbk->pending_grant_handles); - if (blkbk->pending_pages) { - for (i = 0; i < mmap_pages; i++) { - if (blkbk->pending_pages[i]) - __free_page(blkbk->pending_pages[i]); - } - kfree(blkbk->pending_pages); + for (i = 0; i < mmap_pages; i++) { + if (blkbk->pending_pages[i]) + __free_page(blkbk->pending_pages[i]); } + kfree(blkbk->pending_pages); kfree(blkbk); blkbk = NULL; return rc; diff --git a/trunk/drivers/block/xen-blkback/xenbus.c b/trunk/drivers/block/xen-blkback/xenbus.c index 6cc0db1bf522..34570823355b 100644 --- a/trunk/drivers/block/xen-blkback/xenbus.c +++ b/trunk/drivers/block/xen-blkback/xenbus.c @@ -357,13 +357,14 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, } vbd->bdev = bdev; + vbd->size = vbd_sz(vbd); + if (vbd->bdev->bd_disk == NULL) { DPRINTK("xen_vbd_create: device %08x doesn't exist.\n", vbd->pdevice); xen_vbd_free(vbd); return -ENOENT; } - vbd->size = vbd_sz(vbd); if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom) vbd->type |= VDISK_CDROM; diff --git a/trunk/drivers/bluetooth/hci_ldisc.c b/trunk/drivers/bluetooth/hci_ldisc.c index 48ad2a7ab080..b3f01996318f 100644 --- a/trunk/drivers/bluetooth/hci_ldisc.c +++ b/trunk/drivers/bluetooth/hci_ldisc.c @@ -355,24 +355,29 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty) * flags pointer to flags for data * count count of received data in bytes * - * Return Value: None + * Return Value: Number of bytes received */ -static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count) +static unsigned int hci_uart_tty_receive(struct tty_struct *tty, + const u8 *data, char *flags, int count) { struct hci_uart *hu = (void *)tty->disc_data; + int received; if (!hu || tty != hu->tty) - return; + return -ENODEV; if (!test_bit(HCI_UART_PROTO_SET, &hu->flags)) - return; + return -EINVAL; spin_lock(&hu->rx_lock); - hu->proto->recv(hu, (void *) 
data, count); - hu->hdev->stat.byte_rx += count; + received = hu->proto->recv(hu, (void *) data, count); + if (received > 0) + hu->hdev->stat.byte_rx += received; spin_unlock(&hu->rx_lock); tty_unthrottle(tty); + + return received; } static int hci_uart_register_dev(struct hci_uart *hu) diff --git a/trunk/drivers/cdrom/viocd.c b/trunk/drivers/cdrom/viocd.c index 7878da89d29e..ae15a4ddaa9b 100644 --- a/trunk/drivers/cdrom/viocd.c +++ b/trunk/drivers/cdrom/viocd.c @@ -627,6 +627,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id) gendisk->fops = &viocd_fops; gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; + gendisk->events = DISK_EVENT_MEDIA_CHANGE; set_capacity(gendisk, 0); gendisk->private_data = d; d->viocd_disk = gendisk; diff --git a/trunk/drivers/char/virtio_console.c b/trunk/drivers/char/virtio_console.c index fb68b1295373..838568a7dbf5 100644 --- a/trunk/drivers/char/virtio_console.c +++ b/trunk/drivers/char/virtio_console.c @@ -1677,12 +1677,17 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) portdev->config.max_nr_ports = 1; if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { multiport = true; + vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; + vdev->config->get(vdev, offsetof(struct virtio_console_config, max_nr_ports), &portdev->config.max_nr_ports, sizeof(portdev->config.max_nr_ports)); } + /* Let the Host know we support multiple ports.*/ + vdev->config->finalize_features(vdev); + err = init_vqs(portdev); if (err < 0) { dev_err(&vdev->dev, "Error %d initializing vqs\n", err); diff --git a/trunk/drivers/clocksource/sh_cmt.c b/trunk/drivers/clocksource/sh_cmt.c index dc7c033ef587..036e5865eb40 100644 --- a/trunk/drivers/clocksource/sh_cmt.c +++ b/trunk/drivers/clocksource/sh_cmt.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -152,10 +153,12 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) { int ret; - /* enable clock */ + /* wake up device and enable clock */ + pm_runtime_get_sync(&p->pdev->dev); ret = clk_enable(p->clk); if (ret) { dev_err(&p->pdev->dev, "cannot enable clock\n"); + pm_runtime_put_sync(&p->pdev->dev); return ret; } @@ -187,8 +190,9 @@ static void sh_cmt_disable(struct sh_cmt_priv *p) /* disable interrupts in CMT block */ sh_cmt_write(p, CMCSR, 0); - /* stop clock */ + /* stop clock and mark device as idle */ clk_disable(p->clk); + pm_runtime_put_sync(&p->pdev->dev); } /* private flags */ @@ -660,6 +664,7 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev) if (p) { dev_info(&pdev->dev, "kept as earlytimer\n"); + pm_runtime_enable(&pdev->dev); return 0; } @@ -674,6 +679,9 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev) kfree(p); platform_set_drvdata(pdev, NULL); } + + if (!is_early_platform_device(pdev)) + pm_runtime_enable(&pdev->dev); return ret; } diff --git a/trunk/drivers/clocksource/sh_tmu.c b/trunk/drivers/clocksource/sh_tmu.c index 808135768617..17296288a205 100644 --- a/trunk/drivers/clocksource/sh_tmu.c +++ b/trunk/drivers/clocksource/sh_tmu.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -109,10 +110,12 @@ static int sh_tmu_enable(struct sh_tmu_priv *p) { int ret; - /* enable clock */ + /* wake up device and enable clock */ + pm_runtime_get_sync(&p->pdev->dev); ret = clk_enable(p->clk); if (ret) { dev_err(&p->pdev->dev, "cannot enable clock\n"); + pm_runtime_put_sync(&p->pdev->dev); return ret; } @@ -141,8 +144,9 @@ 
static void sh_tmu_disable(struct sh_tmu_priv *p) /* disable interrupts in TMU block */ sh_tmu_write(p, TCR, 0x0000); - /* stop clock */ + /* stop clock and mark device as idle */ clk_disable(p->clk); + pm_runtime_put_sync(&p->pdev->dev); } static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta, @@ -411,6 +415,7 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev) if (p) { dev_info(&pdev->dev, "kept as earlytimer\n"); + pm_runtime_enable(&pdev->dev); return 0; } @@ -425,6 +430,9 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev) kfree(p); platform_set_drvdata(pdev, NULL); } + + if (!is_early_platform_device(pdev)) + pm_runtime_enable(&pdev->dev); return ret; } diff --git a/trunk/drivers/dma/shdma.c b/trunk/drivers/dma/shdma.c index 2a638f9f09a2..636e40925b16 100644 --- a/trunk/drivers/dma/shdma.c +++ b/trunk/drivers/dma/shdma.c @@ -343,7 +343,7 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) dmae_set_dmars(sh_chan, cfg->mid_rid); dmae_set_chcr(sh_chan, cfg->chcr); - } else { + } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) { dmae_init(sh_chan); } @@ -1144,8 +1144,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev) /* platform data */ shdev->pdata = pdata; - platform_set_drvdata(pdev, shdev); - pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); @@ -1258,6 +1256,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev) pm_runtime_put(&pdev->dev); + platform_set_drvdata(pdev, shdev); dma_async_device_register(&shdev->common); return err; @@ -1279,8 +1278,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev) if (dmars) iounmap(shdev->dmars); - - platform_set_drvdata(pdev, NULL); emapdmars: iounmap(shdev->chan_reg); synchronize_rcu(); @@ -1319,8 +1316,6 @@ static int __exit sh_dmae_remove(struct platform_device *pdev) iounmap(shdev->dmars); iounmap(shdev->chan_reg); - platform_set_drvdata(pdev, NULL); - synchronize_rcu(); kfree(shdev); diff --git a/trunk/drivers/ide/ide-cd.c b/trunk/drivers/ide/ide-cd.c index 144d27261e43..6e5123b1d341 100644 --- a/trunk/drivers/ide/ide-cd.c +++ b/trunk/drivers/ide/ide-cd.c @@ -1782,6 +1782,7 @@ static int ide_cd_probe(ide_drive_t *drive) ide_cd_read_toc(drive, &sense); g->fops = &idecd_ops; g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; + g->events = DISK_EVENT_MEDIA_CHANGE; add_disk(g); return 0; diff --git a/trunk/drivers/input/serio/serport.c b/trunk/drivers/input/serio/serport.c index 8755f5f3ad37..f3698967edf6 100644 --- a/trunk/drivers/input/serio/serport.c +++ b/trunk/drivers/input/serio/serport.c @@ -120,17 +120,21 @@ static void serport_ldisc_close(struct tty_struct *tty) * 'interrupt' routine. */ -static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) +static unsigned int serport_ldisc_receive(struct tty_struct *tty, + const unsigned char *cp, char *fp, int count) { struct serport *serport = (struct serport*) tty->disc_data; unsigned long flags; unsigned int ch_flags; + int ret = 0; int i; spin_lock_irqsave(&serport->lock, flags); - if (!test_bit(SERPORT_ACTIVE, &serport->flags)) + if (!test_bit(SERPORT_ACTIVE, &serport->flags)) { + ret = -EINVAL; goto out; + } for (i = 0; i < count; i++) { switch (fp[i]) { @@ -152,6 +156,8 @@ static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *c out: spin_unlock_irqrestore(&serport->lock, flags); + + return ret == 0 ? 
count : ret; } /* diff --git a/trunk/drivers/isdn/gigaset/ser-gigaset.c b/trunk/drivers/isdn/gigaset/ser-gigaset.c index 86a5c4f7775e..1d44d470897c 100644 --- a/trunk/drivers/isdn/gigaset/ser-gigaset.c +++ b/trunk/drivers/isdn/gigaset/ser-gigaset.c @@ -674,7 +674,7 @@ gigaset_tty_ioctl(struct tty_struct *tty, struct file *file, * cflags buffer containing error flags for received characters (ignored) * count number of received characters */ -static void +static unsigned int gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf, char *cflags, int count) { @@ -683,12 +683,12 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf, struct inbuf_t *inbuf; if (!cs) - return; + return -ENODEV; inbuf = cs->inbuf; if (!inbuf) { dev_err(cs->dev, "%s: no inbuf\n", __func__); cs_put(cs); - return; + return -EINVAL; } tail = inbuf->tail; @@ -725,6 +725,8 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf, gig_dbg(DEBUG_INTR, "%s-->BH", __func__); gigaset_schedule_event(cs); cs_put(cs); + + return count; } /* diff --git a/trunk/drivers/misc/kgdbts.c b/trunk/drivers/misc/kgdbts.c index 8cebec5e85ee..b0c56313dbbb 100644 --- a/trunk/drivers/misc/kgdbts.c +++ b/trunk/drivers/misc/kgdbts.c @@ -304,10 +304,7 @@ static int check_and_rewind_pc(char *put_str, char *arg) return 1; } /* Readjust the instruction pointer if needed */ - ip += offset; -#ifdef GDB_ADJUSTS_BREAK_OFFSET - instruction_pointer_set(&kgdbts_regs, ip); -#endif + instruction_pointer_set(&kgdbts_regs, ip + offset); return 0; } diff --git a/trunk/drivers/misc/ti-st/st_core.c b/trunk/drivers/misc/ti-st/st_core.c index f91f82eabda7..1a05fe08e2cb 100644 --- a/trunk/drivers/misc/ti-st/st_core.c +++ b/trunk/drivers/misc/ti-st/st_core.c @@ -747,8 +747,8 @@ static void st_tty_close(struct tty_struct *tty) pr_debug("%s: done ", __func__); } -static void st_tty_receive(struct tty_struct *tty, const unsigned char *data, - char *tty_flags, int count) +static unsigned int st_tty_receive(struct tty_struct *tty, + const unsigned char *data, char *tty_flags, int count) { #ifdef VERBOSE print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE, @@ -761,6 +761,8 @@ static void st_tty_receive(struct tty_struct *tty, const unsigned char *data, */ st_recv(tty->disc_data, data, count); pr_debug("done %s", __func__); + + return count; } /* wake-up function called in from the TTY layer diff --git a/trunk/drivers/net/3c509.c b/trunk/drivers/net/3c509.c index 44b28b2d7003..5f25889e27ef 100644 --- a/trunk/drivers/net/3c509.c +++ b/trunk/drivers/net/3c509.c @@ -185,7 +185,7 @@ static int max_interrupt_work = 10; static int nopnp; #endif -static int __devinit el3_common_init(struct net_device *dev); +static int el3_common_init(struct net_device *dev); static void el3_common_remove(struct net_device *dev); static ushort id_read_eeprom(int index); static ushort read_eeprom(int ioaddr, int index); @@ -395,7 +395,7 @@ static struct isa_driver el3_isa_driver = { static int isa_registered; #ifdef CONFIG_PNP -static struct pnp_device_id el3_pnp_ids[] = { +static const struct pnp_device_id el3_pnp_ids[] __devinitconst = { { .id = "TCM5090" }, /* 3Com Etherlink III (TP) */ { .id = "TCM5091" }, /* 3Com Etherlink III */ { .id = "TCM5094" }, /* 3Com Etherlink III (combo) */ @@ -478,7 +478,7 @@ static int pnp_registered; #endif /* CONFIG_PNP */ #ifdef CONFIG_EISA -static struct eisa_device_id el3_eisa_ids[] = { +static const struct eisa_device_id el3_eisa_ids[] __devinitconst = { { "TCM5090" }, { "TCM5091" }, { "TCM5092" }, @@ -508,7 +508,7 
@@ static int eisa_registered; #ifdef CONFIG_MCA static int el3_mca_probe(struct device *dev); -static short el3_mca_adapter_ids[] __initdata = { +static const short el3_mca_adapter_ids[] __devinitconst = { 0x627c, 0x627d, 0x62db, @@ -517,7 +517,7 @@ static short el3_mca_adapter_ids[] __initdata = { 0x0000 }; -static char *el3_mca_adapter_names[] __initdata = { +static const char *const el3_mca_adapter_names[] __devinitconst = { "3Com 3c529 EtherLink III (10base2)", "3Com 3c529 EtherLink III (10baseT)", "3Com 3c529 EtherLink III (test mode)", @@ -601,7 +601,7 @@ static void el3_common_remove (struct net_device *dev) } #ifdef CONFIG_MCA -static int __init el3_mca_probe(struct device *device) +static int __devinit el3_mca_probe(struct device *device) { /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch, * heavily modified by Chris Beauregard @@ -671,7 +671,7 @@ static int __init el3_mca_probe(struct device *device) #endif /* CONFIG_MCA */ #ifdef CONFIG_EISA -static int __init el3_eisa_probe (struct device *device) +static int __devinit el3_eisa_probe (struct device *device) { short i; int ioaddr, irq, if_port; diff --git a/trunk/drivers/net/3c59x.c b/trunk/drivers/net/3c59x.c index 8cc22568ebd3..99f43d275442 100644 --- a/trunk/drivers/net/3c59x.c +++ b/trunk/drivers/net/3c59x.c @@ -901,14 +901,14 @@ static const struct dev_pm_ops vortex_pm_ops = { #endif /* !CONFIG_PM */ #ifdef CONFIG_EISA -static struct eisa_device_id vortex_eisa_ids[] = { +static const struct eisa_device_id vortex_eisa_ids[] __devinitconst = { { "TCM5920", CH_3C592 }, { "TCM5970", CH_3C597 }, { "" } }; MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids); -static int __init vortex_eisa_probe(struct device *device) +static int __devinit vortex_eisa_probe(struct device *device) { void __iomem *ioaddr; struct eisa_device *edev; diff --git a/trunk/drivers/net/caif/caif_serial.c b/trunk/drivers/net/caif/caif_serial.c index 3df0c0f8b8bf..73c7e03617ec 100644 --- a/trunk/drivers/net/caif/caif_serial.c +++ b/trunk/drivers/net/caif/caif_serial.c @@ -167,8 +167,8 @@ static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size) #endif -static void ldisc_receive(struct tty_struct *tty, const u8 *data, - char *flags, int count) +static unsigned int ldisc_receive(struct tty_struct *tty, + const u8 *data, char *flags, int count) { struct sk_buff *skb = NULL; struct ser_device *ser; @@ -215,6 +215,8 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data, } else ++ser->dev->stats.rx_dropped; update_tty_status(ser); + + return count; } static int handle_tx(struct ser_device *ser) diff --git a/trunk/drivers/net/can/flexcan.c b/trunk/drivers/net/can/flexcan.c index 17678117ed69..d4990568baee 100644 --- a/trunk/drivers/net/can/flexcan.c +++ b/trunk/drivers/net/can/flexcan.c @@ -923,7 +923,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev) mem_size = resource_size(mem); if (!request_mem_region(mem->start, mem_size, pdev->name)) { err = -EBUSY; - goto failed_get; + goto failed_req; } base = ioremap(mem->start, mem_size); @@ -977,8 +977,9 @@ static int __devinit flexcan_probe(struct platform_device *pdev) iounmap(base); failed_map: release_mem_region(mem->start, mem_size); - failed_get: + failed_req: clk_put(clk); + failed_get: failed_clock: return err; } diff --git a/trunk/drivers/net/can/slcan.c b/trunk/drivers/net/can/slcan.c index 1b49df6b2470..75622d54581f 100644 --- a/trunk/drivers/net/can/slcan.c +++ b/trunk/drivers/net/can/slcan.c @@ -425,16 +425,17 @@ static void slc_setup(struct net_device 
*dev) * in parallel */ -static void slcan_receive_buf(struct tty_struct *tty, +static unsigned int slcan_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct slcan *sl = (struct slcan *) tty->disc_data; + int bytes = count; if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) - return; + return -ENODEV; /* Read the characters out of the buffer */ - while (count--) { + while (bytes--) { if (fp && *fp++) { if (!test_and_set_bit(SLF_ERROR, &sl->flags)) sl->dev->stats.rx_errors++; @@ -443,6 +444,8 @@ static void slcan_receive_buf(struct tty_struct *tty, } slcan_unesc(sl, *cp++); } + + return count; } /************************************ diff --git a/trunk/drivers/net/davinci_emac.c b/trunk/drivers/net/davinci_emac.c index dcc4a170b0f3..29a4f06fbfcf 100644 --- a/trunk/drivers/net/davinci_emac.c +++ b/trunk/drivers/net/davinci_emac.c @@ -1781,8 +1781,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) ndev = alloc_etherdev(sizeof(struct emac_priv)); if (!ndev) { dev_err(&pdev->dev, "error allocating net_device\n"); - rc = -ENOMEM; - goto free_clk; + clk_put(emac_clk); + return -ENOMEM; } platform_set_drvdata(pdev, ndev); @@ -1796,8 +1796,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) pdata = pdev->dev.platform_data; if (!pdata) { dev_err(&pdev->dev, "no platform data\n"); - rc = -ENODEV; - goto probe_quit; + return -ENODEV; } /* MAC addr and PHY mask , RMII enable info from platform_data */ @@ -1930,9 +1929,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) iounmap(priv->remap_addr); probe_quit: - free_netdev(ndev); -free_clk: clk_put(emac_clk); + free_netdev(ndev); return rc; } diff --git a/trunk/drivers/net/depca.c b/trunk/drivers/net/depca.c index 8b0084d17c8c..17654059922d 100644 --- a/trunk/drivers/net/depca.c +++ b/trunk/drivers/net/depca.c @@ -331,18 +331,18 @@ static struct { "DE422",\ ""} -static char* __initdata depca_signature[] = DEPCA_SIGNATURE; +static const char* const depca_signature[] __devinitconst = DEPCA_SIGNATURE; enum depca_type { DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown }; -static char depca_string[] = "depca"; +static const char depca_string[] = "depca"; static int depca_device_remove (struct device *device); #ifdef CONFIG_EISA -static struct eisa_device_id depca_eisa_ids[] = { +static const struct eisa_device_id depca_eisa_ids[] __devinitconst = { { "DEC4220", de422 }, { "" } }; @@ -367,19 +367,19 @@ static struct eisa_driver depca_eisa_driver = { #define DE210_ID 0x628d #define DE212_ID 0x6def -static short depca_mca_adapter_ids[] = { +static const short depca_mca_adapter_ids[] __devinitconst = { DE210_ID, DE212_ID, 0x0000 }; -static char *depca_mca_adapter_name[] = { +static const char *depca_mca_adapter_name[] = { "DEC EtherWORKS MC Adapter (DE210)", "DEC EtherWORKS MC Adapter (DE212)", NULL }; -static enum depca_type depca_mca_adapter_type[] = { +static const enum depca_type depca_mca_adapter_type[] = { de210, de212, 0 @@ -541,10 +541,9 @@ static void SetMulticastFilter(struct net_device *dev); static int load_packet(struct net_device *dev, struct sk_buff *skb); static void depca_dbg_open(struct net_device *dev); -static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 }; -static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 }; -static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 }; -static u_char *depca_irq; +static const u_char de1xx_irq[] __devinitconst = { 2, 3, 4, 5, 7, 9, 0 }; +static const 
u_char de2xx_irq[] __devinitconst = { 5, 9, 10, 11, 15, 0 }; +static const u_char de422_irq[] __devinitconst = { 5, 9, 10, 11, 0 }; static int irq; static int io; @@ -580,7 +579,7 @@ static const struct net_device_ops depca_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -static int __init depca_hw_init (struct net_device *dev, struct device *device) +static int __devinit depca_hw_init (struct net_device *dev, struct device *device) { struct depca_private *lp; int i, j, offset, netRAM, mem_len, status = 0; @@ -748,6 +747,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device) if (dev->irq < 2) { unsigned char irqnum; unsigned long irq_mask, delay; + const u_char *depca_irq; irq_mask = probe_irq_on(); @@ -770,6 +770,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device) break; default: + depca_irq = NULL; break; /* Not reached */ } @@ -1302,7 +1303,7 @@ static void SetMulticastFilter(struct net_device *dev) } } -static int __init depca_common_init (u_long ioaddr, struct net_device **devp) +static int __devinit depca_common_init (u_long ioaddr, struct net_device **devp) { int status = 0; @@ -1333,7 +1334,7 @@ static int __init depca_common_init (u_long ioaddr, struct net_device **devp) /* ** Microchannel bus I/O device probe */ -static int __init depca_mca_probe(struct device *device) +static int __devinit depca_mca_probe(struct device *device) { unsigned char pos[2]; unsigned char where; @@ -1457,7 +1458,7 @@ static int __init depca_mca_probe(struct device *device) ** ISA bus I/O device probe */ -static void __init depca_platform_probe (void) +static void __devinit depca_platform_probe (void) { int i; struct platform_device *pldev; @@ -1497,7 +1498,7 @@ static void __init depca_platform_probe (void) } } -static enum depca_type __init depca_shmem_probe (ulong *mem_start) +static enum depca_type __devinit depca_shmem_probe (ulong *mem_start) { u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES; enum depca_type adapter = unknown; @@ -1558,7 +1559,7 @@ static int __devinit depca_isa_probe (struct platform_device *device) */ #ifdef CONFIG_EISA -static int __init depca_eisa_probe (struct device *device) +static int __devinit depca_eisa_probe (struct device *device) { enum depca_type adapter = unknown; struct eisa_device *edev; @@ -1629,7 +1630,7 @@ static int __devexit depca_device_remove (struct device *device) ** and Boot (readb) ROM. This will also give us a clue to the network RAM ** base address. 
*/ -static int __init DepcaSignature(char *name, u_long base_addr) +static int __devinit DepcaSignature(char *name, u_long base_addr) { u_int i, j, k; void __iomem *ptr; diff --git a/trunk/drivers/net/dm9000.c b/trunk/drivers/net/dm9000.c index ee597e676ee5..fbaff3584bd4 100644 --- a/trunk/drivers/net/dm9000.c +++ b/trunk/drivers/net/dm9000.c @@ -1157,6 +1157,9 @@ dm9000_open(struct net_device *dev) irqflags |= IRQF_SHARED; + if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) + return -EAGAIN; + /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ mdelay(1); /* delay needs by DM9000B */ @@ -1165,9 +1168,6 @@ dm9000_open(struct net_device *dev) dm9000_reset(db); dm9000_init_dm9000(dev); - if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) - return -EAGAIN; - /* Init driver variable */ db->dbug_cnt = 0; diff --git a/trunk/drivers/net/hamradio/6pack.c b/trunk/drivers/net/hamradio/6pack.c index 3e5d0b6b6516..992089639ea4 100644 --- a/trunk/drivers/net/hamradio/6pack.c +++ b/trunk/drivers/net/hamradio/6pack.c @@ -456,7 +456,7 @@ static void sixpack_write_wakeup(struct tty_struct *tty) * a block of 6pack data has been received, which can now be decapsulated * and sent on to some IP layer for further processing. */ -static void sixpack_receive_buf(struct tty_struct *tty, +static unsigned int sixpack_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct sixpack *sp; @@ -464,11 +464,11 @@ static void sixpack_receive_buf(struct tty_struct *tty, int count1; if (!count) - return; + return 0; sp = sp_get(tty); if (!sp) - return; + return -ENODEV; memcpy(buf, cp, count < sizeof(buf) ? count : sizeof(buf)); @@ -487,6 +487,8 @@ static void sixpack_receive_buf(struct tty_struct *tty, sp_put(sp); tty_unthrottle(tty); + + return count1; } /* diff --git a/trunk/drivers/net/hamradio/mkiss.c b/trunk/drivers/net/hamradio/mkiss.c index 4c628393c8b1..0e4f23531140 100644 --- a/trunk/drivers/net/hamradio/mkiss.c +++ b/trunk/drivers/net/hamradio/mkiss.c @@ -923,13 +923,14 @@ static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file, * a block of data has been received, which can now be decapsulated * and sent on to the AX.25 layer for further processing. */ -static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp, - char *fp, int count) +static unsigned int mkiss_receive_buf(struct tty_struct *tty, + const unsigned char *cp, char *fp, int count) { struct mkiss *ax = mkiss_get(tty); + int bytes = count; if (!ax) - return; + return -ENODEV; /* * Argh! mtu change time! 
- costs us the packet part received @@ -939,7 +940,7 @@ static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp, ax_changedmtu(ax); /* Read the characters out of the buffer */ - while (count--) { + while (bytes--) { if (fp != NULL && *fp++) { if (!test_and_set_bit(AXF_ERROR, &ax->flags)) ax->dev->stats.rx_errors++; @@ -952,6 +953,8 @@ static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp, mkiss_put(ax); tty_unthrottle(tty); + + return count; } /* diff --git a/trunk/drivers/net/hp100.c b/trunk/drivers/net/hp100.c index 8e10d2f6a5ad..c52a1df5d922 100644 --- a/trunk/drivers/net/hp100.c +++ b/trunk/drivers/net/hp100.c @@ -188,14 +188,14 @@ struct hp100_private { * variables */ #ifdef CONFIG_ISA -static const char *hp100_isa_tbl[] = { +static const char *const hp100_isa_tbl[] __devinitconst = { "HWPF150", /* HP J2573 rev A */ "HWP1950", /* HP J2573 */ }; #endif #ifdef CONFIG_EISA -static struct eisa_device_id hp100_eisa_tbl[] = { +static const struct eisa_device_id hp100_eisa_tbl[] __devinitconst = { { "HWPF180" }, /* HP J2577 rev A */ { "HWP1920" }, /* HP 27248B */ { "HWP1940" }, /* HP J2577 */ @@ -336,7 +336,7 @@ static __devinit const char *hp100_read_id(int ioaddr) } #ifdef CONFIG_ISA -static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr) +static __devinit int hp100_isa_probe1(struct net_device *dev, int ioaddr) { const char *sig; int i; @@ -372,7 +372,7 @@ static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr) * EISA and PCI are handled by device infrastructure. */ -static int __init hp100_isa_probe(struct net_device *dev, int addr) +static int __devinit hp100_isa_probe(struct net_device *dev, int addr) { int err = -ENODEV; @@ -396,7 +396,7 @@ static int __init hp100_isa_probe(struct net_device *dev, int addr) #endif /* CONFIG_ISA */ #if !defined(MODULE) && defined(CONFIG_ISA) -struct net_device * __init hp100_probe(int unit) +struct net_device * __devinit hp100_probe(int unit) { struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); int err; @@ -2843,7 +2843,7 @@ static void cleanup_dev(struct net_device *d) } #ifdef CONFIG_EISA -static int __init hp100_eisa_probe (struct device *gendev) +static int __devinit hp100_eisa_probe (struct device *gendev) { struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); struct eisa_device *edev = to_eisa_device(gendev); diff --git a/trunk/drivers/net/ibmlana.c b/trunk/drivers/net/ibmlana.c index a7d6cad32953..136d7544cc33 100644 --- a/trunk/drivers/net/ibmlana.c +++ b/trunk/drivers/net/ibmlana.c @@ -895,12 +895,12 @@ static int ibmlana_irq; static int ibmlana_io; static int startslot; /* counts through slots when probing multiple devices */ -static short ibmlana_adapter_ids[] __initdata = { +static const short ibmlana_adapter_ids[] __devinitconst = { IBM_LANA_ID, 0x0000 }; -static char *ibmlana_adapter_names[] __devinitdata = { +static const char *const ibmlana_adapter_names[] __devinitconst = { "IBM LAN Adapter/A", NULL }; diff --git a/trunk/drivers/net/irda/irtty-sir.c b/trunk/drivers/net/irda/irtty-sir.c index 3352b2443e58..035861d8acb1 100644 --- a/trunk/drivers/net/irda/irtty-sir.c +++ b/trunk/drivers/net/irda/irtty-sir.c @@ -216,23 +216,23 @@ static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t * usbserial: urb-complete-interrupt / softint */ -static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp, - char *fp, int count) +static unsigned int irtty_receive_buf(struct tty_struct *tty, + 
const unsigned char *cp, char *fp, int count) { struct sir_dev *dev; struct sirtty_cb *priv = tty->disc_data; int i; - IRDA_ASSERT(priv != NULL, return;); - IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;); + IRDA_ASSERT(priv != NULL, return -ENODEV;); + IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EINVAL;); if (unlikely(count==0)) /* yes, this happens */ - return; + return 0; dev = priv->dev; if (!dev) { IRDA_WARNING("%s(), not ready yet!\n", __func__); - return; + return -ENODEV; } for (i = 0; i < count; i++) { @@ -242,11 +242,13 @@ static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp, if (fp && *fp++) { IRDA_DEBUG(0, "Framing or parity error!\n"); sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */ - return; + return -EINVAL; } } sirdev_receive(dev, cp, count); + + return count; } /* diff --git a/trunk/drivers/net/irda/smsc-ircc2.c b/trunk/drivers/net/irda/smsc-ircc2.c index 8800e1fe4129..69b5707db369 100644 --- a/trunk/drivers/net/irda/smsc-ircc2.c +++ b/trunk/drivers/net/irda/smsc-ircc2.c @@ -222,19 +222,19 @@ static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 s static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self); /* Probing */ -static int __init smsc_ircc_look_for_chips(void); -static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type); -static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type); -static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type); -static int __init smsc_superio_fdc(unsigned short cfg_base); -static int __init smsc_superio_lpc(unsigned short cfg_base); +static int smsc_ircc_look_for_chips(void); +static const struct smsc_chip * smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type); +static int smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type); +static int smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type); +static int smsc_superio_fdc(unsigned short cfg_base); +static int smsc_superio_lpc(unsigned short cfg_base); #ifdef CONFIG_PCI -static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf); -static int __init preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf); -static void __init preconfigure_ali_port(struct pci_dev *dev, +static int preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf); +static int preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf); +static void preconfigure_ali_port(struct pci_dev *dev, unsigned short port); -static int __init preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf); -static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg, +static int preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf); +static int smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg, unsigned short ircc_fir, unsigned short ircc_sir, unsigned char ircc_dma, @@ -366,7 +366,7 @@ static inline void register_bank(int iobase, int bank) } /* PNP hotplug support */ -static const struct pnp_device_id smsc_ircc_pnp_table[] = { +static const struct pnp_device_id smsc_ircc_pnp_table[] __devinitconst = { { .id = "SMCf010", .driver_data = 0 }, /* and presumably 
others */ { } @@ -515,7 +515,7 @@ static const struct net_device_ops smsc_ircc_netdev_ops = { * Try to open driver instance * */ -static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq) +static int __devinit smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq) { struct smsc_ircc_cb *self; struct net_device *dev; @@ -2273,7 +2273,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho } -static int __init smsc_access(unsigned short cfg_base, unsigned char reg) +static int __devinit smsc_access(unsigned short cfg_base, unsigned char reg) { IRDA_DEBUG(1, "%s\n", __func__); @@ -2281,7 +2281,7 @@ static int __init smsc_access(unsigned short cfg_base, unsigned char reg) return inb(cfg_base) != reg ? -1 : 0; } -static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type) +static const struct smsc_chip * __devinit smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type) { u8 devid, xdevid, rev; @@ -2406,7 +2406,7 @@ static int __init smsc_superio_lpc(unsigned short cfg_base) #ifdef CONFIG_PCI #define PCIID_VENDOR_INTEL 0x8086 #define PCIID_VENDOR_ALI 0x10b9 -static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = { +static const struct smsc_ircc_subsystem_configuration subsystem_configurations[] __devinitconst = { /* * Subsystems needing entries: * 0x10b9:0x1533 0x103c:0x0850 HP nx9010 family @@ -2532,7 +2532,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini * (FIR port, SIR port, FIR DMA, FIR IRQ) * through the chip configuration port. */ -static int __init preconfigure_smsc_chip(struct +static int __devinit preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf) { @@ -2633,7 +2633,7 @@ static int __init preconfigure_smsc_chip(struct * or Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge. * They all work the same way! */ -static int __init preconfigure_through_82801(struct pci_dev *dev, +static int __devinit preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf) @@ -2786,7 +2786,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev, * This is based on reverse-engineering since ALi does not * provide any data sheet for the 1533 chip. 
*/ -static void __init preconfigure_ali_port(struct pci_dev *dev, +static void __devinit preconfigure_ali_port(struct pci_dev *dev, unsigned short port) { unsigned char reg; @@ -2824,7 +2824,7 @@ static void __init preconfigure_ali_port(struct pci_dev *dev, IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port); } -static int __init preconfigure_through_ali(struct pci_dev *dev, +static int __devinit preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf) @@ -2837,7 +2837,7 @@ static int __init preconfigure_through_ali(struct pci_dev *dev, return preconfigure_smsc_chip(conf); } -static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg, +static int __devinit smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg, unsigned short ircc_fir, unsigned short ircc_sir, unsigned char ircc_dma, @@ -2849,7 +2849,7 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg, int ret = 0; for_each_pci_dev(dev) { - struct smsc_ircc_subsystem_configuration *conf; + const struct smsc_ircc_subsystem_configuration *conf; /* * Cache the subsystem vendor/device: diff --git a/trunk/drivers/net/ks8842.c b/trunk/drivers/net/ks8842.c index fc12ac0d9f2e..4d40626b3bfa 100644 --- a/trunk/drivers/net/ks8842.c +++ b/trunk/drivers/net/ks8842.c @@ -661,7 +661,7 @@ static void ks8842_rx_frame(struct net_device *netdev, /* check the status */ if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { - struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3); + struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len); if (skb) { diff --git a/trunk/drivers/net/ne3210.c b/trunk/drivers/net/ne3210.c index 243ed2aee88e..e8984b0ca521 100644 --- a/trunk/drivers/net/ne3210.c +++ b/trunk/drivers/net/ne3210.c @@ -80,17 +80,20 @@ static void ne3210_block_output(struct net_device *dev, int count, const unsigne #define NE3210_DEBUG 0x0 -static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3}; -static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0}; -static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"}; -static int ifmap_val[] __initdata = { +static const unsigned char irq_map[] __devinitconst = + { 15, 12, 11, 10, 9, 7, 5, 3 }; +static const unsigned int shmem_map[] __devinitconst = + { 0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0 }; +static const char *const ifmap[] __devinitconst = + { "UTP", "?", "BNC", "AUI" }; +static const int ifmap_val[] __devinitconst = { IF_PORT_10BASET, IF_PORT_UNKNOWN, IF_PORT_10BASE2, IF_PORT_AUI, }; -static int __init ne3210_eisa_probe (struct device *device) +static int __devinit ne3210_eisa_probe (struct device *device) { unsigned long ioaddr, phys_mem; int i, retval, port_index; @@ -313,7 +316,7 @@ static void ne3210_block_output(struct net_device *dev, int count, memcpy_toio(shmem, buf, count); } -static struct eisa_device_id ne3210_ids[] = { +static const struct eisa_device_id ne3210_ids[] __devinitconst = { { "EGL0101" }, { "NVL1801" }, { "" }, diff --git a/trunk/drivers/net/ppp_async.c b/trunk/drivers/net/ppp_async.c index a1b82c9c67d2..53872d7d7382 100644 --- a/trunk/drivers/net/ppp_async.c +++ b/trunk/drivers/net/ppp_async.c @@ -340,7 +340,7 @@ ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait) } /* May sleep, don't call from interrupt level or with interrupts disabled */ -static void +static unsigned int ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf, char 
*cflags, int count) { @@ -348,7 +348,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf, unsigned long flags; if (!ap) - return; + return -ENODEV; spin_lock_irqsave(&ap->recv_lock, flags); ppp_async_input(ap, buf, cflags, count); spin_unlock_irqrestore(&ap->recv_lock, flags); @@ -356,6 +356,8 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf, tasklet_schedule(&ap->tsk); ap_put(ap); tty_unthrottle(tty); + + return count; } static void diff --git a/trunk/drivers/net/ppp_synctty.c b/trunk/drivers/net/ppp_synctty.c index 2573f525f11c..0815790a5cf9 100644 --- a/trunk/drivers/net/ppp_synctty.c +++ b/trunk/drivers/net/ppp_synctty.c @@ -381,7 +381,7 @@ ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait) } /* May sleep, don't call from interrupt level or with interrupts disabled */ -static void +static unsigned int ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf, char *cflags, int count) { @@ -389,7 +389,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf, unsigned long flags; if (!ap) - return; + return -ENODEV; spin_lock_irqsave(&ap->recv_lock, flags); ppp_sync_input(ap, buf, cflags, count); spin_unlock_irqrestore(&ap->recv_lock, flags); @@ -397,6 +397,8 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf, tasklet_schedule(&ap->tsk); sp_put(ap); tty_unthrottle(tty); + + return count; } static void diff --git a/trunk/drivers/net/slip.c b/trunk/drivers/net/slip.c index 8ec1a9a0bb9a..584809c656d5 100644 --- a/trunk/drivers/net/slip.c +++ b/trunk/drivers/net/slip.c @@ -670,16 +670,17 @@ static void sl_setup(struct net_device *dev) * in parallel */ -static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp, - char *fp, int count) +static unsigned int slip_receive_buf(struct tty_struct *tty, + const unsigned char *cp, char *fp, int count) { struct slip *sl = tty->disc_data; + int bytes = count; if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) - return; + return -ENODEV; /* Read the characters out of the buffer */ - while (count--) { + while (bytes--) { if (fp && *fp++) { if (!test_and_set_bit(SLF_ERROR, &sl->flags)) sl->dev->stats.rx_errors++; @@ -693,6 +694,8 @@ static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp, #endif slip_unesc(sl, *cp++); } + + return count; } /************************************ diff --git a/trunk/drivers/net/smc-mca.c b/trunk/drivers/net/smc-mca.c index d07c39cb4daf..0f29f261fcfe 100644 --- a/trunk/drivers/net/smc-mca.c +++ b/trunk/drivers/net/smc-mca.c @@ -156,7 +156,7 @@ static const struct { { 14, 15 } }; -static short smc_mca_adapter_ids[] __initdata = { +static const short smc_mca_adapter_ids[] __devinitconst = { 0x61c8, 0x61c9, 0x6fc0, @@ -168,7 +168,7 @@ static short smc_mca_adapter_ids[] __initdata = { 0x0000 }; -static char *smc_mca_adapter_names[] __initdata = { +static const char *const smc_mca_adapter_names[] __devinitconst = { "SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)", "SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)", "WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)", @@ -199,7 +199,7 @@ static const struct net_device_ops ultramca_netdev_ops = { #endif }; -static int __init ultramca_probe(struct device *gen_dev) +static int __devinit ultramca_probe(struct device *gen_dev) { unsigned short ioaddr; struct net_device *dev; diff --git a/trunk/drivers/net/tg3.c b/trunk/drivers/net/tg3.c index a1f9f9eef37d..f4b01c638a33 100644 --- a/trunk/drivers/net/tg3.c +++ 
b/trunk/drivers/net/tg3.c @@ -5774,7 +5774,7 @@ static void tg3_skb_error_unmap(struct tg3_napi *tnapi, dma_unmap_addr(txb, mapping), skb_headlen(skb), PCI_DMA_TODEVICE); - for (i = 0; i < last; i++) { + for (i = 0; i <= last; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; entry = NEXT_TX(entry); diff --git a/trunk/drivers/net/tokenring/madgemc.c b/trunk/drivers/net/tokenring/madgemc.c index 2bedc0ace812..1313aa1315f0 100644 --- a/trunk/drivers/net/tokenring/madgemc.c +++ b/trunk/drivers/net/tokenring/madgemc.c @@ -727,7 +727,7 @@ static int __devexit madgemc_remove(struct device *device) return 0; } -static short madgemc_adapter_ids[] __initdata = { +static const short madgemc_adapter_ids[] __devinitconst = { 0x002d, 0x0000 }; diff --git a/trunk/drivers/net/tulip/de4x5.c b/trunk/drivers/net/tulip/de4x5.c index efaa1d69b720..45144d5bd11b 100644 --- a/trunk/drivers/net/tulip/de4x5.c +++ b/trunk/drivers/net/tulip/de4x5.c @@ -1995,7 +1995,7 @@ SetMulticastFilter(struct net_device *dev) static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST; -static int __init de4x5_eisa_probe (struct device *gendev) +static int __devinit de4x5_eisa_probe (struct device *gendev) { struct eisa_device *edev; u_long iobase; @@ -2097,7 +2097,7 @@ static int __devexit de4x5_eisa_remove (struct device *device) return 0; } -static struct eisa_device_id de4x5_eisa_ids[] = { +static const struct eisa_device_id de4x5_eisa_ids[] __devinitconst = { { "DEC4250", 0 }, /* 0 is the board name index... */ { "" } }; diff --git a/trunk/drivers/net/usb/catc.c b/trunk/drivers/net/usb/catc.c index 8056f8a27c6a..d7221c4a5dcf 100644 --- a/trunk/drivers/net/usb/catc.c +++ b/trunk/drivers/net/usb/catc.c @@ -495,7 +495,7 @@ static void catc_ctrl_run(struct catc *catc) if (!q->dir && q->buf && q->len) memcpy(catc->ctrl_buf, q->buf, q->len); - if ((status = usb_submit_urb(catc->ctrl_urb, GFP_ATOMIC))) + if ((status = usb_submit_urb(catc->ctrl_urb, GFP_KERNEL))) err("submit(ctrl_urb) status %d", status); } diff --git a/trunk/drivers/net/usb/cdc_ncm.c b/trunk/drivers/net/usb/cdc_ncm.c index f33ca6aa29e9..cdd3ae486109 100644 --- a/trunk/drivers/net/usb/cdc_ncm.c +++ b/trunk/drivers/net/usb/cdc_ncm.c @@ -54,7 +54,7 @@ #include #include -#define DRIVER_VERSION "01-June-2011" +#define DRIVER_VERSION "24-May-2011" /* CDC NCM subclass 3.2.1 */ #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 @@ -1234,7 +1234,6 @@ static struct usb_driver cdc_ncm_driver = { .disconnect = cdc_ncm_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, - .reset_resume = usbnet_resume, .supports_autosuspend = 1, }; diff --git a/trunk/drivers/net/virtio_net.c b/trunk/drivers/net/virtio_net.c index f6853247a620..0cb0b0632672 100644 --- a/trunk/drivers/net/virtio_net.c +++ b/trunk/drivers/net/virtio_net.c @@ -609,7 +609,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) * before it gets out of hand. Naturally, this wastes entries. */ if (capacity < 2+MAX_SKB_FRAGS) { netif_stop_queue(dev); - if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) { + if (unlikely(!virtqueue_enable_cb(vi->svq))) { /* More just got used, free them then recheck. */ capacity += free_old_xmit_skbs(vi); if (capacity >= 2+MAX_SKB_FRAGS) { diff --git a/trunk/drivers/net/wan/x25_asy.c b/trunk/drivers/net/wan/x25_asy.c index 24297b274cd4..40398bf7d036 100644 --- a/trunk/drivers/net/wan/x25_asy.c +++ b/trunk/drivers/net/wan/x25_asy.c @@ -517,17 +517,18 @@ static int x25_asy_close(struct net_device *dev) * and sent on to some IP layer for further processing. 
*/ -static void x25_asy_receive_buf(struct tty_struct *tty, +static unsigned int x25_asy_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct x25_asy *sl = tty->disc_data; + int bytes = count; if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev)) return; /* Read the characters out of the buffer */ - while (count--) { + while (bytes--) { if (fp && *fp++) { if (!test_and_set_bit(SLF_ERROR, &sl->flags)) sl->dev->stats.rx_errors++; @@ -536,6 +537,8 @@ static void x25_asy_receive_buf(struct tty_struct *tty, } x25_asy_unesc(sl, *cp++); } + + return count; } /* diff --git a/trunk/drivers/net/wireless/ath/ath9k/Kconfig b/trunk/drivers/net/wireless/ath/ath9k/Kconfig index d9c08c619a3a..d9ff8413ab9a 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/Kconfig +++ b/trunk/drivers/net/wireless/ath/ath9k/Kconfig @@ -26,6 +26,7 @@ config ATH9K config ATH9K_PCI bool "Atheros ath9k PCI/PCIe bus support" depends on ATH9K && PCI + default PCI ---help--- This option enables the PCI bus support in ath9k. diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/trunk/drivers/net/wireless/ath/ath9k/ar9002_calib.c index 2d4c0910295b..015d97439935 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9002_calib.c @@ -829,7 +829,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) if (AR_SREV_9271(ah)) { if (!ar9285_hw_cl_cal(ah, chan)) return false; - } else if (AR_SREV_9285(ah) && AR_SREV_9285_12_OR_LATER(ah)) { + } else if (AR_SREV_9285_12_OR_LATER(ah)) { if (!ar9285_hw_clc(ah, chan)) return false; } else { diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index ff8150e46f0e..0ca7635d0669 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -4645,16 +4645,10 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah, case 1: break; case 2: - if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN) - scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; - else - scaledPower = 0; + scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; break; case 3: - if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN) - scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; - else - scaledPower = 0; + scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; break; } diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 892c48b15434..eee23ecd118a 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -1381,25 +1381,3 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah) "==== BB update: done ====\n\n"); } EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info); - -void ar9003_hw_disable_phy_restart(struct ath_hw *ah) -{ - u32 val; - - /* While receiving unsupported rate frame rx state machine - * gets into a state 0xb and if phy_restart happens in that - * state, BB would go hang. If RXSM is in 0xb state after - * first bb panic, ensure to disable the phy_restart. 
- */ - if (!((MS(ah->bb_watchdog_last_status, - AR_PHY_WATCHDOG_RX_OFDM_SM) == 0xb) || - ah->bb_hang_rx_ofdm)) - return; - - ah->bb_hang_rx_ofdm = true; - val = REG_READ(ah, AR_PHY_RESTART); - val &= ~AR_PHY_RESTART_ENA; - - REG_WRITE(ah, AR_PHY_RESTART, val); -} -EXPORT_SYMBOL(ar9003_hw_disable_phy_restart); diff --git a/trunk/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/trunk/drivers/net/wireless/ath/ath9k/eeprom_9287.c index 343fc9f946db..7856f0d4512d 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/trunk/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -524,16 +524,10 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah, case 1: break; case 2: - if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN) - scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; - else - scaledPower = 0; + scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; break; case 3: - if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN) - scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; - else - scaledPower = 0; + scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; break; } scaledPower = max((u16)0, scaledPower); diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw.c b/trunk/drivers/net/wireless/ath/ath9k/hw.c index 1be7c8bbef84..72543ce8f616 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/hw.c +++ b/trunk/drivers/net/wireless/ath/ath9k/hw.c @@ -1555,12 +1555,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, if (ah->btcoex_hw.enabled) ath9k_hw_btcoex_enable(ah); - if (AR_SREV_9300_20_OR_LATER(ah)) { + if (AR_SREV_9300_20_OR_LATER(ah)) ar9003_hw_bb_watchdog_config(ah); - ar9003_hw_disable_phy_restart(ah); - } - ath9k_hw_apply_gpio_override(ah); return 0; diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw.h b/trunk/drivers/net/wireless/ath/ath9k/hw.h index 4b157c53d1a8..57435ce62792 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/hw.h +++ b/trunk/drivers/net/wireless/ath/ath9k/hw.h @@ -842,7 +842,6 @@ struct ath_hw { u32 bb_watchdog_last_status; u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */ - u8 bb_hang_rx_ofdm; /* true if bb hang due to rx_ofdm */ unsigned int paprd_target_power; unsigned int paprd_training_power; @@ -991,7 +990,6 @@ void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah); void ar9003_hw_bb_watchdog_config(struct ath_hw *ah); void ar9003_hw_bb_watchdog_read(struct ath_hw *ah); void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah); -void ar9003_hw_disable_phy_restart(struct ath_hw *ah); void ar9003_paprd_enable(struct ath_hw *ah, bool val); void ar9003_paprd_populate_single_table(struct ath_hw *ah, struct ath9k_hw_cal_data *caldata, diff --git a/trunk/drivers/net/wireless/ath/ath9k/main.c b/trunk/drivers/net/wireless/ath/ath9k/main.c index 2ca351fe6d3c..a198ee374b05 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/main.c +++ b/trunk/drivers/net/wireless/ath/ath9k/main.c @@ -670,8 +670,7 @@ void ath9k_tasklet(unsigned long data) u32 status = sc->intrstatus; u32 rxmask; - if ((status & ATH9K_INT_FATAL) || - (status & ATH9K_INT_BB_WATCHDOG)) { + if (status & ATH9K_INT_FATAL) { ath_reset(sc, true); return; } @@ -738,7 +737,6 @@ irqreturn_t ath_isr(int irq, void *dev) { #define SCHED_INTR ( \ ATH9K_INT_FATAL | \ - ATH9K_INT_BB_WATCHDOG | \ ATH9K_INT_RXORN | \ ATH9K_INT_RXEOL | \ ATH9K_INT_RX | \ diff --git a/trunk/drivers/net/wireless/ath/ath9k/rc.c b/trunk/drivers/net/wireless/ath/ath9k/rc.c index ba7f36ab0a74..17542214c93f 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/rc.c +++ b/trunk/drivers/net/wireless/ath/ath9k/rc.c @@ -689,8 
+689,7 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table, if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) { rate->flags |= IEEE80211_TX_RC_MCS; - if (WLAN_RC_PHY_40(rate_table->info[rix].phy) && - conf_is_ht40(&txrc->hw->conf)) + if (WLAN_RC_PHY_40(rate_table->info[rix].phy)) rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy)) rate->flags |= IEEE80211_TX_RC_SHORT_GI; diff --git a/trunk/drivers/net/wireless/b43/phy_n.c b/trunk/drivers/net/wireless/b43/phy_n.c index 05960ddde24e..9ed65157bef5 100644 --- a/trunk/drivers/net/wireless/b43/phy_n.c +++ b/trunk/drivers/net/wireless/b43/phy_n.c @@ -3093,7 +3093,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev, int freq; bool avoid = false; u8 length; - u16 tmp, core, type, count, max, numb, last = 0, cmd; + u16 tmp, core, type, count, max, numb, last, cmd; const u16 *table; bool phy6or5x; diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-lib.c index a7a4739880dc..7e5e85a017b5 100644 --- a/trunk/drivers/net/wireless/iwlegacy/iwl-4965-lib.c +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-lib.c @@ -628,11 +628,11 @@ void iwl4965_rx_reply_rx(struct iwl_priv *priv, /* rx_status carries information about the packet to mac80211 */ rx_status.mactime = le64_to_cpu(phy_res->timestamp); - rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? - IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; rx_status.freq = ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), rx_status.band); + rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; rx_status.rate_idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); rx_status.flag = 0; diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965.c b/trunk/drivers/net/wireless/iwlegacy/iwl-4965.c index f9db25bb35c3..f5433c74b845 100644 --- a/trunk/drivers/net/wireless/iwlegacy/iwl-4965.c +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965.c @@ -1543,7 +1543,7 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv) s32 temp; temp = iwl4965_hw_get_temperature(priv); - if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp)) + if (temp < 0) return; if (priv->temperature != temp) { diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-6000.c b/trunk/drivers/net/wireless/iwlwifi/iwl-6000.c index fda6fe08cf91..f8c710db6e6f 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -603,27 +603,19 @@ struct iwl_cfg iwl6050_2abg_cfg = { IWL_DEVICE_6050, }; -#define IWL_DEVICE_6150 \ - .fw_name_pre = IWL6050_FW_PRE, \ - .ucode_api_max = IWL6050_UCODE_API_MAX, \ - .ucode_api_min = IWL6050_UCODE_API_MIN, \ - .ops = &iwl6150_ops, \ - .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \ - .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \ - .base_params = &iwl6050_base_params, \ - .need_dc_calib = true, \ - .led_mode = IWL_LED_BLINK, \ - .internal_wimax_coex = true - struct iwl_cfg iwl6150_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", - IWL_DEVICE_6150, + .fw_name_pre = IWL6050_FW_PRE, + .ucode_api_max = IWL6050_UCODE_API_MAX, + .ucode_api_min = IWL6050_UCODE_API_MIN, + .eeprom_ver = EEPROM_6150_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, + .ops = &iwl6150_ops, + .base_params = &iwl6050_base_params, .ht_params = &iwl6000_ht_params, -}; - -struct iwl_cfg iwl6150_bg_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N + 
WiMAX 6150 BG", - IWL_DEVICE_6150, + .need_dc_calib = true, + .led_mode = IWL_LED_RF_STATE, + .internal_wimax_coex = true, }; struct iwl_cfg iwl6000_3agn_cfg = { diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c index a662adcb2adb..11c6c1169e78 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -3831,11 +3831,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { /* 6150 WiFi/WiMax Series */ {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1306, iwl6150_bgn_cfg)}, {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1326, iwl6150_bgn_cfg)}, {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)}, + {IWL_PCI_DEVICE(0x0886, 0x1316, iwl6150_bgn_cfg)}, /* 1000 Series WiFi */ {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.h b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.h index d1716844002e..2495fe7a58cb 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.h @@ -89,7 +89,6 @@ extern struct iwl_cfg iwl6000_3agn_cfg; extern struct iwl_cfg iwl6050_2agn_cfg; extern struct iwl_cfg iwl6050_2abg_cfg; extern struct iwl_cfg iwl6150_bgn_cfg; -extern struct iwl_cfg iwl6150_bg_cfg; extern struct iwl_cfg iwl1000_bgn_cfg; extern struct iwl_cfg iwl1000_bg_cfg; extern struct iwl_cfg iwl100_bgn_cfg; diff --git a/trunk/drivers/net/wireless/libertas/cmd.c b/trunk/drivers/net/wireless/libertas/cmd.c index 71c8f3fccfa1..84566db486d2 100644 --- a/trunk/drivers/net/wireless/libertas/cmd.c +++ b/trunk/drivers/net/wireless/libertas/cmd.c @@ -994,8 +994,6 @@ static void lbs_submit_command(struct lbs_private *priv, cmd = cmdnode->cmdbuf; spin_lock_irqsave(&priv->driver_lock, flags); - priv->seqnum++; - cmd->seqnum = cpu_to_le16(priv->seqnum); priv->cur_cmd = cmdnode; spin_unlock_irqrestore(&priv->driver_lock, flags); @@ -1623,9 +1621,11 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv, /* Copy the incoming command to the buffer */ memcpy(cmdnode->cmdbuf, in_cmd, in_cmd_size); - /* Set command, clean result, move to buffer */ + /* Set sequence number, clean result, move to buffer */ + priv->seqnum++; cmdnode->cmdbuf->command = cpu_to_le16(command); cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size); + cmdnode->cmdbuf->seqnum = cpu_to_le16(priv->seqnum); cmdnode->cmdbuf->result = 0; lbs_deb_host("PREP_CMD: command 0x%04x\n", command); diff --git a/trunk/drivers/net/wireless/mwifiex/sdio.h b/trunk/drivers/net/wireless/mwifiex/sdio.h index 4e97e90aa399..a0e9bc5253e0 100644 --- a/trunk/drivers/net/wireless/mwifiex/sdio.h +++ b/trunk/drivers/net/wireless/mwifiex/sdio.h @@ -167,8 +167,8 @@ /* Rx unit register */ #define CARD_RX_UNIT_REG 0x63 -/* Event header len w/o 4 bytes of interface header */ -#define MWIFIEX_EVENT_HEADER_LEN 4 +/* Event header Len*/ +#define MWIFIEX_EVENT_HEADER_LEN 8 /* Max retry number of CMD53 write */ #define MAX_WRITE_IOMEM_RETRY 2 diff --git a/trunk/drivers/net/wireless/rt2x00/Kconfig b/trunk/drivers/net/wireless/rt2x00/Kconfig index b2f8b8fd4d2d..9def1e5369a1 100644 --- a/trunk/drivers/net/wireless/rt2x00/Kconfig +++ b/trunk/drivers/net/wireless/rt2x00/Kconfig @@ -166,6 +166,7 @@ config RT2800USB_RT35XX config RT2800USB_RT53XX bool "rt2800usb - Include 
support for rt53xx devices (EXPERIMENTAL)" depends on EXPERIMENTAL + default y ---help--- This adds support for rt53xx wireless chipset family to the rt2800pci driver. diff --git a/trunk/drivers/net/wireless/rtlwifi/pci.c b/trunk/drivers/net/wireless/rtlwifi/pci.c index 89100e7c553b..a40952845436 100644 --- a/trunk/drivers/net/wireless/rtlwifi/pci.c +++ b/trunk/drivers/net/wireless/rtlwifi/pci.c @@ -669,6 +669,11 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) &rx_status, (u8 *) pdesc, skb); + pci_unmap_single(rtlpci->pdev, + *((dma_addr_t *) skb->cb), + rtlpci->rxbuffersize, + PCI_DMA_FROMDEVICE); + skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc, false, HW_DESC_RXPKT_LEN)); @@ -685,21 +690,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) hdr = rtl_get_hdr(skb); fc = rtl_get_fc(skb); - /* try for new buffer - if allocation fails, drop - * frame and reuse old buffer - */ - new_skb = dev_alloc_skb(rtlpci->rxbuffersize); - if (unlikely(!new_skb)) { - RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV), - DBG_DMESG, - ("can't alloc skb for rx\n")); - goto done; - } - pci_unmap_single(rtlpci->pdev, - *((dma_addr_t *) skb->cb), - rtlpci->rxbuffersize, - PCI_DMA_FROMDEVICE); - if (!stats.crc || !stats.hwerror) { memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); @@ -768,7 +758,15 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) rtl_lps_leave(hw); } + new_skb = dev_alloc_skb(rtlpci->rxbuffersize); + if (unlikely(!new_skb)) { + RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV), + DBG_DMESG, + ("can't alloc skb for rx\n")); + goto done; + } skb = new_skb; + /*skb->dev = dev; */ rtlpci->rx_ring[rx_queue_idx].rx_buf[rtlpci-> rx_ring @@ -1115,13 +1113,6 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw) rtlpci->rx_ring[rx_queue_idx].idx = 0; - /* If amsdu_8k is disabled, set buffersize to 4096. This - * change will reduce memory fragmentation. 
- */ - if (rtlpci->rxbuffersize > 4096 && - rtlpriv->rtlhal.disable_amsdu_8k) - rtlpci->rxbuffersize = 4096; - for (i = 0; i < rtlpci->rxringcount; i++) { struct sk_buff *skb = dev_alloc_skb(rtlpci->rxbuffersize); diff --git a/trunk/drivers/net/wireless/wl12xx/conf.h b/trunk/drivers/net/wireless/wl12xx/conf.h index c83fefb6662f..1ab6c86aac40 100644 --- a/trunk/drivers/net/wireless/wl12xx/conf.h +++ b/trunk/drivers/net/wireless/wl12xx/conf.h @@ -1157,9 +1157,6 @@ struct conf_sched_scan_settings { /* time to wait on the channel for passive scans (in TUs) */ u32 dwell_time_passive; - /* time to wait on the channel for DFS scans (in TUs) */ - u32 dwell_time_dfs; - /* number of probe requests to send on each channel in active scans */ u8 num_probe_reqs; diff --git a/trunk/drivers/net/wireless/wl12xx/main.c b/trunk/drivers/net/wireless/wl12xx/main.c index e6497dc669df..bc00e52f6445 100644 --- a/trunk/drivers/net/wireless/wl12xx/main.c +++ b/trunk/drivers/net/wireless/wl12xx/main.c @@ -311,7 +311,6 @@ static struct conf_drv_settings default_conf = { .min_dwell_time_active = 8, .max_dwell_time_active = 30, .dwell_time_passive = 100, - .dwell_time_dfs = 150, .num_probe_reqs = 2, .rssi_threshold = -90, .snr_threshold = 0, diff --git a/trunk/drivers/net/wireless/wl12xx/scan.c b/trunk/drivers/net/wireless/wl12xx/scan.c index 56f76abc754d..f37e5a391976 100644 --- a/trunk/drivers/net/wireless/wl12xx/scan.c +++ b/trunk/drivers/net/wireless/wl12xx/scan.c @@ -331,22 +331,16 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl, struct conf_sched_scan_settings *c = &wl->conf.sched_scan; int i, j; u32 flags; - bool force_passive = !req->n_ssids; for (i = 0, j = start; i < req->n_channels && j < MAX_CHANNELS_ALL_BANDS; i++) { flags = req->channels[i]->flags; - if (force_passive) - flags |= IEEE80211_CHAN_PASSIVE_SCAN; - - if ((req->channels[i]->band == band) && - !(flags & IEEE80211_CHAN_DISABLED) && - (!!(flags & IEEE80211_CHAN_RADAR) == radar) && - /* if radar is set, we ignore the passive flag */ - (radar || - !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) { + if (!(flags & IEEE80211_CHAN_DISABLED) && + ((flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive) && + ((flags & IEEE80211_CHAN_RADAR) == radar) && + (req->channels[i]->band == band)) { wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ", req->channels[i]->band, req->channels[i]->center_freq); @@ -356,12 +350,7 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl, wl1271_debug(DEBUG_SCAN, "max_power %d", req->channels[i]->max_power); - if (flags & IEEE80211_CHAN_RADAR) { - channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS; - channels[j].passive_duration = - cpu_to_le16(c->dwell_time_dfs); - } - else if (flags & IEEE80211_CHAN_PASSIVE_SCAN) { + if (flags & IEEE80211_CHAN_PASSIVE_SCAN) { channels[j].passive_duration = cpu_to_le16(c->dwell_time_passive); } else { @@ -370,7 +359,7 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl, channels[j].max_duration = cpu_to_le16(c->max_dwell_time_active); } - channels[j].tx_power_att = req->channels[i]->max_power; + channels[j].tx_power_att = req->channels[j]->max_power; channels[j].channel = req->channels[i]->hw_value; j++; @@ -397,11 +386,7 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl, wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels, IEEE80211_BAND_2GHZ, false, false, idx); - /* - * 5GHz channels always start at position 14, not immediately - * after the last 2.4GHz channel - */ - idx = 14; + idx += cfg->active[0]; cfg->passive[1] = wl1271_scan_get_sched_scan_channels(wl, req, 
cfg->channels, @@ -409,23 +394,22 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl, false, true, idx); idx += cfg->passive[1]; - cfg->dfs = + cfg->active[1] = wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels, IEEE80211_BAND_5GHZ, - true, true, idx); - idx += cfg->dfs; + false, false, 14); + idx += cfg->active[1]; - cfg->active[1] = + cfg->dfs = wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels, IEEE80211_BAND_5GHZ, - false, false, idx); - idx += cfg->active[1]; + true, false, idx); + idx += cfg->dfs; wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d", cfg->active[0], cfg->passive[0]); wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d", cfg->active[1], cfg->passive[1]); - wl1271_debug(DEBUG_SCAN, " DFS: %d", cfg->dfs); return idx; } @@ -437,7 +421,6 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl, struct wl1271_cmd_sched_scan_config *cfg = NULL; struct conf_sched_scan_settings *c = &wl->conf.sched_scan; int i, total_channels, ret; - bool force_passive = !req->n_ssids; wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config"); @@ -461,7 +444,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl, for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++) cfg->intervals[i] = cpu_to_le32(req->interval); - if (!force_passive && req->ssids[0].ssid_len && req->ssids[0].ssid) { + if (req->ssids[0].ssid_len && req->ssids[0].ssid) { cfg->filter_type = SCAN_SSID_FILTER_SPECIFIC; cfg->ssid_len = req->ssids[0].ssid_len; memcpy(cfg->ssid, req->ssids[0].ssid, @@ -478,7 +461,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl, goto out; } - if (!force_passive && cfg->active[0]) { + if (cfg->active[0]) { ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid, req->ssids[0].ssid_len, ies->ie[IEEE80211_BAND_2GHZ], @@ -490,7 +473,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl, } } - if (!force_passive && cfg->active[1]) { + if (cfg->active[1]) { ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid, req->ssids[0].ssid_len, ies->ie[IEEE80211_BAND_5GHZ], diff --git a/trunk/drivers/net/wireless/wl12xx/scan.h b/trunk/drivers/net/wireless/wl12xx/scan.h index a0b6c5d67b07..c83319579ca3 100644 --- a/trunk/drivers/net/wireless/wl12xx/scan.h +++ b/trunk/drivers/net/wireless/wl12xx/scan.h @@ -137,9 +137,6 @@ enum { SCAN_BSS_TYPE_ANY, }; -#define SCAN_CHANNEL_FLAGS_DFS BIT(0) -#define SCAN_CHANNEL_FLAGS_DFS_ENABLED BIT(1) - struct conn_scan_ch_params { __le16 min_duration; __le16 max_duration; diff --git a/trunk/drivers/net/wireless/zd1211rw/zd_usb.c b/trunk/drivers/net/wireless/zd1211rw/zd_usb.c index 631194d49828..0e819943b9e4 100644 --- a/trunk/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/trunk/drivers/net/wireless/zd1211rw/zd_usb.c @@ -1533,31 +1533,6 @@ static void __exit usb_exit(void) module_init(usb_init); module_exit(usb_exit); -static int zd_ep_regs_out_msg(struct usb_device *udev, void *data, int len, - int *actual_length, int timeout) -{ - /* In USB 2.0 mode EP_REGS_OUT endpoint is interrupt type. However in - * USB 1.1 mode endpoint is bulk. Select correct type URB by endpoint - * descriptor. 
- */ - struct usb_host_endpoint *ep; - unsigned int pipe; - - pipe = usb_sndintpipe(udev, EP_REGS_OUT); - ep = usb_pipe_endpoint(udev, pipe); - if (!ep) - return -EINVAL; - - if (usb_endpoint_xfer_int(&ep->desc)) { - return usb_interrupt_msg(udev, pipe, data, len, - actual_length, timeout); - } else { - pipe = usb_sndbulkpipe(udev, EP_REGS_OUT); - return usb_bulk_msg(udev, pipe, data, len, actual_length, - timeout); - } -} - static int usb_int_regs_length(unsigned int count) { return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data); @@ -1673,14 +1648,15 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values, udev = zd_usb_to_usbdev(usb); prepare_read_regs_int(usb); - r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/); + r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT), + req, req_len, &actual_req_len, 50 /* ms */); if (r) { dev_dbg_f(zd_usb_dev(usb), - "error in zd_ep_regs_out_msg(). Error number %d\n", r); + "error in usb_interrupt_msg(). Error number %d\n", r); goto error; } if (req_len != actual_req_len) { - dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()\n" + dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()\n" " req_len %d != actual_req_len %d\n", req_len, actual_req_len); r = -EIO; @@ -1842,17 +1818,9 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, rw->value = cpu_to_le16(ioreqs[i].value); } - /* In USB 2.0 mode endpoint is interrupt type. However in USB 1.1 mode - * endpoint is bulk. Select correct type URB by endpoint descriptor. - */ - if (usb_endpoint_xfer_int(&ep->desc)) - usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT), - req, req_len, iowrite16v_urb_complete, usb, - ep->desc.bInterval); - else - usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_REGS_OUT), - req, req_len, iowrite16v_urb_complete, usb); - + usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT), + req, req_len, iowrite16v_urb_complete, usb, + ep->desc.bInterval); urb->transfer_flags |= URB_FREE_BUFFER; /* Submit previous URB */ @@ -1956,14 +1924,15 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits) } udev = zd_usb_to_usbdev(usb); - r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/); + r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT), + req, req_len, &actual_req_len, 50 /* ms */); if (r) { dev_dbg_f(zd_usb_dev(usb), - "error in zd_ep_regs_out_msg(). Error number %d\n", r); + "error in usb_interrupt_msg(). Error number %d\n", r); goto out; } if (req_len != actual_req_len) { - dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()" + dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()" " req_len %d != actual_req_len %d\n", req_len, actual_req_len); r = -EIO; diff --git a/trunk/drivers/pci/dmar.c b/trunk/drivers/pci/dmar.c index 3dc9befa5aec..12e02bf92c4a 100644 --- a/trunk/drivers/pci/dmar.c +++ b/trunk/drivers/pci/dmar.c @@ -698,7 +698,12 @@ int __init detect_intel_iommu(void) { #ifdef CONFIG_INTR_REMAP struct acpi_table_dmar *dmar; - + /* + * for now we will disable dma-remapping when interrupt + * remapping is enabled. + * When support for queued invalidation for IOTLB invalidation + * is added, we will not need this any more. 
+ */ dmar = (struct acpi_table_dmar *) dmar_tbl; if (ret && cpu_has_x2apic && dmar->flags & 0x1) printk(KERN_INFO diff --git a/trunk/drivers/pci/intel-iommu.c b/trunk/drivers/pci/intel-iommu.c index 59f17acf7f68..6af6b628175b 100644 --- a/trunk/drivers/pci/intel-iommu.c +++ b/trunk/drivers/pci/intel-iommu.c @@ -47,8 +47,6 @@ #define ROOT_SIZE VTD_PAGE_SIZE #define CONTEXT_SIZE VTD_PAGE_SIZE -#define IS_BRIDGE_HOST_DEVICE(pdev) \ - ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST) #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e) @@ -118,11 +116,6 @@ static inline unsigned long align_to_level(unsigned long pfn, int level) return (pfn + level_size(level) - 1) & level_mask(level); } -static inline unsigned long lvl_to_nr_pages(unsigned int lvl) -{ - return 1 << ((lvl - 1) * LEVEL_STRIDE); -} - /* VT-d pages must always be _smaller_ than MM pages. Otherwise things are never going to work. */ static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn) @@ -149,12 +142,6 @@ static struct intel_iommu **g_iommus; static void __init check_tylersburg_isoch(void); static int rwbf_quirk; -/* - * set to 1 to panic kernel if can't successfully enable VT-d - * (used when kernel is launched w/ TXT) - */ -static int force_on = 0; - /* * 0: Present * 1-11: Reserved @@ -351,9 +338,6 @@ struct dmar_domain { int iommu_coherency;/* indicate coherency of iommu access */ int iommu_snooping; /* indicate snooping control feature*/ int iommu_count; /* reference count of iommu */ - int iommu_superpage;/* Level of superpages supported: - 0 == 4KiB (no superpages), 1 == 2MiB, - 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ spinlock_t iommu_lock; /* protect iommu set in domain */ u64 max_addr; /* maximum mapped address */ }; @@ -403,7 +387,6 @@ int dmar_disabled = 1; static int dmar_map_gfx = 1; static int dmar_forcedac; static int intel_iommu_strict; -static int intel_iommu_superpage = 1; #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1)) static DEFINE_SPINLOCK(device_domain_lock); @@ -434,10 +417,6 @@ static int __init intel_iommu_setup(char *str) printk(KERN_INFO "Intel-IOMMU: disable batched IOTLB flush\n"); intel_iommu_strict = 1; - } else if (!strncmp(str, "sp_off", 6)) { - printk(KERN_INFO - "Intel-IOMMU: disable supported super page\n"); - intel_iommu_superpage = 0; } str += strcspn(str, ","); @@ -576,32 +555,11 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain) } } -static void domain_update_iommu_superpage(struct dmar_domain *domain) -{ - int i, mask = 0xf; - - if (!intel_iommu_superpage) { - domain->iommu_superpage = 0; - return; - } - - domain->iommu_superpage = 4; /* 1TiB */ - - for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { - mask |= cap_super_page_val(g_iommus[i]->cap); - if (!mask) { - break; - } - } - domain->iommu_superpage = fls(mask); -} - /* Some capabilities may be different across iommus */ static void domain_update_iommu_cap(struct dmar_domain *domain) { domain_update_iommu_coherency(domain); domain_update_iommu_snooping(domain); - domain_update_iommu_superpage(domain); } static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn) @@ -731,31 +689,23 @@ static void free_context_table(struct intel_iommu *iommu) } static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, - unsigned long pfn, int large_level) + unsigned long pfn) { int addr_width = 
agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; struct dma_pte *parent, *pte = NULL; int level = agaw_to_level(domain->agaw); - int offset, target_level; + int offset; BUG_ON(!domain->pgd); BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width); parent = domain->pgd; - /* Search pte */ - if (!large_level) - target_level = 1; - else - target_level = large_level; - while (level > 0) { void *tmp_page; offset = pfn_level_offset(pfn, level); pte = &parent[offset]; - if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE)) - break; - if (level == target_level) + if (level == 1) break; if (!dma_pte_present(pte)) { @@ -783,11 +733,10 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, return pte; } - /* return address's pte at specific level */ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, unsigned long pfn, - int level, int *large_page) + int level) { struct dma_pte *parent, *pte = NULL; int total = agaw_to_level(domain->agaw); @@ -800,16 +749,8 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, if (level == total) return pte; - if (!dma_pte_present(pte)) { - *large_page = total; + if (!dma_pte_present(pte)) break; - } - - if (pte->val & DMA_PTE_LARGE_PAGE) { - *large_page = total; - return pte; - } - parent = phys_to_virt(dma_pte_addr(pte)); total--; } @@ -822,7 +763,6 @@ static void dma_pte_clear_range(struct dmar_domain *domain, unsigned long last_pfn) { int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; - unsigned int large_page = 1; struct dma_pte *first_pte, *pte; BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); @@ -831,15 +771,14 @@ static void dma_pte_clear_range(struct dmar_domain *domain, /* we don't need lock here; nobody else touches the iova range */ do { - large_page = 1; - first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page); + first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1); if (!pte) { - start_pfn = align_to_level(start_pfn + 1, large_page + 1); + start_pfn = align_to_level(start_pfn + 1, 2); continue; } - do { + do { dma_clear_pte(pte); - start_pfn += lvl_to_nr_pages(large_page); + start_pfn++; pte++; } while (start_pfn <= last_pfn && !first_pte_in_page(pte)); @@ -859,7 +798,6 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, int total = agaw_to_level(domain->agaw); int level; unsigned long tmp; - int large_page = 2; BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); @@ -875,10 +813,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, return; do { - large_page = level; - first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page); - if (large_page > level) - level = large_page + 1; + first_pte = pte = dma_pfn_level_pte(domain, tmp, level); if (!pte) { tmp = align_to_level(tmp + 1, level + 1); continue; @@ -1462,7 +1397,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width) else domain->iommu_snooping = 0; - domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); domain->iommu_count = 1; domain->nid = iommu->node; @@ -1483,10 +1417,6 @@ static void domain_exit(struct dmar_domain *domain) if (!domain) return; - /* Flush any lazy unmaps that may reference this domain */ - if (!intel_iommu_strict) - flush_unmaps_timeout(0); - domain_remove_dev_info(domain); /* destroy iovas */ put_iova_domain(&domain->iovad); @@ -1718,34 +1648,6 @@ static inline unsigned long aligned_nrpages(unsigned long host_addr, return PAGE_ALIGN(host_addr + size) >> 
VTD_PAGE_SHIFT; } -/* Return largest possible superpage level for a given mapping */ -static inline int hardware_largepage_caps(struct dmar_domain *domain, - unsigned long iov_pfn, - unsigned long phy_pfn, - unsigned long pages) -{ - int support, level = 1; - unsigned long pfnmerge; - - support = domain->iommu_superpage; - - /* To use a large page, the virtual *and* physical addresses - must be aligned to 2MiB/1GiB/etc. Lower bits set in either - of them will mean we have to use smaller pages. So just - merge them and check both at once. */ - pfnmerge = iov_pfn | phy_pfn; - - while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) { - pages >>= VTD_STRIDE_SHIFT; - if (!pages) - break; - pfnmerge >>= VTD_STRIDE_SHIFT; - level++; - support--; - } - return level; -} - static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, struct scatterlist *sg, unsigned long phys_pfn, unsigned long nr_pages, int prot) @@ -1754,8 +1656,6 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, phys_addr_t uninitialized_var(pteval); int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; unsigned long sg_res; - unsigned int largepage_lvl = 0; - unsigned long lvl_pages = 0; BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width); @@ -1771,7 +1671,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; } - while (nr_pages > 0) { + while (nr_pages--) { uint64_t tmp; if (!sg_res) { @@ -1779,21 +1679,11 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; sg->dma_length = sg->length; pteval = page_to_phys(sg_page(sg)) | prot; - phys_pfn = pteval >> VTD_PAGE_SHIFT; } - if (!pte) { - largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res); - - first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl); + first_pte = pte = pfn_to_dma_pte(domain, iov_pfn); if (!pte) return -ENOMEM; - /* It is large page*/ - if (largepage_lvl > 1) - pteval |= DMA_PTE_LARGE_PAGE; - else - pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; - } /* We don't need lock here, nobody else * touches the iova range @@ -1809,38 +1699,16 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, } WARN_ON(1); } - - lvl_pages = lvl_to_nr_pages(largepage_lvl); - - BUG_ON(nr_pages < lvl_pages); - BUG_ON(sg_res < lvl_pages); - - nr_pages -= lvl_pages; - iov_pfn += lvl_pages; - phys_pfn += lvl_pages; - pteval += lvl_pages * VTD_PAGE_SIZE; - sg_res -= lvl_pages; - - /* If the next PTE would be the first in a new page, then we - need to flush the cache on the entries we've just written. - And then we'll need to recalculate 'pte', so clear it and - let it get set again in the if (!pte) block above. - - If we're done (!nr_pages) we need to flush the cache too. - - Also if we've been setting superpages, we may need to - recalculate 'pte' and switch back to smaller pages for the - end of the mapping, if the trailing size is not enough to - use another superpage (i.e. sg_res < lvl_pages). 
*/ pte++; - if (!nr_pages || first_pte_in_page(pte) || - (largepage_lvl > 1 && sg_res < lvl_pages)) { + if (!nr_pages || first_pte_in_page(pte)) { domain_flush_cache(domain, first_pte, (void *)pte - (void *)first_pte); pte = NULL; } - - if (!sg_res && nr_pages) + iov_pfn++; + pteval += VTD_PAGE_SIZE; + sg_res--; + if (!sg_res) sg = sg_next(sg); } return 0; @@ -2148,7 +2016,7 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) return 0; return iommu_prepare_identity_map(pdev, rmrr->base_address, - rmrr->end_address); + rmrr->end_address + 1); } #ifdef CONFIG_DMAR_FLOPPY_WA @@ -2162,7 +2030,7 @@ static inline void iommu_prepare_isa(void) return; printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n"); - ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1); + ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024); if (ret) printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " @@ -2238,10 +2106,10 @@ static int identity_mapping(struct pci_dev *pdev) if (likely(!iommu_identity_mapping)) return 0; - info = pdev->dev.archdata.iommu; - if (info && info != DUMMY_DEVICE_DOMAIN_INFO) - return (info->domain == si_domain); + list_for_each_entry(info, &si_domain->devices, link) + if (info->dev == pdev) + return 1; return 0; } @@ -2319,19 +2187,8 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup) * Assume that they will -- if they turn out not to be, then we can * take them out of the 1:1 domain later. */ - if (!startup) { - /* - * If the device's dma_mask is less than the system's memory - * size then this is not a candidate for identity mapping. - */ - u64 dma_mask = pdev->dma_mask; - - if (pdev->dev.coherent_dma_mask && - pdev->dev.coherent_dma_mask < dma_mask) - dma_mask = pdev->dev.coherent_dma_mask; - - return dma_mask >= dma_get_required_mask(&pdev->dev); - } + if (!startup) + return pdev->dma_mask > DMA_BIT_MASK(32); return 1; } @@ -2346,9 +2203,6 @@ static int __init iommu_prepare_static_identity_mapping(int hw) return -EFAULT; for_each_pci_dev(pdev) { - /* Skip Host/PCI Bridge devices */ - if (IS_BRIDGE_HOST_DEVICE(pdev)) - continue; if (iommu_should_identity_map(pdev, 1)) { printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n", hw ? 
"hardware" : "software", pci_name(pdev)); @@ -2364,7 +2218,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw) return 0; } -static int __init init_dmars(void) +static int __init init_dmars(int force_on) { struct dmar_drhd_unit *drhd; struct dmar_rmrr_unit *rmrr; @@ -2738,7 +2592,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, iommu = domain_get_iommu(domain); size = aligned_nrpages(paddr, size); - iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask); + iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), + pdev->dma_mask); if (!iova) goto error; @@ -3263,17 +3118,7 @@ static int init_iommu_hw(void) if (iommu->qi) dmar_reenable_qi(iommu); - for_each_iommu(iommu, drhd) { - if (drhd->ignored) { - /* - * we always have to disable PMRs or DMA may fail on - * this device - */ - if (force_on) - iommu_disable_protect_mem_regions(iommu); - continue; - } - + for_each_active_iommu(iommu, drhd) { iommu_flush_write_buffer(iommu); iommu_set_root_entry(iommu); @@ -3282,8 +3127,7 @@ static int init_iommu_hw(void) DMA_CCMD_GLOBAL_INVL); iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); - if (iommu_enable_translation(iommu)) - return 1; + iommu_enable_translation(iommu); iommu_disable_protect_mem_regions(iommu); } @@ -3350,10 +3194,7 @@ static void iommu_resume(void) unsigned long flag; if (init_iommu_hw()) { - if (force_on) - panic("tboot: IOMMU setup failed, DMAR can not resume!\n"); - else - WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); + WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); return; } @@ -3430,6 +3271,7 @@ static struct notifier_block device_nb = { int __init intel_iommu_init(void) { int ret = 0; + int force_on = 0; /* VT-d is required for a TXT/tboot launch, so enforce that */ force_on = tboot_force_iommu(); @@ -3467,7 +3309,7 @@ int __init intel_iommu_init(void) init_no_remapping_devices(); - ret = init_dmars(); + ret = init_dmars(force_on); if (ret) { if (force_on) panic("tboot: Failed to initialize DMARs\n"); @@ -3538,8 +3380,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, spin_lock_irqsave(&device_domain_lock, flags); list_for_each_safe(entry, tmp, &domain->devices) { info = list_entry(entry, struct device_domain_info, link); - if (info->segment == pci_domain_nr(pdev->bus) && - info->bus == pdev->bus->number && + /* No need to compare PCI domain; it has to be the same */ + if (info->bus == pdev->bus->number && info->devfn == pdev->devfn) { list_del(&info->link); list_del(&info->global); @@ -3577,13 +3419,10 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, domain_update_iommu_cap(domain); spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); - if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && - !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) { - spin_lock_irqsave(&iommu->lock, tmp_flags); - clear_bit(domain->id, iommu->domain_ids); - iommu->domains[domain->id] = NULL; - spin_unlock_irqrestore(&iommu->lock, tmp_flags); - } + spin_lock_irqsave(&iommu->lock, tmp_flags); + clear_bit(domain->id, iommu->domain_ids); + iommu->domains[domain->id] = NULL; + spin_unlock_irqrestore(&iommu->lock, tmp_flags); } spin_unlock_irqrestore(&device_domain_lock, flags); @@ -3666,7 +3505,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) domain->iommu_count = 0; domain->iommu_coherency = 0; domain->iommu_snooping = 0; - domain->iommu_superpage = 0; domain->max_addr = 0; domain->nid = -1; @@ -3882,7 +3720,7 @@ static phys_addr_t 
intel_iommu_iova_to_phys(struct iommu_domain *domain, struct dma_pte *pte; u64 phys = 0; - pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0); + pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT); if (pte) phys = dma_pte_addr(pte); diff --git a/trunk/drivers/pci/iova.c b/trunk/drivers/pci/iova.c index c5c274ab5c5a..9606e599a475 100644 --- a/trunk/drivers/pci/iova.c +++ b/trunk/drivers/pci/iova.c @@ -63,16 +63,8 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) curr = iovad->cached32_node; cached_iova = container_of(curr, struct iova, node); - if (free->pfn_lo >= cached_iova->pfn_lo) { - struct rb_node *node = rb_next(&free->node); - struct iova *iova = container_of(node, struct iova, node); - - /* only cache if it's below 32bit pfn */ - if (node && iova->pfn_lo < iovad->dma_32bit_pfn) - iovad->cached32_node = node; - else - iovad->cached32_node = NULL; - } + if (free->pfn_lo >= cached_iova->pfn_lo) + iovad->cached32_node = rb_next(&free->node); } /* Computes the padding size required, to make the diff --git a/trunk/drivers/scsi/scsi_scan.c b/trunk/drivers/scsi/scsi_scan.c index 44e8ca398efa..58584dc0724a 100644 --- a/trunk/drivers/scsi/scsi_scan.c +++ b/trunk/drivers/scsi/scsi_scan.c @@ -297,7 +297,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, kfree(sdev); goto out; } - blk_get_queue(sdev->request_queue); + sdev->request_queue->queuedata = sdev; scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); diff --git a/trunk/drivers/scsi/scsi_sysfs.c b/trunk/drivers/scsi/scsi_sysfs.c index e0bd3f790fca..e63912510fb9 100644 --- a/trunk/drivers/scsi/scsi_sysfs.c +++ b/trunk/drivers/scsi/scsi_sysfs.c @@ -322,7 +322,6 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) kfree(evt); } - blk_put_queue(sdev->request_queue); /* NULL queue means the device can't be used */ sdev->request_queue = NULL; diff --git a/trunk/drivers/tty/n_gsm.c b/trunk/drivers/tty/n_gsm.c index 09e8c7d53af3..a4c42a75a3bf 100644 --- a/trunk/drivers/tty/n_gsm.c +++ b/trunk/drivers/tty/n_gsm.c @@ -2128,8 +2128,8 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm) gsm->tty = NULL; } -static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp, - char *fp, int count) +static unsigned int gsmld_receive_buf(struct tty_struct *tty, + const unsigned char *cp, char *fp, int count) { struct gsm_mux *gsm = tty->disc_data; const unsigned char *dp; @@ -2162,6 +2162,8 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp, } /* FASYNC if needed ? */ /* If clogged call tty_throttle(tty); */ + + return count; } /** diff --git a/trunk/drivers/tty/n_hdlc.c b/trunk/drivers/tty/n_hdlc.c index cea56033b34c..cac666314aef 100644 --- a/trunk/drivers/tty/n_hdlc.c +++ b/trunk/drivers/tty/n_hdlc.c @@ -188,8 +188,8 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp, poll_table *wait); static int n_hdlc_tty_open(struct tty_struct *tty); static void n_hdlc_tty_close(struct tty_struct *tty); -static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *cp, - char *fp, int count); +static unsigned int n_hdlc_tty_receive(struct tty_struct *tty, + const __u8 *cp, char *fp, int count); static void n_hdlc_tty_wakeup(struct tty_struct *tty); #define bset(p,b) ((p)[(b) >> 5] |= (1 << ((b) & 0x1f))) @@ -509,8 +509,8 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty) * Called by tty low level driver when receive data is available. 
Data is * interpreted as one HDLC frame. */ -static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data, - char *flags, int count) +static unsigned int n_hdlc_tty_receive(struct tty_struct *tty, + const __u8 *data, char *flags, int count) { register struct n_hdlc *n_hdlc = tty2n_hdlc (tty); register struct n_hdlc_buf *buf; @@ -521,20 +521,20 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data, /* This can happen if stuff comes in on the backup tty */ if (!n_hdlc || tty != n_hdlc->tty) - return; + return -ENODEV; /* verify line is using HDLC discipline */ if (n_hdlc->magic != HDLC_MAGIC) { printk("%s(%d) line not using HDLC discipline\n", __FILE__,__LINE__); - return; + return -EINVAL; } if ( count>maxframe ) { if (debuglevel >= DEBUG_LEVEL_INFO) printk("%s(%d) rx count>maxframesize, data discarded\n", __FILE__,__LINE__); - return; + return -EINVAL; } /* get a free HDLC buffer */ @@ -550,7 +550,7 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data, if (debuglevel >= DEBUG_LEVEL_INFO) printk("%s(%d) no more rx buffers, data discarded\n", __FILE__,__LINE__); - return; + return -EINVAL; } /* copy received data to HDLC buffer */ @@ -565,6 +565,8 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data, if (n_hdlc->tty->fasync != NULL) kill_fasync (&n_hdlc->tty->fasync, SIGIO, POLL_IN); + return count; + } /* end of n_hdlc_tty_receive() */ /** diff --git a/trunk/drivers/tty/n_r3964.c b/trunk/drivers/tty/n_r3964.c index 5c6c31459a2f..a4bc39c21a43 100644 --- a/trunk/drivers/tty/n_r3964.c +++ b/trunk/drivers/tty/n_r3964.c @@ -139,8 +139,8 @@ static int r3964_ioctl(struct tty_struct *tty, struct file *file, static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old); static unsigned int r3964_poll(struct tty_struct *tty, struct file *file, struct poll_table_struct *wait); -static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp, - char *fp, int count); +static unsigned int r3964_receive_buf(struct tty_struct *tty, + const unsigned char *cp, char *fp, int count); static struct tty_ldisc_ops tty_ldisc_N_R3964 = { .owner = THIS_MODULE, @@ -1239,8 +1239,8 @@ static unsigned int r3964_poll(struct tty_struct *tty, struct file *file, return result; } -static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp, - char *fp, int count) +static unsigned int r3964_receive_buf(struct tty_struct *tty, + const unsigned char *cp, char *fp, int count) { struct r3964_info *pInfo = tty->disc_data; const unsigned char *p; @@ -1257,6 +1257,8 @@ static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp, } } + + return count; } MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/tty/n_tty.c b/trunk/drivers/tty/n_tty.c index 0ad32888091c..95d0a9c2dd13 100644 --- a/trunk/drivers/tty/n_tty.c +++ b/trunk/drivers/tty/n_tty.c @@ -81,38 +81,6 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x, return put_user(x, ptr); } -/** - * n_tty_set__room - receive space - * @tty: terminal - * - * Called by the driver to find out how much data it is - * permitted to feed to the line discipline without any being lost - * and thus to manage flow control. Not serialized. Answers for the - * "instant". - */ - -static void n_tty_set_room(struct tty_struct *tty) -{ - /* tty->read_cnt is not read locked ? 
*/ - int left = N_TTY_BUF_SIZE - tty->read_cnt - 1; - int old_left; - - /* - * If we are doing input canonicalization, and there are no - * pending newlines, let characters through without limit, so - * that erase characters will be handled. Other excess - * characters will be beeped. - */ - if (left <= 0) - left = tty->icanon && !tty->canon_data; - old_left = tty->receive_room; - tty->receive_room = left; - - /* Did this open up the receive buffer? We may need to flip */ - if (left && !old_left) - schedule_work(&tty->buf.work); -} - static void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty) { if (tty->read_cnt < N_TTY_BUF_SIZE) { @@ -184,7 +152,6 @@ static void reset_buffer_flags(struct tty_struct *tty) tty->canon_head = tty->canon_data = tty->erasing = 0; memset(&tty->read_flags, 0, sizeof tty->read_flags); - n_tty_set_room(tty); check_unthrottle(tty); } @@ -1360,17 +1327,19 @@ static void n_tty_write_wakeup(struct tty_struct *tty) * calls one at a time and in order (or using flush_to_ldisc) */ -static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, - char *fp, int count) +static unsigned int n_tty_receive_buf(struct tty_struct *tty, + const unsigned char *cp, char *fp, int count) { const unsigned char *p; char *f, flags = TTY_NORMAL; int i; char buf[64]; unsigned long cpuflags; + int left; + int ret = 0; if (!tty->read_buf) - return; + return 0; if (tty->real_raw) { spin_lock_irqsave(&tty->read_lock, cpuflags); @@ -1380,6 +1349,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, memcpy(tty->read_buf + tty->read_head, cp, i); tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1); tty->read_cnt += i; + ret += i; cp += i; count -= i; @@ -1389,8 +1359,10 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, memcpy(tty->read_buf + tty->read_head, cp, i); tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1); tty->read_cnt += i; + ret += i; spin_unlock_irqrestore(&tty->read_lock, cpuflags); } else { + ret = count; for (i = count, p = cp, f = fp; i; i--, p++) { if (f) flags = *f++; @@ -1418,8 +1390,6 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, tty->ops->flush_chars(tty); } - n_tty_set_room(tty); - if ((!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) || L_EXTPROC(tty)) { kill_fasync(&tty->fasync, SIGIO, POLL_IN); @@ -1432,8 +1402,12 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, * mode. We don't want to throttle the driver if we're in * canonical mode and don't have a newline yet! */ - if (tty->receive_room < TTY_THRESHOLD_THROTTLE) + left = N_TTY_BUF_SIZE - tty->read_cnt - 1; + + if (left < TTY_THRESHOLD_THROTTLE) tty_throttle(tty); + + return ret; } int is_ignored(int sig) @@ -1477,7 +1451,6 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) if (test_bit(TTY_HW_COOK_IN, &tty->flags)) { tty->raw = 1; tty->real_raw = 1; - n_tty_set_room(tty); return; } if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) || @@ -1530,7 +1503,6 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) else tty->real_raw = 0; } - n_tty_set_room(tty); /* The termios change make the tty ready for I/O */ wake_up_interruptible(&tty->write_wait); wake_up_interruptible(&tty->read_wait); @@ -1812,8 +1784,6 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, retval = -ERESTARTSYS; break; } - /* FIXME: does n_tty_set_room need locking ? 
*/ - n_tty_set_room(tty); timeout = schedule_timeout(timeout); continue; } @@ -1885,10 +1855,8 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode, * we won't get any more characters. */ - if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) { - n_tty_set_room(tty); + if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) check_unthrottle(tty); - } if (b - buf >= minimum) break; @@ -1910,7 +1878,6 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, } else if (test_and_clear_bit(TTY_PUSH, &tty->flags)) goto do_it_again; - n_tty_set_room(tty); return retval; } diff --git a/trunk/drivers/tty/tty_buffer.c b/trunk/drivers/tty/tty_buffer.c index f1a7918d71aa..46de2e075dac 100644 --- a/trunk/drivers/tty/tty_buffer.c +++ b/trunk/drivers/tty/tty_buffer.c @@ -416,6 +416,7 @@ static void flush_to_ldisc(struct work_struct *work) struct tty_buffer *head, *tail = tty->buf.tail; int seen_tail = 0; while ((head = tty->buf.head) != NULL) { + int copied; int count; char *char_buf; unsigned char *flag_buf; @@ -442,17 +443,19 @@ static void flush_to_ldisc(struct work_struct *work) line discipline as we want to empty the queue */ if (test_bit(TTY_FLUSHPENDING, &tty->flags)) break; - if (!tty->receive_room || seen_tail) - break; - if (count > tty->receive_room) - count = tty->receive_room; char_buf = head->char_buf_ptr + head->read; flag_buf = head->flag_buf_ptr + head->read; - head->read += count; spin_unlock_irqrestore(&tty->buf.lock, flags); - disc->ops->receive_buf(tty, char_buf, + copied = disc->ops->receive_buf(tty, char_buf, flag_buf, count); spin_lock_irqsave(&tty->buf.lock, flags); + + head->read += copied; + + if (copied == 0 || seen_tail) { + schedule_work(&tty->buf.work); + break; + } } clear_bit(TTY_FLUSHING, &tty->flags); } diff --git a/trunk/drivers/tty/vt/selection.c b/trunk/drivers/tty/vt/selection.c index fb864e7fcd13..67b1d0d7c8ac 100644 --- a/trunk/drivers/tty/vt/selection.c +++ b/trunk/drivers/tty/vt/selection.c @@ -332,8 +332,7 @@ int paste_selection(struct tty_struct *tty) continue; } count = sel_buffer_lth - pasted; - count = min(count, tty->receive_room); - tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted, + count = tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted, NULL, count); pasted += count; } diff --git a/trunk/drivers/vhost/net.c b/trunk/drivers/vhost/net.c index e224a92baa16..2f7c76a85e53 100644 --- a/trunk/drivers/vhost/net.c +++ b/trunk/drivers/vhost/net.c @@ -144,7 +144,7 @@ static void handle_tx(struct vhost_net *net) } mutex_lock(&vq->mutex); - vhost_disable_notify(&net->dev, vq); + vhost_disable_notify(vq); if (wmem < sock->sk->sk_sndbuf / 2) tx_poll_stop(net); @@ -166,8 +166,8 @@ static void handle_tx(struct vhost_net *net) set_bit(SOCK_ASYNC_NOSPACE, &sock->flags); break; } - if (unlikely(vhost_enable_notify(&net->dev, vq))) { - vhost_disable_notify(&net->dev, vq); + if (unlikely(vhost_enable_notify(vq))) { + vhost_disable_notify(vq); continue; } break; @@ -315,7 +315,7 @@ static void handle_rx(struct vhost_net *net) return; mutex_lock(&vq->mutex); - vhost_disable_notify(&net->dev, vq); + vhost_disable_notify(vq); vhost_hlen = vq->vhost_hlen; sock_hlen = vq->sock_hlen; @@ -334,10 +334,10 @@ static void handle_rx(struct vhost_net *net) break; /* OK, now we need to know about added descriptors. 
*/ if (!headcount) { - if (unlikely(vhost_enable_notify(&net->dev, vq))) { + if (unlikely(vhost_enable_notify(vq))) { /* They have slipped one in as we were * doing that: check again. */ - vhost_disable_notify(&net->dev, vq); + vhost_disable_notify(vq); continue; } /* Nothing new? Wait for eventfd to tell us diff --git a/trunk/drivers/vhost/test.c b/trunk/drivers/vhost/test.c index 734e1d74ad80..099f30230d06 100644 --- a/trunk/drivers/vhost/test.c +++ b/trunk/drivers/vhost/test.c @@ -49,7 +49,7 @@ static void handle_vq(struct vhost_test *n) return; mutex_lock(&vq->mutex); - vhost_disable_notify(&n->dev, vq); + vhost_disable_notify(vq); for (;;) { head = vhost_get_vq_desc(&n->dev, vq, vq->iov, @@ -61,8 +61,8 @@ static void handle_vq(struct vhost_test *n) break; /* Nothing new? Wait for eventfd to tell us they refilled. */ if (head == vq->num) { - if (unlikely(vhost_enable_notify(&n->dev, vq))) { - vhost_disable_notify(&n->dev, vq); + if (unlikely(vhost_enable_notify(vq))) { + vhost_disable_notify(vq); continue; } break; diff --git a/trunk/drivers/vhost/vhost.c b/trunk/drivers/vhost/vhost.c index ea966b356352..7aa4eea930f1 100644 --- a/trunk/drivers/vhost/vhost.c +++ b/trunk/drivers/vhost/vhost.c @@ -37,9 +37,6 @@ enum { VHOST_MEMORY_F_LOG = 0x1, }; -#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num]) -#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num]) - static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt) { @@ -164,8 +161,6 @@ static void vhost_vq_reset(struct vhost_dev *dev, vq->last_avail_idx = 0; vq->avail_idx = 0; vq->last_used_idx = 0; - vq->signalled_used = 0; - vq->signalled_used_valid = false; vq->used_flags = 0; vq->log_used = false; vq->log_addr = -1ull; @@ -494,17 +489,16 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem, return 1; } -static int vq_access_ok(struct vhost_dev *d, unsigned int num, +static int vq_access_ok(unsigned int num, struct vring_desc __user *desc, struct vring_avail __user *avail, struct vring_used __user *used) { - size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; return access_ok(VERIFY_READ, desc, num * sizeof *desc) && access_ok(VERIFY_READ, avail, - sizeof *avail + num * sizeof *avail->ring + s) && + sizeof *avail + num * sizeof *avail->ring) && access_ok(VERIFY_WRITE, used, - sizeof *used + num * sizeof *used->ring + s); + sizeof *used + num * sizeof *used->ring); } /* Can we log writes? */ @@ -520,11 +514,9 @@ int vhost_log_access_ok(struct vhost_dev *dev) /* Verify access for write logging. */ /* Caller should have vq mutex and device mutex */ -static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq, - void __user *log_base) +static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base) { struct vhost_memory *mp; - size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; mp = rcu_dereference_protected(vq->dev->memory, lockdep_is_held(&vq->mutex)); @@ -532,15 +524,15 @@ static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq, vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) && (!vq->log_used || log_access_ok(log_base, vq->log_addr, sizeof *vq->used + - vq->num * sizeof *vq->used->ring + s)); + vq->num * sizeof *vq->used->ring)); } /* Can we start vq? 
*/ /* Caller should have vq mutex and device mutex */ int vhost_vq_access_ok(struct vhost_virtqueue *vq) { - return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) && - vq_log_access_ok(vq->dev, vq, vq->log_base); + return vq_access_ok(vq->num, vq->desc, vq->avail, vq->used) && + vq_log_access_ok(vq, vq->log_base); } static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) @@ -585,7 +577,6 @@ static int init_used(struct vhost_virtqueue *vq, if (r) return r; - vq->signalled_used_valid = false; return get_user(vq->last_used_idx, &used->idx); } @@ -683,7 +674,7 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) * If it is not, we don't as size might not have been setup. * We will verify when backend is configured. */ if (vq->private_data) { - if (!vq_access_ok(d, vq->num, + if (!vq_access_ok(vq->num, (void __user *)(unsigned long)a.desc_user_addr, (void __user *)(unsigned long)a.avail_user_addr, (void __user *)(unsigned long)a.used_user_addr)) { @@ -827,7 +818,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg) vq = d->vqs + i; mutex_lock(&vq->mutex); /* If ring is inactive, will check when it's enabled. */ - if (vq->private_data && !vq_log_access_ok(d, vq, base)) + if (vq->private_data && !vq_log_access_ok(vq, base)) r = -EFAULT; else vq->log_base = base; @@ -1228,10 +1219,6 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq, /* On success, increment avail index. */ vq->last_avail_idx++; - - /* Assume notifications from guest are disabled at this point, - * if they aren't we would need to update avail_event index. */ - BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY)); return head; } @@ -1280,12 +1267,6 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) eventfd_signal(vq->log_ctx, 1); } vq->last_used_idx++; - /* If the driver never bothers to signal in a very long while, - * used index might wrap around. If that happens, invalidate - * signalled_used index we stored. TODO: make sure driver - * signals at least once in 2^16 and remove this. */ - if (unlikely(vq->last_used_idx == vq->signalled_used)) - vq->signalled_used_valid = false; return 0; } @@ -1294,7 +1275,6 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq, unsigned count) { struct vring_used_elem __user *used; - u16 old, new; int start; start = vq->last_used_idx % vq->num; @@ -1312,14 +1292,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq, ((void __user *)used - (void __user *)vq->used), count * sizeof *used); } - old = vq->last_used_idx; - new = (vq->last_used_idx += count); - /* If the driver never bothers to signal in a very long while, - * used index might wrap around. If that happens, invalidate - * signalled_used index we stored. TODO: make sure driver - * signals at least once in 2^16 and remove this. */ - if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old))) - vq->signalled_used_valid = false; + vq->last_used_idx += count; return 0; } @@ -1358,47 +1331,29 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, return r; } -static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) +/* This actually signals the guest, using eventfd. */ +void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) { - __u16 old, new, event; - bool v; + __u16 flags; + /* Flush out used index updates. This is paired * with the barrier that the Guest executes when enabling * interrupts. 
*/ smp_mb(); - if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) && - unlikely(vq->avail_idx == vq->last_avail_idx)) - return true; - - if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { - __u16 flags; - if (__get_user(flags, &vq->avail->flags)) { - vq_err(vq, "Failed to get flags"); - return true; - } - return !(flags & VRING_AVAIL_F_NO_INTERRUPT); + if (__get_user(flags, &vq->avail->flags)) { + vq_err(vq, "Failed to get flags"); + return; } - old = vq->signalled_used; - v = vq->signalled_used_valid; - new = vq->signalled_used = vq->last_used_idx; - vq->signalled_used_valid = true; - - if (unlikely(!v)) - return true; - if (get_user(event, vhost_used_event(vq))) { - vq_err(vq, "Failed to get used event idx"); - return true; - } - return vring_need_event(event, new, old); -} + /* If they don't want an interrupt, don't signal, unless empty. */ + if ((flags & VRING_AVAIL_F_NO_INTERRUPT) && + (vq->avail_idx != vq->last_avail_idx || + !vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY))) + return; -/* This actually signals the guest, using eventfd. */ -void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) -{ /* Signal the Guest tell them we used something up. */ - if (vq->call_ctx && vhost_notify(dev, vq)) + if (vq->call_ctx) eventfd_signal(vq->call_ctx, 1); } @@ -1421,7 +1376,7 @@ void vhost_add_used_and_signal_n(struct vhost_dev *dev, } /* OK, now we need to know about added descriptors. */ -bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) +bool vhost_enable_notify(struct vhost_virtqueue *vq) { u16 avail_idx; int r; @@ -1429,34 +1384,11 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) return false; vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; - if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { - r = put_user(vq->used_flags, &vq->used->flags); - if (r) { - vq_err(vq, "Failed to enable notification at %p: %d\n", - &vq->used->flags, r); - return false; - } - } else { - r = put_user(vq->avail_idx, vhost_avail_event(vq)); - if (r) { - vq_err(vq, "Failed to update avail event index at %p: %d\n", - vhost_avail_event(vq), r); - return false; - } - } - if (unlikely(vq->log_used)) { - void __user *used; - /* Make sure data is seen before log. */ - smp_wmb(); - used = vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX) ? - &vq->used->flags : vhost_avail_event(vq); - /* Log used flags or event index entry write. Both are 16 bit - * fields. */ - log_write(vq->log_base, vq->log_addr + - (used - (void __user *)vq->used), - sizeof(u16)); - if (vq->log_ctx) - eventfd_signal(vq->log_ctx, 1); + r = put_user(vq->used_flags, &vq->used->flags); + if (r) { + vq_err(vq, "Failed to enable notification at %p: %d\n", + &vq->used->flags, r); + return false; } /* They could have slipped one in as we were doing that: make * sure it's written, then check again. */ @@ -1472,17 +1404,15 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) } /* We don't need to be notified again. 
*/ -void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) +void vhost_disable_notify(struct vhost_virtqueue *vq) { int r; if (vq->used_flags & VRING_USED_F_NO_NOTIFY) return; vq->used_flags |= VRING_USED_F_NO_NOTIFY; - if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { - r = put_user(vq->used_flags, &vq->used->flags); - if (r) - vq_err(vq, "Failed to enable notification at %p: %d\n", - &vq->used->flags, r); - } + r = put_user(vq->used_flags, &vq->used->flags); + if (r) + vq_err(vq, "Failed to enable notification at %p: %d\n", + &vq->used->flags, r); } diff --git a/trunk/drivers/vhost/vhost.h b/trunk/drivers/vhost/vhost.h index 8e03379dd30f..b3363ae38518 100644 --- a/trunk/drivers/vhost/vhost.h +++ b/trunk/drivers/vhost/vhost.h @@ -84,12 +84,6 @@ struct vhost_virtqueue { /* Used flags */ u16 used_flags; - /* Last used index value we have signalled on */ - u16 signalled_used; - - /* Last used index value we have signalled on */ - bool signalled_used_valid; - /* Log writes to used structure. */ bool log_used; u64 log_addr; @@ -155,8 +149,8 @@ void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *, void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *, struct vring_used_elem *heads, unsigned count); void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *); -void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *); -bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); +void vhost_disable_notify(struct vhost_virtqueue *); +bool vhost_enable_notify(struct vhost_virtqueue *); int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, unsigned int log_num, u64 len); @@ -168,12 +162,11 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, } while (0) enum { - VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | - (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | - (1ULL << VIRTIO_RING_F_EVENT_IDX) | - (1ULL << VHOST_F_LOG_ALL) | - (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) | - (1ULL << VIRTIO_NET_F_MRG_RXBUF), + VHOST_FEATURES = (1 << VIRTIO_F_NOTIFY_ON_EMPTY) | + (1 << VIRTIO_RING_F_INDIRECT_DESC) | + (1 << VHOST_F_LOG_ALL) | + (1 << VHOST_NET_F_VIRTIO_NET_HDR) | + (1 << VIRTIO_NET_F_MRG_RXBUF), }; static inline int vhost_has_feature(struct vhost_dev *dev, int bit) diff --git a/trunk/drivers/virtio/virtio_balloon.c b/trunk/drivers/virtio/virtio_balloon.c index e058ace2a4ad..0f1da45ba47d 100644 --- a/trunk/drivers/virtio/virtio_balloon.c +++ b/trunk/drivers/virtio/virtio_balloon.c @@ -40,6 +40,9 @@ struct virtio_balloon /* Waiting for host to ack the pages we released. */ struct completion acked; + /* Do we have to tell Host *before* we reuse pages? */ + bool tell_host_first; + /* The pages we've told the Host we're not using. 
*/ unsigned int num_pages; struct list_head pages; @@ -148,14 +151,13 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num) vb->num_pages--; } - - /* - * Note that if - * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST); - * is true, we *have* to do it in this order - */ - tell_host(vb, vb->deflate_vq); - release_pages_by_pfn(vb->pfns, vb->num_pfns); + if (vb->tell_host_first) { + tell_host(vb, vb->deflate_vq); + release_pages_by_pfn(vb->pfns, vb->num_pfns); + } else { + release_pages_by_pfn(vb->pfns, vb->num_pfns); + tell_host(vb, vb->deflate_vq); + } } static inline void update_stat(struct virtio_balloon *vb, int idx, @@ -323,6 +325,9 @@ static int virtballoon_probe(struct virtio_device *vdev) goto out_del_vqs; } + vb->tell_host_first + = virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST); + return 0; out_del_vqs: diff --git a/trunk/drivers/virtio/virtio_ring.c b/trunk/drivers/virtio/virtio_ring.c index 68b9136847af..b0043fb26a4d 100644 --- a/trunk/drivers/virtio/virtio_ring.c +++ b/trunk/drivers/virtio/virtio_ring.c @@ -82,9 +82,6 @@ struct vring_virtqueue /* Host supports indirect buffers */ bool indirect; - /* Host publishes avail event idx */ - bool event; - /* Number of free buffers */ unsigned int num_free; /* Head of free buffer list. */ @@ -240,22 +237,18 @@ EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp); void virtqueue_kick(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); - u16 new, old; START_USE(vq); /* Descriptors and available array need to be set before we expose the * new available array entries. */ virtio_wmb(); - old = vq->vring.avail->idx; - new = vq->vring.avail->idx = old + vq->num_added; + vq->vring.avail->idx += vq->num_added; vq->num_added = 0; /* Need to update avail index before checking if we should notify */ virtio_mb(); - if (vq->event ? - vring_need_event(vring_avail_event(&vq->vring), new, old) : - !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY)) + if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY)) /* Prod other side to tell it about changes. */ vq->notify(&vq->vq); @@ -331,14 +324,6 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) ret = vq->data[i]; detach_buf(vq, i); vq->last_used_idx++; - /* If we expect an interrupt for the next entry, tell host - * by writing event index and flush out the write before - * the read in the next get_buf call. */ - if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) { - vring_used_event(&vq->vring) = vq->last_used_idx; - virtio_mb(); - } - END_USE(vq); return ret; } @@ -360,11 +345,7 @@ bool virtqueue_enable_cb(struct virtqueue *_vq) /* We optimistically turn back on interrupts, then check if there was * more to do. */ - /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to - * either clear the flags bit or point the event index at the next - * entry. Always do both to keep code simple. */ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; - vring_used_event(&vq->vring) = vq->last_used_idx; virtio_mb(); if (unlikely(more_used(vq))) { END_USE(vq); @@ -376,33 +357,6 @@ bool virtqueue_enable_cb(struct virtqueue *_vq) } EXPORT_SYMBOL_GPL(virtqueue_enable_cb); -bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) -{ - struct vring_virtqueue *vq = to_vvq(_vq); - u16 bufs; - - START_USE(vq); - - /* We optimistically turn back on interrupts, then check if there was - * more to do. */ - /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to - * either clear the flags bit or point the event index at the next - * entry. 
Always do both to keep code simple. */ - vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; - /* TODO: tune this threshold */ - bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4; - vring_used_event(&vq->vring) = vq->last_used_idx + bufs; - virtio_mb(); - if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) { - END_USE(vq); - return false; - } - - END_USE(vq); - return true; -} -EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); - void *virtqueue_detach_unused_buf(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); @@ -484,7 +438,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, #endif vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); - vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); /* No callback? Tell other side not to bother us. */ if (!callback) @@ -519,8 +472,6 @@ void vring_transport_features(struct virtio_device *vdev) switch (i) { case VIRTIO_RING_F_INDIRECT_DESC: break; - case VIRTIO_RING_F_EVENT_IDX: - break; default: /* We don't understand this bit. */ clear_bit(i, vdev->features); diff --git a/trunk/fs/autofs4/root.c b/trunk/fs/autofs4/root.c index f55ae23b137e..87d95a8cddbc 100644 --- a/trunk/fs/autofs4/root.c +++ b/trunk/fs/autofs4/root.c @@ -583,6 +583,8 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry) if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN)) return -EACCES; + dentry_unhash(dentry); + if (atomic_dec_and_test(&ino->count)) { p_ino = autofs4_dentry_ino(dentry->d_parent); if (p_ino && dentry->d_parent != dentry) diff --git a/trunk/fs/block_dev.c b/trunk/fs/block_dev.c index 1a2421f908f0..1f2b19978333 100644 --- a/trunk/fs/block_dev.c +++ b/trunk/fs/block_dev.c @@ -1272,8 +1272,8 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) * individual writeable reference is too fragile given the * way @mode is used in blkdev_get/put(). */ - if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder && - (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) { + if ((disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE) && + !res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) { bdev->bd_write_holder = true; disk_block_events(disk); } diff --git a/trunk/fs/btrfs/btrfs_inode.h b/trunk/fs/btrfs/btrfs_inode.h index 52d7eca8c7bf..93b1aa932014 100644 --- a/trunk/fs/btrfs/btrfs_inode.h +++ b/trunk/fs/btrfs/btrfs_inode.h @@ -121,6 +121,9 @@ struct btrfs_inode { */ u64 index_cnt; + /* the start of block group preferred for allocations. */ + u64 block_group; + /* the fsync log has some corner cases that mean we have to check * directories to see if any unlinks have been done before * the directory was logged. 
See tree-log.c for all the diff --git a/trunk/fs/btrfs/ctree.c b/trunk/fs/btrfs/ctree.c index d84089349c82..b0e18d986e0a 100644 --- a/trunk/fs/btrfs/ctree.c +++ b/trunk/fs/btrfs/ctree.c @@ -43,6 +43,8 @@ struct btrfs_path *btrfs_alloc_path(void) { struct btrfs_path *path; path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); + if (path) + path->reada = 1; return path; } @@ -1222,7 +1224,6 @@ static void reada_for_search(struct btrfs_root *root, u64 search; u64 target; u64 nread = 0; - u64 gen; int direction = path->reada; struct extent_buffer *eb; u32 nr; @@ -1250,15 +1251,6 @@ static void reada_for_search(struct btrfs_root *root, nritems = btrfs_header_nritems(node); nr = slot; while (1) { - if (!node->map_token) { - unsigned long offset = btrfs_node_key_ptr_offset(nr); - map_private_extent_buffer(node, offset, - sizeof(struct btrfs_key_ptr), - &node->map_token, - &node->kaddr, - &node->map_start, - &node->map_len, KM_USER1); - } if (direction < 0) { if (nr == 0) break; @@ -1276,23 +1268,14 @@ static void reada_for_search(struct btrfs_root *root, search = btrfs_node_blockptr(node, nr); if ((search <= target && target - search <= 65536) || (search > target && search - target <= 65536)) { - gen = btrfs_node_ptr_generation(node, nr); - if (node->map_token) { - unmap_extent_buffer(node, node->map_token, - KM_USER1); - node->map_token = NULL; - } - readahead_tree_block(root, search, blocksize, gen); + readahead_tree_block(root, search, blocksize, + btrfs_node_ptr_generation(node, nr)); nread += blocksize; } nscan++; if ((nread > 65536 || nscan > 32)) break; } - if (node->map_token) { - unmap_extent_buffer(node, node->map_token, KM_USER1); - node->map_token = NULL; - } } /* @@ -1665,6 +1648,9 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root } cow_done: BUG_ON(!cow && ins_len); + if (level != btrfs_header_level(b)) + WARN_ON(1); + level = btrfs_header_level(b); p->nodes[level] = b; if (!p->skip_locking) diff --git a/trunk/fs/btrfs/ctree.h b/trunk/fs/btrfs/ctree.h index 378b5b4443f3..6c093fa98f61 100644 --- a/trunk/fs/btrfs/ctree.h +++ b/trunk/fs/btrfs/ctree.h @@ -930,6 +930,7 @@ struct btrfs_fs_info { * is required instead of the faster short fsync log commits */ u64 last_trans_log_full_commit; + u64 open_ioctl_trans; unsigned long mount_opt:20; unsigned long compress_type:4; u64 max_inline; @@ -946,6 +947,7 @@ struct btrfs_fs_info { struct super_block *sb; struct inode *btree_inode; struct backing_dev_info bdi; + struct mutex trans_mutex; struct mutex tree_log_mutex; struct mutex transaction_kthread_mutex; struct mutex cleaner_mutex; @@ -966,7 +968,6 @@ struct btrfs_fs_info { struct rw_semaphore subvol_sem; struct srcu_struct subvol_srcu; - spinlock_t trans_lock; struct list_head trans_list; struct list_head hashers; struct list_head dead_roots; @@ -979,7 +980,6 @@ struct btrfs_fs_info { atomic_t async_submit_draining; atomic_t nr_async_bios; atomic_t async_delalloc_pages; - atomic_t open_ioctl_trans; /* * this is used by the balancing code to wait for all the pending @@ -1044,7 +1044,6 @@ struct btrfs_fs_info { int closing; int log_root_recovering; int enospc_unlink; - int trans_no_join; u64 total_pinned; @@ -1066,6 +1065,7 @@ struct btrfs_fs_info { struct reloc_control *reloc_ctl; spinlock_t delalloc_lock; + spinlock_t new_trans_lock; u64 delalloc_bytes; /* data_alloc_cluster is only used in ssd mode */ @@ -1340,7 +1340,6 @@ struct btrfs_ioctl_defrag_range_args { #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) 
#define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) -#define BTRFS_MOUNT_INODE_MAP_CACHE (1 << 17) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) @@ -2239,9 +2238,6 @@ int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, void btrfs_block_rsv_release(struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, u64 num_bytes); -int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_block_rsv *rsv); int btrfs_set_block_group_ro(struct btrfs_root *root, struct btrfs_block_group_cache *cache); int btrfs_set_block_group_rw(struct btrfs_root *root, @@ -2354,15 +2350,6 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *node, struct extent_buffer *parent); -static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info) -{ - /* - * Get synced with close_ctree() - */ - smp_mb(); - return fs_info->closing; -} - /* root-item.c */ int btrfs_find_root_ref(struct btrfs_root *tree_root, struct btrfs_path *path, @@ -2525,7 +2512,8 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc); int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, - struct btrfs_root *new_root, u64 new_dirid); + struct btrfs_root *new_root, + u64 new_dirid, u64 alloc_hint); int btrfs_merge_bio_hook(struct page *page, unsigned long offset, size_t size, struct bio *bio, unsigned long bio_flags); diff --git a/trunk/fs/btrfs/delayed-inode.c b/trunk/fs/btrfs/delayed-inode.c index 6462c29d2d37..01e29503a54b 100644 --- a/trunk/fs/btrfs/delayed-inode.c +++ b/trunk/fs/btrfs/delayed-inode.c @@ -678,7 +678,6 @@ static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans, INIT_LIST_HEAD(&head); next = item; - nitems = 0; /* * count the number of the continuous items that we can insert in batch @@ -1130,7 +1129,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work) delayed_node = async_node->delayed_node; root = delayed_node->root; - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 0); if (IS_ERR(trans)) goto free_path; @@ -1573,7 +1572,8 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans, btrfs_set_stack_inode_transid(inode_item, trans->transid); btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev); btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags); - btrfs_set_stack_inode_block_group(inode_item, 0); + btrfs_set_stack_inode_block_group(inode_item, + BTRFS_I(inode)->block_group); btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item), inode->i_atime.tv_sec); @@ -1595,7 +1595,7 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode) { struct btrfs_delayed_node *delayed_node; - int ret = 0; + int ret; delayed_node = btrfs_get_or_create_delayed_node(inode); if (IS_ERR(delayed_node)) diff --git a/trunk/fs/btrfs/disk-io.c b/trunk/fs/btrfs/disk-io.c index a203d363184d..98b6a71decba 100644 --- a/trunk/fs/btrfs/disk-io.c +++ b/trunk/fs/btrfs/disk-io.c @@ -1505,24 +1505,24 @@ static int transaction_kthread(void *arg) vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); mutex_lock(&root->fs_info->transaction_kthread_mutex); - spin_lock(&root->fs_info->trans_lock); + spin_lock(&root->fs_info->new_trans_lock); cur = root->fs_info->running_transaction; if (!cur) { - spin_unlock(&root->fs_info->trans_lock); + 
spin_unlock(&root->fs_info->new_trans_lock); goto sleep; } now = get_seconds(); if (!cur->blocked && (now < cur->start_time || now - cur->start_time < 30)) { - spin_unlock(&root->fs_info->trans_lock); + spin_unlock(&root->fs_info->new_trans_lock); delay = HZ * 5; goto sleep; } transid = cur->transid; - spin_unlock(&root->fs_info->trans_lock); + spin_unlock(&root->fs_info->new_trans_lock); - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); if (transid == trans->transid) { ret = btrfs_commit_transaction(trans, root); @@ -1613,7 +1613,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->ordered_operations); INIT_LIST_HEAD(&fs_info->caching_block_groups); spin_lock_init(&fs_info->delalloc_lock); - spin_lock_init(&fs_info->trans_lock); + spin_lock_init(&fs_info->new_trans_lock); spin_lock_init(&fs_info->ref_cache_lock); spin_lock_init(&fs_info->fs_roots_radix_lock); spin_lock_init(&fs_info->delayed_iput_lock); @@ -1645,7 +1645,6 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->max_inline = 8192 * 1024; fs_info->metadata_ratio = 0; fs_info->defrag_inodes = RB_ROOT; - fs_info->trans_no_join = 0; fs_info->thread_pool_size = min_t(unsigned long, num_online_cpus() + 2, 8); @@ -1710,6 +1709,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->do_barriers = 1; + mutex_init(&fs_info->trans_mutex); mutex_init(&fs_info->ordered_operations_mutex); mutex_init(&fs_info->tree_log_mutex); mutex_init(&fs_info->chunk_mutex); @@ -2479,13 +2479,13 @@ int btrfs_commit_super(struct btrfs_root *root) down_write(&root->fs_info->cleanup_work_sem); up_write(&root->fs_info->cleanup_work_sem); - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); if (IS_ERR(trans)) return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); /* run commit again to drop the original snapshot */ - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); if (IS_ERR(trans)) return PTR_ERR(trans); btrfs_commit_transaction(trans, root); @@ -3024,13 +3024,10 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root) WARN_ON(1); + mutex_lock(&root->fs_info->trans_mutex); mutex_lock(&root->fs_info->transaction_kthread_mutex); - spin_lock(&root->fs_info->trans_lock); list_splice_init(&root->fs_info->trans_list, &list); - root->fs_info->trans_no_join = 1; - spin_unlock(&root->fs_info->trans_lock); - while (!list_empty(&list)) { t = list_entry(list.next, struct btrfs_transaction, list); if (!t) @@ -3055,18 +3052,23 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root) t->blocked = 0; if (waitqueue_active(&root->fs_info->transaction_wait)) wake_up(&root->fs_info->transaction_wait); + mutex_unlock(&root->fs_info->trans_mutex); + mutex_lock(&root->fs_info->trans_mutex); t->commit_done = 1; if (waitqueue_active(&t->commit_wait)) wake_up(&t->commit_wait); + mutex_unlock(&root->fs_info->trans_mutex); + + mutex_lock(&root->fs_info->trans_mutex); btrfs_destroy_pending_snapshots(t); btrfs_destroy_delalloc_inodes(root); - spin_lock(&root->fs_info->trans_lock); + spin_lock(&root->fs_info->new_trans_lock); root->fs_info->running_transaction = NULL; - spin_unlock(&root->fs_info->trans_lock); + spin_unlock(&root->fs_info->new_trans_lock); btrfs_destroy_marked_extents(root, &t->dirty_pages, EXTENT_DIRTY); @@ -3080,10 +3082,8 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root) kmem_cache_free(btrfs_transaction_cachep, t); } - 
spin_lock(&root->fs_info->trans_lock); - root->fs_info->trans_no_join = 0; - spin_unlock(&root->fs_info->trans_lock); mutex_unlock(&root->fs_info->transaction_kthread_mutex); + mutex_unlock(&root->fs_info->trans_mutex); return 0; } diff --git a/trunk/fs/btrfs/extent-tree.c b/trunk/fs/btrfs/extent-tree.c index 5b9b6b6df242..169bd62ce776 100644 --- a/trunk/fs/btrfs/extent-tree.c +++ b/trunk/fs/btrfs/extent-tree.c @@ -348,7 +348,7 @@ static int caching_kthread(void *data) */ path->skip_locking = 1; path->search_commit_root = 1; - path->reada = 1; + path->reada = 2; key.objectid = last; key.offset = 0; @@ -366,7 +366,8 @@ static int caching_kthread(void *data) nritems = btrfs_header_nritems(leaf); while (1) { - if (btrfs_fs_closing(fs_info) > 1) { + smp_mb(); + if (fs_info->closing > 1) { last = (u64)-1; break; } @@ -378,18 +379,15 @@ static int caching_kthread(void *data) if (ret) break; - if (need_resched() || - btrfs_next_leaf(extent_root, path)) { - caching_ctl->progress = last; - btrfs_release_path(path); - up_read(&fs_info->extent_commit_sem); - mutex_unlock(&caching_ctl->mutex); + caching_ctl->progress = last; + btrfs_release_path(path); + up_read(&fs_info->extent_commit_sem); + mutex_unlock(&caching_ctl->mutex); + if (btrfs_transaction_in_commit(fs_info)) + schedule_timeout(1); + else cond_resched(); - goto again; - } - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - continue; + goto again; } if (key.objectid < block_group->key.objectid) { @@ -3067,7 +3065,7 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes) spin_unlock(&data_sinfo->lock); alloc: alloc_target = btrfs_get_alloc_profile(root, 1); - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); if (IS_ERR(trans)) return PTR_ERR(trans); @@ -3093,10 +3091,9 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes) /* commit the current transaction and try again */ commit_trans: - if (!committed && - !atomic_read(&root->fs_info->open_ioctl_trans)) { + if (!committed && !root->fs_info->open_ioctl_trans) { committed = 1; - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); if (IS_ERR(trans)) return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); @@ -3475,7 +3472,7 @@ static int reserve_metadata_bytes(struct btrfs_trans_handle *trans, goto out; ret = -ENOSPC; - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); if (IS_ERR(trans)) goto out; ret = btrfs_commit_transaction(trans, root); @@ -3702,7 +3699,7 @@ int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, if (trans) return -EAGAIN; - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); ret = btrfs_commit_transaction(trans, root); return 0; @@ -3840,37 +3837,6 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info) WARN_ON(fs_info->chunk_block_rsv.reserved > 0); } -int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_block_rsv *rsv) -{ - struct btrfs_block_rsv *trans_rsv = &root->fs_info->trans_block_rsv; - u64 num_bytes; - int ret; - - /* - * Truncate should be freeing data, but give us 2 items just in case it - * needs to use some space. We may want to be smarter about this in the - * future. 
- */ - num_bytes = btrfs_calc_trans_metadata_size(root, 2); - - /* We already have enough bytes, just return */ - if (rsv->reserved >= num_bytes) - return 0; - - num_bytes -= rsv->reserved; - - /* - * You should have reserved enough space before hand to do this, so this - * should not fail. - */ - ret = block_rsv_migrate_bytes(trans_rsv, rsv, num_bytes); - BUG_ON(ret); - - return 0; -} - int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, struct btrfs_root *root, int num_items) @@ -3911,18 +3877,23 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv; /* - * We need to hold space in order to delete our orphan item once we've - * added it, so this takes the reservation so we can release it later - * when we are truly done with the orphan item. + * one for deleting orphan item, one for updating inode and + * two for calling btrfs_truncate_inode_items. + * + * btrfs_truncate_inode_items is a delete operation, it frees + * more space than it uses in most cases. So two units of + * metadata space should be enough for calling it many times. + * If all of the metadata space is used, we can commit + * transaction and use space it freed. */ - u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1); + u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); } void btrfs_orphan_release_metadata(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; - u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1); + u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); } @@ -5016,15 +4987,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, if (unlikely(block_group->ro)) goto loop; - spin_lock(&block_group->free_space_ctl->tree_lock); - if (cached && - block_group->free_space_ctl->free_space < - num_bytes + empty_size) { - spin_unlock(&block_group->free_space_ctl->tree_lock); - goto loop; - } - spin_unlock(&block_group->free_space_ctl->tree_lock); - /* * Ok we want to try and use the cluster allocator, so lets look * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will @@ -5188,7 +5150,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, btrfs_add_free_space(block_group, offset, search_start - offset); BUG_ON(offset > search_start); - btrfs_put_block_group(block_group); break; loop: failed_cluster_refill = false; @@ -5281,7 +5242,14 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, ret = -ENOSPC; } else if (!ins->objectid) { ret = -ENOSPC; - } else if (ins->objectid) { + } + + /* we found what we needed */ + if (ins->objectid) { + if (!(data & BTRFS_BLOCK_GROUP_DATA)) + trans->block_group = block_group->key.objectid; + + btrfs_put_block_group(block_group); ret = 0; } @@ -6558,7 +6526,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, BUG_ON(cache->ro); - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); alloc_flags = update_block_group_flags(root, cache->flags); @@ -6914,7 +6882,6 @@ int btrfs_read_block_groups(struct btrfs_root *root) path = btrfs_alloc_path(); if (!path) return -ENOMEM; - path->reada = 1; cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy); if (cache_gen != 0 && diff --git a/trunk/fs/btrfs/extent_io.c b/trunk/fs/btrfs/extent_io.c index 7055d11c1efd..c5d9fbb92bc3 100644 --- a/trunk/fs/btrfs/extent_io.c +++ 
b/trunk/fs/btrfs/extent_io.c
@@ -1476,7 +1476,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
 			if (total_bytes >= max_bytes)
 				break;
 			if (!found) {
-				*start = max(cur_start, state->start);
+				*start = state->start;
 				found = 1;
 			}
 			last = state->end;
diff --git a/trunk/fs/btrfs/file.c b/trunk/fs/btrfs/file.c
index fa4ef18b66b1..c6a22d783c35 100644
--- a/trunk/fs/btrfs/file.c
+++ b/trunk/fs/btrfs/file.c
@@ -129,7 +129,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 	if (!btrfs_test_opt(root, AUTO_DEFRAG))
 		return 0;
 
-	if (btrfs_fs_closing(root->fs_info))
+	if (root->fs_info->closing)
 		return 0;
 
 	if (BTRFS_I(inode)->in_defrag)
@@ -144,7 +144,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 	if (!defrag)
 		return -ENOMEM;
 
-	defrag->ino = btrfs_ino(inode);
+	defrag->ino = inode->i_ino;
 	defrag->transid = transid;
 	defrag->root = root->root_key.objectid;
 
@@ -229,7 +229,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 		first_ino = defrag->ino + 1;
 		rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
 
-		if (btrfs_fs_closing(fs_info))
+		if (fs_info->closing)
 			goto next_free;
 
 		spin_unlock(&fs_info->defrag_inodes_lock);
@@ -1480,12 +1480,14 @@ int btrfs_sync_file(struct file *file, int datasync)
 	 * the current transaction, we can bail out now without any
 	 * syncing
 	 */
-	smp_mb();
+	mutex_lock(&root->fs_info->trans_mutex);
 	if (BTRFS_I(inode)->last_trans <=
 	    root->fs_info->last_trans_committed) {
 		BTRFS_I(inode)->last_trans = 0;
+		mutex_unlock(&root->fs_info->trans_mutex);
 		goto out;
 	}
+	mutex_unlock(&root->fs_info->trans_mutex);
 
 	/*
 	 * ok we haven't committed the transaction yet, lets do a commit
diff --git a/trunk/fs/btrfs/free-space-cache.c b/trunk/fs/btrfs/free-space-cache.c
index ad144736a5fd..70d45795d758 100644
--- a/trunk/fs/btrfs/free-space-cache.c
+++ b/trunk/fs/btrfs/free-space-cache.c
@@ -98,7 +98,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
 		return inode;
 
 	spin_lock(&block_group->lock);
-	if (!btrfs_fs_closing(root->fs_info)) {
+	if (!root->fs_info->closing) {
 		block_group->inode = igrab(inode);
 		block_group->iref = 1;
 	}
@@ -402,14 +402,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 			spin_lock(&ctl->tree_lock);
 			ret = link_free_space(ctl, e);
 			spin_unlock(&ctl->tree_lock);
-			if (ret) {
-				printk(KERN_ERR "Duplicate entries in "
-				       "free space cache, dumping\n");
-				kunmap(page);
-				unlock_page(page);
-				page_cache_release(page);
-				goto free_cache;
-			}
+			BUG_ON(ret);
 		} else {
 			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
 			if (!e->bitmap) {
@@ -426,14 +419,6 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 			ctl->op->recalc_thresholds(ctl);
 			spin_unlock(&ctl->tree_lock);
 			list_add_tail(&e->list, &bitmaps);
-			if (ret) {
-				printk(KERN_ERR "Duplicate entries in "
-				       "free space cache, dumping\n");
-				kunmap(page);
-				unlock_page(page);
-				page_cache_release(page);
-				goto free_cache;
-			}
 
 		num_entries--;
 
@@ -493,7 +478,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
*/ - if (btrfs_fs_closing(fs_info)) + smp_mb(); + if (fs_info->closing) return 0; /* @@ -589,25 +575,10 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - - /* Since the first page has all of our checksums and our generation we - * need to calculate the offset into the page that we can start writing - * our entries. - */ - first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); - filemap_write_and_wait(inode->i_mapping); btrfs_wait_ordered_range(inode, inode->i_size & ~(root->sectorsize - 1), (u64)-1); - /* make sure we don't overflow that first page */ - if (first_page_offset + sizeof(struct btrfs_free_space_entry) >= PAGE_CACHE_SIZE) { - /* this is really the same as running out of space, where we also return 0 */ - printk(KERN_CRIT "Btrfs: free space cache was too big for the crc page\n"); - ret = 0; - goto out_update; - } - /* We need a checksum per page. */ crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); if (!crc) @@ -619,6 +590,12 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, return -1; } + /* Since the first page has all of our checksums and our generation we + * need to calculate the offset into the page that we can start writing + * our entries. + */ + first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); + /* Get the cluster for this block_group if it exists */ if (block_group && !list_empty(&block_group->cluster_list)) cluster = list_entry(block_group->cluster_list.next, @@ -880,14 +857,12 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, ret = 1; out_free: - kfree(checksums); - kfree(pages); - -out_update: if (ret != 1) { invalidate_inode_pages2_range(inode->i_mapping, 0, index); BTRFS_I(inode)->generation = 0; } + kfree(checksums); + kfree(pages); btrfs_update_inode(trans, root, inode); return ret; } @@ -988,16 +963,10 @@ static int tree_insert_offset(struct rb_root *root, u64 offset, * logically. */ if (bitmap) { - if (info->bitmap) { - WARN_ON_ONCE(1); - return -EEXIST; - } + WARN_ON(info->bitmap); p = &(*p)->rb_right; } else { - if (!info->bitmap) { - WARN_ON_ONCE(1); - return -EEXIST; - } + WARN_ON(!info->bitmap); p = &(*p)->rb_left; } } @@ -2512,7 +2481,7 @@ struct inode *lookup_free_ino_inode(struct btrfs_root *root, return inode; spin_lock(&root->cache_lock); - if (!btrfs_fs_closing(root->fs_info)) + if (!root->fs_info->closing) root->cache_inode = igrab(inode); spin_unlock(&root->cache_lock); @@ -2535,14 +2504,12 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root) int ret = 0; u64 root_gen = btrfs_root_generation(&root->root_item); - if (!btrfs_test_opt(root, INODE_MAP_CACHE)) - return 0; - /* * If we're unmounting then just return, since this does a search on the * normal root and not the commit root and we could deadlock. 
*/ - if (btrfs_fs_closing(fs_info)) + smp_mb(); + if (fs_info->closing) return 0; path = btrfs_alloc_path(); @@ -2576,9 +2543,6 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root, struct inode *inode; int ret; - if (!btrfs_test_opt(root, INODE_MAP_CACHE)) - return 0; - inode = lookup_free_ino_inode(root, path); if (IS_ERR(inode)) return 0; diff --git a/trunk/fs/btrfs/inode-map.c b/trunk/fs/btrfs/inode-map.c index b4087e0fa871..3262cd17a12f 100644 --- a/trunk/fs/btrfs/inode-map.c +++ b/trunk/fs/btrfs/inode-map.c @@ -38,9 +38,6 @@ static int caching_kthread(void *data) int slot; int ret; - if (!btrfs_test_opt(root, INODE_MAP_CACHE)) - return 0; - path = btrfs_alloc_path(); if (!path) return -ENOMEM; @@ -62,7 +59,8 @@ static int caching_kthread(void *data) goto out; while (1) { - if (btrfs_fs_closing(fs_info)) + smp_mb(); + if (fs_info->closing) goto out; leaf = path->nodes[0]; @@ -143,9 +141,6 @@ static void start_caching(struct btrfs_root *root) int ret; u64 objectid; - if (!btrfs_test_opt(root, INODE_MAP_CACHE)) - return; - spin_lock(&root->cache_lock); if (root->cached != BTRFS_CACHE_NO) { spin_unlock(&root->cache_lock); @@ -183,9 +178,6 @@ static void start_caching(struct btrfs_root *root) int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) { - if (!btrfs_test_opt(root, INODE_MAP_CACHE)) - return btrfs_find_free_objectid(root, objectid); - again: *objectid = btrfs_find_ino_for_alloc(root); @@ -209,10 +201,6 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid) { struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; - - if (!btrfs_test_opt(root, INODE_MAP_CACHE)) - return; - again: if (root->cached == BTRFS_CACHE_FINISHED) { __btrfs_add_free_space(ctl, objectid, 1); @@ -262,9 +250,6 @@ void btrfs_unpin_free_ino(struct btrfs_root *root) struct rb_node *n; u64 count; - if (!btrfs_test_opt(root, INODE_MAP_CACHE)) - return; - while (1) { n = rb_first(rbroot); if (!n) @@ -403,24 +388,9 @@ int btrfs_save_ino_cache(struct btrfs_root *root, int prealloc; bool retry = false; - /* only fs tree and subvol/snap needs ino cache */ - if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID && - (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID || - root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID)) - return 0; - - /* Don't save inode cache if we are deleting this root */ - if (btrfs_root_refs(&root->root_item) == 0 && - root != root->fs_info->tree_root) - return 0; - - if (!btrfs_test_opt(root, INODE_MAP_CACHE)) - return 0; - path = btrfs_alloc_path(); if (!path) return -ENOMEM; - again: inode = lookup_free_ino_inode(root, path); if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c index ebf95f7a44d6..39a9d5750efd 100644 --- a/trunk/fs/btrfs/inode.c +++ b/trunk/fs/btrfs/inode.c @@ -138,6 +138,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, return -ENOMEM; path->leave_spinning = 1; + btrfs_set_trans_block_group(trans, inode); key.objectid = btrfs_ino(inode); key.offset = start; @@ -425,8 +426,9 @@ static noinline int compress_file_range(struct inode *inode, } } if (start == 0) { - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); + btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; /* lets try to make an inline extent */ @@ -621,9 +623,8 @@ static noinline int submit_compressed_extents(struct inode *inode, async_extent->start 
+ async_extent->ram_size - 1, GFP_NOFS); - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); - trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_reserve_extent(trans, root, async_extent->compressed_size, async_extent->compressed_size, @@ -792,8 +793,9 @@ static noinline int cow_file_range(struct inode *inode, int ret = 0; BUG_ON(is_free_space_inode(root, inode)); - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); + btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; num_bytes = (end - start + blocksize) & ~(blocksize - 1); @@ -1075,12 +1077,10 @@ static noinline int run_delalloc_nocow(struct inode *inode, nolock = is_free_space_inode(root, inode); if (nolock) - trans = btrfs_join_transaction_nolock(root); + trans = btrfs_join_transaction_nolock(root, 1); else - trans = btrfs_join_transaction(root); - + trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); - trans->block_rsv = &root->fs_info->delalloc_block_rsv; cow_start = (u64)-1; cur_offset = start; @@ -1519,6 +1519,8 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, { struct btrfs_ordered_sum *sum; + btrfs_set_trans_block_group(trans, inode); + list_for_each_entry(sum, list, list) { btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root->fs_info->csum_root, sum); @@ -1733,10 +1735,11 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); if (!ret) { if (nolock) - trans = btrfs_join_transaction_nolock(root); + trans = btrfs_join_transaction_nolock(root, 1); else - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); + btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); @@ -1749,10 +1752,11 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) 0, &cached_state, GFP_NOFS); if (nolock) - trans = btrfs_join_transaction_nolock(root); + trans = btrfs_join_transaction_nolock(root, 1); else - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); + btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) @@ -2427,7 +2431,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) (u64)-1); if (root->orphan_block_rsv || root->orphan_item_inserted) { - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); if (!IS_ERR(trans)) btrfs_end_transaction(trans, root); } @@ -2507,12 +2511,12 @@ static void btrfs_read_locked_inode(struct inode *inode) struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_key location; int maybe_acls; + u64 alloc_group_block; u32 rdev; int ret; path = btrfs_alloc_path(); BUG_ON(!path); - path->leave_spinning = 1; memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); ret = btrfs_lookup_inode(NULL, root, path, &location, 0); @@ -2522,12 +2526,6 @@ static void btrfs_read_locked_inode(struct inode *inode) leaf = path->nodes[0]; inode_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); - if (!leaf->map_token) - map_private_extent_buffer(leaf, (unsigned long)inode_item, - sizeof(struct btrfs_inode_item), - &leaf->map_token, &leaf->kaddr, - &leaf->map_start, 
&leaf->map_len, - KM_USER1); inode->i_mode = btrfs_inode_mode(leaf, inode_item); inode->i_nlink = btrfs_inode_nlink(leaf, inode_item); @@ -2557,6 +2555,8 @@ static void btrfs_read_locked_inode(struct inode *inode) BTRFS_I(inode)->index_cnt = (u64)-1; BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); + alloc_group_block = btrfs_inode_block_group(leaf, inode_item); + /* * try to precache a NULL acl entry for files that don't have * any xattrs or acls @@ -2566,11 +2566,8 @@ static void btrfs_read_locked_inode(struct inode *inode) if (!maybe_acls) cache_no_acl(inode); - if (leaf->map_token) { - unmap_extent_buffer(leaf, leaf->map_token, KM_USER1); - leaf->map_token = NULL; - } - + BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0, + alloc_group_block, 0); btrfs_free_path(path); inode_item = NULL; @@ -2650,7 +2647,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, btrfs_set_inode_transid(leaf, item, trans->transid); btrfs_set_inode_rdev(leaf, item, inode->i_rdev); btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); - btrfs_set_inode_block_group(leaf, item, 0); + btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group); if (leaf->map_token) { unmap_extent_buffer(leaf, leaf->map_token, KM_USER1); @@ -3007,6 +3004,8 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) if (IS_ERR(trans)) return PTR_ERR(trans); + btrfs_set_trans_block_group(trans, dir); + btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0); ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, @@ -3095,6 +3094,8 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) if (IS_ERR(trans)) return PTR_ERR(trans); + btrfs_set_trans_block_group(trans, dir); + if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { err = btrfs_unlink_subvol(trans, root, dir, BTRFS_I(inode)->location.objectid, @@ -3513,6 +3514,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) err = PTR_ERR(trans); break; } + btrfs_set_trans_block_group(trans, inode); err = btrfs_drop_extents(trans, inode, cur_offset, cur_offset + hole_size, @@ -3648,6 +3650,7 @@ void btrfs_evict_inode(struct inode *inode) while (1) { trans = btrfs_start_transaction(root, 0); BUG_ON(IS_ERR(trans)); + btrfs_set_trans_block_group(trans, inode); trans->block_rsv = root->orphan_block_rsv; ret = btrfs_block_rsv_check(trans, root, @@ -4130,8 +4133,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, path = btrfs_alloc_path(); if (!path) return -ENOMEM; - - path->reada = 1; + path->reada = 2; if (key_type == BTRFS_DIR_INDEX_KEY) { INIT_LIST_HEAD(&ins_list); @@ -4266,16 +4268,18 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) if (BTRFS_I(inode)->dummy_inode) return 0; - if (btrfs_fs_closing(root->fs_info) && is_free_space_inode(root, inode)) + smp_mb(); + if (root->fs_info->closing && is_free_space_inode(root, inode)) nolock = true; if (wbc->sync_mode == WB_SYNC_ALL) { if (nolock) - trans = btrfs_join_transaction_nolock(root); + trans = btrfs_join_transaction_nolock(root, 1); else - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); if (IS_ERR(trans)) return PTR_ERR(trans); + btrfs_set_trans_block_group(trans, inode); if (nolock) ret = btrfs_end_transaction_nolock(trans, root); else @@ -4299,8 +4303,9 @@ void btrfs_dirty_inode(struct inode *inode, int flags) if (BTRFS_I(inode)->dummy_inode) return; - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); 
BUG_ON(IS_ERR(trans)); + btrfs_set_trans_block_group(trans, inode); ret = btrfs_update_inode(trans, root, inode); if (ret && ret == -ENOSPC) { @@ -4314,6 +4319,7 @@ void btrfs_dirty_inode(struct inode *inode, int flags) PTR_ERR(trans)); return; } + btrfs_set_trans_block_group(trans, inode); ret = btrfs_update_inode(trans, root, inode); if (ret) { @@ -4412,8 +4418,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *dir, const char *name, int name_len, - u64 ref_objectid, u64 objectid, int mode, - u64 *index) + u64 ref_objectid, u64 objectid, + u64 alloc_hint, int mode, u64 *index) { struct inode *inode; struct btrfs_inode_item *inode_item; @@ -4466,6 +4472,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, owner = 0; else owner = 1; + BTRFS_I(inode)->block_group = + btrfs_find_block_group(root, 0, alloc_hint, owner); key[0].objectid = objectid; btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); @@ -4621,13 +4629,15 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, if (IS_ERR(trans)) return PTR_ERR(trans); + btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); if (err) goto out_unlock; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, btrfs_ino(dir), objectid, - mode, &index); + BTRFS_I(dir)->block_group, mode, &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_unlock; @@ -4639,6 +4649,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, goto out_unlock; } + btrfs_set_trans_block_group(trans, inode); err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); if (err) drop_inode = 1; @@ -4647,6 +4658,8 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, init_special_inode(inode, inode->i_mode, rdev); btrfs_update_inode(trans, root, inode); } + btrfs_update_inode_block_group(trans, inode); + btrfs_update_inode_block_group(trans, dir); out_unlock: nr = trans->blocks_used; btrfs_end_transaction_throttle(trans, root); @@ -4679,13 +4692,15 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, if (IS_ERR(trans)) return PTR_ERR(trans); + btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); if (err) goto out_unlock; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, btrfs_ino(dir), objectid, - mode, &index); + BTRFS_I(dir)->block_group, mode, &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_unlock; @@ -4697,6 +4712,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, goto out_unlock; } + btrfs_set_trans_block_group(trans, inode); err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); if (err) drop_inode = 1; @@ -4707,6 +4723,8 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, inode->i_op = &btrfs_file_inode_operations; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; } + btrfs_update_inode_block_group(trans, inode); + btrfs_update_inode_block_group(trans, dir); out_unlock: nr = trans->blocks_used; btrfs_end_transaction_throttle(trans, root); @@ -4753,6 +4771,8 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, btrfs_inc_nlink(inode); inode->i_ctime = CURRENT_TIME; + + btrfs_set_trans_block_group(trans, dir); ihold(inode); err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); @@ -4761,6 +4781,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, drop_inode = 1; } else { struct dentry *parent = 
dget_parent(dentry); + btrfs_update_inode_block_group(trans, dir); err = btrfs_update_inode(trans, root, inode); BUG_ON(err); btrfs_log_new_name(trans, inode, NULL, parent); @@ -4797,6 +4818,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) trans = btrfs_start_transaction(root, 5); if (IS_ERR(trans)) return PTR_ERR(trans); + btrfs_set_trans_block_group(trans, dir); err = btrfs_find_free_ino(root, &objectid); if (err) @@ -4804,7 +4826,8 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, btrfs_ino(dir), objectid, - S_IFDIR | mode, &index); + BTRFS_I(dir)->block_group, S_IFDIR | mode, + &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_fail; @@ -4818,6 +4841,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) inode->i_op = &btrfs_dir_inode_operations; inode->i_fop = &btrfs_dir_file_operations; + btrfs_set_trans_block_group(trans, inode); btrfs_i_size_write(inode, 0); err = btrfs_update_inode(trans, root, inode); @@ -4831,6 +4855,8 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) d_instantiate(dentry, inode); drop_on_err = 0; + btrfs_update_inode_block_group(trans, inode); + btrfs_update_inode_block_group(trans, dir); out_fail: nr = trans->blocks_used; @@ -4963,15 +4989,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, if (!path) { path = btrfs_alloc_path(); - if (!path) { - err = -ENOMEM; - goto out; - } - /* - * Chances are we'll be called again, so go ahead and do - * readahead - */ - path->reada = 1; + BUG_ON(!path); } ret = btrfs_lookup_file_extent(trans, root, path, @@ -5112,10 +5130,8 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, kunmap(page); free_extent_map(em); em = NULL; - btrfs_release_path(path); - trans = btrfs_join_transaction(root); - + trans = btrfs_join_transaction(root, 1); if (IS_ERR(trans)) return ERR_CAST(trans); goto again; @@ -5359,7 +5375,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, btrfs_drop_extent_cache(inode, start, start + len - 1, 0); } - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 0); if (IS_ERR(trans)) return ERR_CAST(trans); @@ -5595,7 +5611,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, * to make sure the current transaction stays open * while we look for nocow cross refs */ - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 0); if (IS_ERR(trans)) goto must_cow; @@ -5734,7 +5750,7 @@ static void btrfs_endio_direct_write(struct bio *bio, int err) BUG_ON(!ordered); - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); if (IS_ERR(trans)) { err = -ENOMEM; goto out; @@ -6484,7 +6500,6 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) static int btrfs_truncate(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; - struct btrfs_block_rsv *rsv; int ret; int err = 0; struct btrfs_trans_handle *trans; @@ -6498,80 +6513,28 @@ static int btrfs_truncate(struct inode *inode) btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); btrfs_ordered_update_i_size(inode, inode->i_size, NULL); - /* - * Yes ladies and gentelment, this is indeed ugly. The fact is we have - * 3 things going on here - * - * 1) We need to reserve space for our orphan item and the space to - * delete our orphan item. 
Lord knows we don't want to have a dangling - * orphan item because we didn't reserve space to remove it. - * - * 2) We need to reserve space to update our inode. - * - * 3) We need to have something to cache all the space that is going to - * be free'd up by the truncate operation, but also have some slack - * space reserved in case it uses space during the truncate (thank you - * very much snapshotting). - * - * And we need these to all be seperate. The fact is we can use alot of - * space doing the truncate, and we have no earthly idea how much space - * we will use, so we need the truncate reservation to be seperate so it - * doesn't end up using space reserved for updating the inode or - * removing the orphan item. We also need to be able to stop the - * transaction and start a new one, which means we need to be able to - * update the inode several times, and we have no idea of knowing how - * many times that will be, so we can't just reserve 1 item for the - * entirety of the opration, so that has to be done seperately as well. - * Then there is the orphan item, which does indeed need to be held on - * to for the whole operation, and we need nobody to touch this reserved - * space except the orphan code. - * - * So that leaves us with - * - * 1) root->orphan_block_rsv - for the orphan deletion. - * 2) rsv - for the truncate reservation, which we will steal from the - * transaction reservation. - * 3) fs_info->trans_block_rsv - this will have 1 items worth left for - * updating the inode. - */ - rsv = btrfs_alloc_block_rsv(root); - if (!rsv) - return -ENOMEM; - btrfs_add_durable_block_rsv(root->fs_info, rsv); - - trans = btrfs_start_transaction(root, 4); - if (IS_ERR(trans)) { - err = PTR_ERR(trans); - goto out; - } + trans = btrfs_start_transaction(root, 5); + if (IS_ERR(trans)) + return PTR_ERR(trans); - /* - * Reserve space for the truncate process. Truncate should be adding - * space, but if there are snapshots it may end up using space. - */ - ret = btrfs_truncate_reserve_metadata(trans, root, rsv); - BUG_ON(ret); + btrfs_set_trans_block_group(trans, inode); ret = btrfs_orphan_add(trans, inode); if (ret) { btrfs_end_transaction(trans, root); - goto out; + return ret; } nr = trans->blocks_used; btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(root, nr); - /* - * Ok so we've already migrated our bytes over for the truncate, so here - * just reserve the one slot we need for updating the inode. 
- */ - trans = btrfs_start_transaction(root, 1); - if (IS_ERR(trans)) { - err = PTR_ERR(trans); - goto out; - } - trans->block_rsv = rsv; + /* Now start a transaction for the truncate */ + trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) + return PTR_ERR(trans); + btrfs_set_trans_block_group(trans, inode); + trans->block_rsv = root->orphan_block_rsv; /* * setattr is responsible for setting the ordered_data_close flag, @@ -6595,17 +6558,24 @@ static int btrfs_truncate(struct inode *inode) while (1) { if (!trans) { - trans = btrfs_start_transaction(root, 3); - if (IS_ERR(trans)) { - err = PTR_ERR(trans); - goto out; - } - - ret = btrfs_truncate_reserve_metadata(trans, root, - rsv); - BUG_ON(ret); + trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) + return PTR_ERR(trans); + btrfs_set_trans_block_group(trans, inode); + trans->block_rsv = root->orphan_block_rsv; + } - trans->block_rsv = rsv; + ret = btrfs_block_rsv_check(trans, root, + root->orphan_block_rsv, 0, 5); + if (ret == -EAGAIN) { + ret = btrfs_commit_transaction(trans, root); + if (ret) + return ret; + trans = NULL; + continue; + } else if (ret) { + err = ret; + break; } ret = btrfs_truncate_inode_items(trans, root, inode, @@ -6616,7 +6586,6 @@ static int btrfs_truncate(struct inode *inode) break; } - trans->block_rsv = &root->fs_info->trans_block_rsv; ret = btrfs_update_inode(trans, root, inode); if (ret) { err = ret; @@ -6630,7 +6599,6 @@ static int btrfs_truncate(struct inode *inode) } if (ret == 0 && inode->i_nlink > 0) { - trans->block_rsv = root->orphan_block_rsv; ret = btrfs_orphan_del(trans, inode); if (ret) err = ret; @@ -6642,20 +6610,15 @@ static int btrfs_truncate(struct inode *inode) ret = btrfs_orphan_del(NULL, inode); } - trans->block_rsv = &root->fs_info->trans_block_rsv; ret = btrfs_update_inode(trans, root, inode); if (ret && !err) err = ret; nr = trans->blocks_used; ret = btrfs_end_transaction_throttle(trans, root); - btrfs_btree_balance_dirty(root, nr); - -out: - btrfs_free_block_rsv(root, rsv); - if (ret && !err) err = ret; + btrfs_btree_balance_dirty(root, nr); return err; } @@ -6664,14 +6627,15 @@ static int btrfs_truncate(struct inode *inode) * create a new subvolume directory/inode (helper for the ioctl). 
*/ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, - struct btrfs_root *new_root, u64 new_dirid) + struct btrfs_root *new_root, + u64 new_dirid, u64 alloc_hint) { struct inode *inode; int err; u64 index = 0; inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid, - new_dirid, S_IFDIR | 0700, &index); + new_dirid, alloc_hint, S_IFDIR | 0700, &index); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &btrfs_dir_inode_operations; @@ -6784,6 +6748,21 @@ void btrfs_destroy_inode(struct inode *inode) spin_unlock(&root->fs_info->ordered_extent_lock); } + if (root == root->fs_info->tree_root) { + struct btrfs_block_group_cache *block_group; + + block_group = btrfs_lookup_block_group(root->fs_info, + BTRFS_I(inode)->block_group); + if (block_group && block_group->inode == inode) { + spin_lock(&block_group->lock); + block_group->inode = NULL; + spin_unlock(&block_group->lock); + btrfs_put_block_group(block_group); + } else if (block_group) { + btrfs_put_block_group(block_group); + } + } + spin_lock(&root->orphan_lock); if (!list_empty(&BTRFS_I(inode)->i_orphan)) { printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n", @@ -6969,6 +6948,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, goto out_notrans; } + btrfs_set_trans_block_group(trans, new_dir); + if (dest != root) btrfs_record_root_in_trans(trans, dest); @@ -7150,13 +7131,16 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, if (IS_ERR(trans)) return PTR_ERR(trans); + btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); if (err) goto out_unlock; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, btrfs_ino(dir), objectid, - S_IFLNK|S_IRWXUGO, &index); + BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, + &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_unlock; @@ -7168,6 +7152,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, goto out_unlock; } + btrfs_set_trans_block_group(trans, inode); err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); if (err) drop_inode = 1; @@ -7178,6 +7163,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, inode->i_op = &btrfs_file_inode_operations; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; } + btrfs_update_inode_block_group(trans, inode); + btrfs_update_inode_block_group(trans, dir); if (drop_inode) goto out_unlock; diff --git a/trunk/fs/btrfs/ioctl.c b/trunk/fs/btrfs/ioctl.c index ac37040e426a..85e818ce00c5 100644 --- a/trunk/fs/btrfs/ioctl.c +++ b/trunk/fs/btrfs/ioctl.c @@ -243,7 +243,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); } - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); ret = btrfs_update_inode(trans, root, inode); @@ -414,7 +414,8 @@ static noinline int create_subvol(struct btrfs_root *root, btrfs_record_root_in_trans(trans, new_root); - ret = btrfs_create_subvol_root(trans, new_root, new_dirid); + ret = btrfs_create_subvol_root(trans, new_root, new_dirid, + BTRFS_I(dir)->block_group); /* * insert the directory item */ @@ -706,17 +707,16 @@ static int find_new_extents(struct btrfs_root *root, struct btrfs_file_extent_item *extent; int type; int ret; - u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); if (!path) return -ENOMEM; - min_key.objectid = ino; + min_key.objectid = inode->i_ino; min_key.type = BTRFS_EXTENT_DATA_KEY; 
min_key.offset = *off; - max_key.objectid = ino; + max_key.objectid = inode->i_ino; max_key.type = (u8)-1; max_key.offset = (u64)-1; @@ -727,7 +727,7 @@ static int find_new_extents(struct btrfs_root *root, path, 0, newer_than); if (ret != 0) goto none; - if (min_key.objectid != ino) + if (min_key.objectid != inode->i_ino) goto none; if (min_key.type != BTRFS_EXTENT_DATA_KEY) goto none; @@ -2489,10 +2489,12 @@ static long btrfs_ioctl_trans_start(struct file *file) if (ret) goto out; - atomic_inc(&root->fs_info->open_ioctl_trans); + mutex_lock(&root->fs_info->trans_mutex); + root->fs_info->open_ioctl_trans++; + mutex_unlock(&root->fs_info->trans_mutex); ret = -ENOMEM; - trans = btrfs_start_ioctl_transaction(root); + trans = btrfs_start_ioctl_transaction(root, 0); if (IS_ERR(trans)) goto out_drop; @@ -2500,7 +2502,9 @@ static long btrfs_ioctl_trans_start(struct file *file) return 0; out_drop: - atomic_dec(&root->fs_info->open_ioctl_trans); + mutex_lock(&root->fs_info->trans_mutex); + root->fs_info->open_ioctl_trans--; + mutex_unlock(&root->fs_info->trans_mutex); mnt_drop_write(file->f_path.mnt); out: return ret; @@ -2734,7 +2738,9 @@ long btrfs_ioctl_trans_end(struct file *file) btrfs_end_transaction(trans, root); - atomic_dec(&root->fs_info->open_ioctl_trans); + mutex_lock(&root->fs_info->trans_mutex); + root->fs_info->open_ioctl_trans--; + mutex_unlock(&root->fs_info->trans_mutex); mnt_drop_write(file->f_path.mnt); return 0; diff --git a/trunk/fs/btrfs/relocation.c b/trunk/fs/btrfs/relocation.c index b1ef27cc673b..ca38eca70af0 100644 --- a/trunk/fs/btrfs/relocation.c +++ b/trunk/fs/btrfs/relocation.c @@ -677,8 +677,6 @@ struct backref_node *build_backref_tree(struct reloc_control *rc, err = -ENOMEM; goto out; } - path1->reada = 1; - path2->reada = 2; node = alloc_backref_node(cache); if (!node) { @@ -2001,7 +1999,6 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, path = btrfs_alloc_path(); if (!path) return -ENOMEM; - path->reada = 1; reloc_root = root->reloc_root; root_item = &reloc_root->root_item; @@ -2142,10 +2139,10 @@ int prepare_to_merge(struct reloc_control *rc, int err) u64 num_bytes = 0; int ret; - spin_lock(&root->fs_info->trans_lock); + mutex_lock(&root->fs_info->trans_mutex); rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; rc->merging_rsv_size += rc->nodes_relocated * 2; - spin_unlock(&root->fs_info->trans_lock); + mutex_unlock(&root->fs_info->trans_mutex); again: if (!err) { num_bytes = rc->merging_rsv_size; @@ -2155,7 +2152,7 @@ int prepare_to_merge(struct reloc_control *rc, int err) err = ret; } - trans = btrfs_join_transaction(rc->extent_root); + trans = btrfs_join_transaction(rc->extent_root, 1); if (IS_ERR(trans)) { if (!err) btrfs_block_rsv_release(rc->extent_root, @@ -2214,9 +2211,9 @@ int merge_reloc_roots(struct reloc_control *rc) int ret; again: root = rc->extent_root; - spin_lock(&root->fs_info->trans_lock); + mutex_lock(&root->fs_info->trans_mutex); list_splice_init(&rc->reloc_roots, &reloc_roots); - spin_unlock(&root->fs_info->trans_lock); + mutex_unlock(&root->fs_info->trans_mutex); while (!list_empty(&reloc_roots)) { found = 1; @@ -3239,7 +3236,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info, goto out; } - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(root, 0); if (IS_ERR(trans)) { btrfs_free_path(path); ret = PTR_ERR(trans); @@ -3303,7 +3300,6 @@ static int find_data_references(struct reloc_control *rc, path = btrfs_alloc_path(); if (!path) return -ENOMEM; - 
path->reada = 1; root = read_fs_root(rc->extent_root->fs_info, ref_root); if (IS_ERR(root)) { @@ -3590,17 +3586,17 @@ int find_next_extent(struct btrfs_trans_handle *trans, static void set_reloc_control(struct reloc_control *rc) { struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; - spin_lock(&fs_info->trans_lock); + mutex_lock(&fs_info->trans_mutex); fs_info->reloc_ctl = rc; - spin_unlock(&fs_info->trans_lock); + mutex_unlock(&fs_info->trans_mutex); } static void unset_reloc_control(struct reloc_control *rc) { struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; - spin_lock(&fs_info->trans_lock); + mutex_lock(&fs_info->trans_mutex); fs_info->reloc_ctl = NULL; - spin_unlock(&fs_info->trans_lock); + mutex_unlock(&fs_info->trans_mutex); } static int check_extent_flags(u64 flags) @@ -3649,7 +3645,7 @@ int prepare_to_relocate(struct reloc_control *rc) rc->create_reloc_tree = 1; set_reloc_control(rc); - trans = btrfs_join_transaction(rc->extent_root); + trans = btrfs_join_transaction(rc->extent_root, 1); BUG_ON(IS_ERR(trans)); btrfs_commit_transaction(trans, rc->extent_root); return 0; @@ -3672,7 +3668,6 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) path = btrfs_alloc_path(); if (!path) return -ENOMEM; - path->reada = 1; ret = prepare_to_relocate(rc); if (ret) { @@ -3839,7 +3834,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1); /* get rid of pinned extents */ - trans = btrfs_join_transaction(rc->extent_root); + trans = btrfs_join_transaction(rc->extent_root, 1); if (IS_ERR(trans)) err = PTR_ERR(trans); else @@ -4098,7 +4093,6 @@ int btrfs_recover_relocation(struct btrfs_root *root) path = btrfs_alloc_path(); if (!path) return -ENOMEM; - path->reada = -1; key.objectid = BTRFS_TREE_RELOC_OBJECTID; key.type = BTRFS_ROOT_ITEM_KEY; @@ -4165,7 +4159,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) set_reloc_control(rc); - trans = btrfs_join_transaction(rc->extent_root); + trans = btrfs_join_transaction(rc->extent_root, 1); if (IS_ERR(trans)) { unset_reloc_control(rc); err = PTR_ERR(trans); @@ -4199,7 +4193,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) unset_reloc_control(rc); - trans = btrfs_join_transaction(rc->extent_root); + trans = btrfs_join_transaction(rc->extent_root, 1); if (IS_ERR(trans)) err = PTR_ERR(trans); else diff --git a/trunk/fs/btrfs/scrub.c b/trunk/fs/btrfs/scrub.c index df50fd1eca8f..6dfed0c27ac3 100644 --- a/trunk/fs/btrfs/scrub.c +++ b/trunk/fs/btrfs/scrub.c @@ -117,37 +117,33 @@ static void scrub_free_csums(struct scrub_dev *sdev) } } -static void scrub_free_bio(struct bio *bio) -{ - int i; - struct page *last_page = NULL; - - if (!bio) - return; - - for (i = 0; i < bio->bi_vcnt; ++i) { - if (bio->bi_io_vec[i].bv_page == last_page) - continue; - last_page = bio->bi_io_vec[i].bv_page; - __free_page(last_page); - } - bio_put(bio); -} - static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev) { int i; + int j; + struct page *last_page; if (!sdev) return; for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { struct scrub_bio *sbio = sdev->bios[i]; + struct bio *bio; if (!sbio) break; - scrub_free_bio(sbio->bio); + bio = sbio->bio; + if (bio) { + last_page = NULL; + for (j = 0; j < bio->bi_vcnt; ++j) { + if (bio->bi_io_vec[j].bv_page == last_page) + continue; + last_page = bio->bi_io_vec[j].bv_page; + __free_page(last_page); + } + bio_put(bio); + } kfree(sbio); } @@ -160,6 +156,8 @@ struct scrub_dev 
*scrub_setup_dev(struct btrfs_device *dev) { struct scrub_dev *sdev; int i; + int j; + int ret; struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; sdev = kzalloc(sizeof(*sdev), GFP_NOFS); @@ -167,6 +165,7 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev) goto nomem; sdev->dev = dev; for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { + struct bio *bio; struct scrub_bio *sbio; sbio = kzalloc(sizeof(*sbio), GFP_NOFS); @@ -174,10 +173,32 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev) goto nomem; sdev->bios[i] = sbio; + bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO); + if (!bio) + goto nomem; + sbio->index = i; sbio->sdev = sdev; + sbio->bio = bio; sbio->count = 0; sbio->work.func = scrub_checksum; + bio->bi_private = sdev->bios[i]; + bio->bi_end_io = scrub_bio_end_io; + bio->bi_sector = 0; + bio->bi_bdev = dev->bdev; + bio->bi_size = 0; + + for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) { + struct page *page; + page = alloc_page(GFP_NOFS); + if (!page) + goto nomem; + + ret = bio_add_page(bio, page, PAGE_SIZE, 0); + if (!ret) + goto nomem; + } + WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO); if (i != SCRUB_BIOS_PER_DEV-1) sdev->bios[i]->next_free = i + 1; @@ -348,6 +369,9 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector, int ret; DECLARE_COMPLETION_ONSTACK(complete); + /* we are going to wait on this IO */ + rw |= REQ_SYNC; + bio = bio_alloc(GFP_NOFS, 1); bio->bi_bdev = bdev; bio->bi_sector = sector; @@ -356,7 +380,6 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector, bio->bi_private = &complete; submit_bio(rw, bio); - /* this will also unplug the queue */ wait_for_completion(&complete); ret = !test_bit(BIO_UPTODATE, &bio->bi_flags); @@ -371,7 +394,6 @@ static void scrub_bio_end_io(struct bio *bio, int err) struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; sbio->err = err; - sbio->bio = bio; btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work); } @@ -431,8 +453,6 @@ static void scrub_checksum(struct btrfs_work *work) } out: - scrub_free_bio(sbio->bio); - sbio->bio = NULL; spin_lock(&sdev->list_lock); sbio->next_free = sdev->first_free; sdev->first_free = sbio->index; @@ -563,50 +583,25 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer) static int scrub_submit(struct scrub_dev *sdev) { struct scrub_bio *sbio; - struct bio *bio; - int i; if (sdev->curr == -1) return 0; sbio = sdev->bios[sdev->curr]; - bio = bio_alloc(GFP_NOFS, sbio->count); - if (!bio) - goto nomem; - - bio->bi_private = sbio; - bio->bi_end_io = scrub_bio_end_io; - bio->bi_bdev = sdev->dev->bdev; - bio->bi_sector = sbio->physical >> 9; - - for (i = 0; i < sbio->count; ++i) { - struct page *page; - int ret; - - page = alloc_page(GFP_NOFS); - if (!page) - goto nomem; - - ret = bio_add_page(bio, page, PAGE_SIZE, 0); - if (!ret) { - __free_page(page); - goto nomem; - } - } - + sbio->bio->bi_sector = sbio->physical >> 9; + sbio->bio->bi_size = sbio->count * PAGE_SIZE; + sbio->bio->bi_next = NULL; + sbio->bio->bi_flags |= 1 << BIO_UPTODATE; + sbio->bio->bi_comp_cpu = -1; + sbio->bio->bi_bdev = sdev->dev->bdev; sbio->err = 0; sdev->curr = -1; atomic_inc(&sdev->in_flight); - submit_bio(READ, bio); + submit_bio(0, sbio->bio); return 0; - -nomem: - scrub_free_bio(bio); - - return -ENOMEM; } static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, @@ -638,11 +633,7 @@ static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, sbio->logical = logical; } else if (sbio->physical + sbio->count * PAGE_SIZE 
!= physical || sbio->logical + sbio->count * PAGE_SIZE != logical) { - int ret; - - ret = scrub_submit(sdev); - if (ret) - return ret; + scrub_submit(sdev); goto again; } sbio->spag[sbio->count].flags = flags; @@ -654,13 +645,8 @@ static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); } ++sbio->count; - if (sbio->count == SCRUB_PAGES_PER_BIO || force) { - int ret; - - ret = scrub_submit(sdev); - if (ret) - return ret; - } + if (sbio->count == SCRUB_PAGES_PER_BIO || force) + scrub_submit(sdev); return 0; } @@ -741,7 +727,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev, struct btrfs_root *root = fs_info->extent_root; struct btrfs_root *csum_root = fs_info->csum_root; struct btrfs_extent_item *extent; - struct blk_plug plug; u64 flags; int ret; int slot; @@ -846,7 +831,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev, * the scrub. This might currently (crc32) end up to be about 1MB */ start_stripe = 0; - blk_start_plug(&plug); again: logical = base + offset + start_stripe * increment; for (i = start_stripe; i < nstripes; ++i) { @@ -988,7 +972,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev, scrub_submit(sdev); out: - blk_finish_plug(&plug); btrfs_free_path(path); return ret < 0 ? ret : 0; } @@ -1183,7 +1166,7 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, int ret; struct btrfs_device *dev; - if (btrfs_fs_closing(root->fs_info)) + if (root->fs_info->closing) return -EINVAL; /* diff --git a/trunk/fs/btrfs/super.c b/trunk/fs/btrfs/super.c index 117e74e3604b..9b2e7e5bc3ef 100644 --- a/trunk/fs/btrfs/super.c +++ b/trunk/fs/btrfs/super.c @@ -161,8 +161,7 @@ enum { Opt_compress_type, Opt_compress_force, Opt_compress_force_type, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, - Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, - Opt_inode_cache, Opt_err, + Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_err, }; static match_table_t tokens = { @@ -194,7 +193,6 @@ static match_table_t tokens = { {Opt_enospc_debug, "enospc_debug"}, {Opt_subvolrootid, "subvolrootid=%d"}, {Opt_defrag, "autodefrag"}, - {Opt_inode_cache, "inode_cache"}, {Opt_err, NULL}, }; @@ -363,10 +361,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) printk(KERN_INFO "btrfs: enabling disk space caching\n"); btrfs_set_opt(info->mount_opt, SPACE_CACHE); break; - case Opt_inode_cache: - printk(KERN_INFO "btrfs: enabling inode map caching\n"); - btrfs_set_opt(info->mount_opt, INODE_MAP_CACHE); - break; case Opt_clear_cache: printk(KERN_INFO "btrfs: force clearing of disk cache\n"); btrfs_set_opt(info->mount_opt, CLEAR_CACHE); diff --git a/trunk/fs/btrfs/transaction.c b/trunk/fs/btrfs/transaction.c index dd719662340e..dc80f7156923 100644 --- a/trunk/fs/btrfs/transaction.c +++ b/trunk/fs/btrfs/transaction.c @@ -35,7 +35,6 @@ static noinline void put_transaction(struct btrfs_transaction *transaction) { WARN_ON(atomic_read(&transaction->use_count) == 0); if (atomic_dec_and_test(&transaction->use_count)) { - BUG_ON(!list_empty(&transaction->list)); memset(transaction, 0, sizeof(*transaction)); kmem_cache_free(btrfs_transaction_cachep, transaction); } @@ -50,72 +49,46 @@ static noinline void switch_commit_root(struct btrfs_root *root) /* * either allocate a new transaction or hop into the existing one */ -static noinline int join_transaction(struct btrfs_root *root, int nofail) 
+static noinline int join_transaction(struct btrfs_root *root) { struct btrfs_transaction *cur_trans; - - spin_lock(&root->fs_info->trans_lock); - if (root->fs_info->trans_no_join) { - if (!nofail) { - spin_unlock(&root->fs_info->trans_lock); - return -EBUSY; - } - } - cur_trans = root->fs_info->running_transaction; - if (cur_trans) { - atomic_inc(&cur_trans->use_count); + if (!cur_trans) { + cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, + GFP_NOFS); + if (!cur_trans) + return -ENOMEM; + root->fs_info->generation++; + atomic_set(&cur_trans->num_writers, 1); + cur_trans->num_joined = 0; + cur_trans->transid = root->fs_info->generation; + init_waitqueue_head(&cur_trans->writer_wait); + init_waitqueue_head(&cur_trans->commit_wait); + cur_trans->in_commit = 0; + cur_trans->blocked = 0; + atomic_set(&cur_trans->use_count, 1); + cur_trans->commit_done = 0; + cur_trans->start_time = get_seconds(); + + cur_trans->delayed_refs.root = RB_ROOT; + cur_trans->delayed_refs.num_entries = 0; + cur_trans->delayed_refs.num_heads_ready = 0; + cur_trans->delayed_refs.num_heads = 0; + cur_trans->delayed_refs.flushing = 0; + cur_trans->delayed_refs.run_delayed_start = 0; + spin_lock_init(&cur_trans->delayed_refs.lock); + + INIT_LIST_HEAD(&cur_trans->pending_snapshots); + list_add_tail(&cur_trans->list, &root->fs_info->trans_list); + extent_io_tree_init(&cur_trans->dirty_pages, + root->fs_info->btree_inode->i_mapping); + spin_lock(&root->fs_info->new_trans_lock); + root->fs_info->running_transaction = cur_trans; + spin_unlock(&root->fs_info->new_trans_lock); + } else { atomic_inc(&cur_trans->num_writers); cur_trans->num_joined++; - spin_unlock(&root->fs_info->trans_lock); - return 0; } - spin_unlock(&root->fs_info->trans_lock); - - cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS); - if (!cur_trans) - return -ENOMEM; - spin_lock(&root->fs_info->trans_lock); - if (root->fs_info->running_transaction) { - kmem_cache_free(btrfs_transaction_cachep, cur_trans); - cur_trans = root->fs_info->running_transaction; - atomic_inc(&cur_trans->use_count); - atomic_inc(&cur_trans->num_writers); - cur_trans->num_joined++; - spin_unlock(&root->fs_info->trans_lock); - return 0; - } - atomic_set(&cur_trans->num_writers, 1); - cur_trans->num_joined = 0; - init_waitqueue_head(&cur_trans->writer_wait); - init_waitqueue_head(&cur_trans->commit_wait); - cur_trans->in_commit = 0; - cur_trans->blocked = 0; - /* - * One for this trans handle, one so it will live on until we - * commit the transaction. 
- */ - atomic_set(&cur_trans->use_count, 2); - cur_trans->commit_done = 0; - cur_trans->start_time = get_seconds(); - - cur_trans->delayed_refs.root = RB_ROOT; - cur_trans->delayed_refs.num_entries = 0; - cur_trans->delayed_refs.num_heads_ready = 0; - cur_trans->delayed_refs.num_heads = 0; - cur_trans->delayed_refs.flushing = 0; - cur_trans->delayed_refs.run_delayed_start = 0; - spin_lock_init(&cur_trans->commit_lock); - spin_lock_init(&cur_trans->delayed_refs.lock); - - INIT_LIST_HEAD(&cur_trans->pending_snapshots); - list_add_tail(&cur_trans->list, &root->fs_info->trans_list); - extent_io_tree_init(&cur_trans->dirty_pages, - root->fs_info->btree_inode->i_mapping); - root->fs_info->generation++; - cur_trans->transid = root->fs_info->generation; - root->fs_info->running_transaction = cur_trans; - spin_unlock(&root->fs_info->trans_lock); return 0; } @@ -126,28 +99,39 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail) * to make sure the old root from before we joined the transaction is deleted * when the transaction commits */ -int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, - struct btrfs_root *root) +static noinline int record_root_in_trans(struct btrfs_trans_handle *trans, + struct btrfs_root *root) { if (root->ref_cows && root->last_trans < trans->transid) { WARN_ON(root == root->fs_info->extent_root); WARN_ON(root->commit_root != root->node); - spin_lock(&root->fs_info->fs_roots_radix_lock); - if (root->last_trans == trans->transid) { - spin_unlock(&root->fs_info->fs_roots_radix_lock); - return 0; - } - root->last_trans = trans->transid; radix_tree_tag_set(&root->fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, BTRFS_ROOT_TRANS_TAG); - spin_unlock(&root->fs_info->fs_roots_radix_lock); + root->last_trans = trans->transid; btrfs_init_reloc_root(trans, root); } return 0; } +int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + if (!root->ref_cows) + return 0; + + mutex_lock(&root->fs_info->trans_mutex); + if (root->last_trans == trans->transid) { + mutex_unlock(&root->fs_info->trans_mutex); + return 0; + } + + record_root_in_trans(trans, root); + mutex_unlock(&root->fs_info->trans_mutex); + return 0; +} + /* wait for commit against the current transaction to become unblocked * when this is done, it is safe to start a new transaction, but the current * transaction might not be fully on disk. 
@@ -156,23 +140,21 @@ static void wait_current_trans(struct btrfs_root *root) { struct btrfs_transaction *cur_trans; - spin_lock(&root->fs_info->trans_lock); cur_trans = root->fs_info->running_transaction; if (cur_trans && cur_trans->blocked) { DEFINE_WAIT(wait); atomic_inc(&cur_trans->use_count); - spin_unlock(&root->fs_info->trans_lock); while (1) { prepare_to_wait(&root->fs_info->transaction_wait, &wait, TASK_UNINTERRUPTIBLE); if (!cur_trans->blocked) break; + mutex_unlock(&root->fs_info->trans_mutex); schedule(); + mutex_lock(&root->fs_info->trans_mutex); } finish_wait(&root->fs_info->transaction_wait, &wait); put_transaction(cur_trans); - } else { - spin_unlock(&root->fs_info->trans_lock); } } @@ -185,16 +167,10 @@ enum btrfs_trans_type { static int may_wait_transaction(struct btrfs_root *root, int type) { - if (root->fs_info->log_root_recovering) - return 0; - - if (type == TRANS_USERSPACE) - return 1; - - if (type == TRANS_START && - !atomic_read(&root->fs_info->open_ioctl_trans)) + if (!root->fs_info->log_root_recovering && + ((type == TRANS_START && !root->fs_info->open_ioctl_trans) || + type == TRANS_USERSPACE)) return 1; - return 0; } @@ -208,44 +184,36 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) return ERR_PTR(-EROFS); - - if (current->journal_info) { - WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK); - h = current->journal_info; - h->use_count++; - h->orig_rsv = h->block_rsv; - h->block_rsv = NULL; - goto got_it; - } again: h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); if (!h) return ERR_PTR(-ENOMEM); + if (type != TRANS_JOIN_NOLOCK) + mutex_lock(&root->fs_info->trans_mutex); if (may_wait_transaction(root, type)) wait_current_trans(root); - do { - ret = join_transaction(root, type == TRANS_JOIN_NOLOCK); - if (ret == -EBUSY) - wait_current_trans(root); - } while (ret == -EBUSY); - + ret = join_transaction(root); if (ret < 0) { kmem_cache_free(btrfs_trans_handle_cachep, h); + if (type != TRANS_JOIN_NOLOCK) + mutex_unlock(&root->fs_info->trans_mutex); return ERR_PTR(ret); } cur_trans = root->fs_info->running_transaction; + atomic_inc(&cur_trans->use_count); + if (type != TRANS_JOIN_NOLOCK) + mutex_unlock(&root->fs_info->trans_mutex); h->transid = cur_trans->transid; h->transaction = cur_trans; h->blocks_used = 0; + h->block_group = 0; h->bytes_reserved = 0; h->delayed_ref_updates = 0; - h->use_count = 1; h->block_rsv = NULL; - h->orig_rsv = NULL; smp_mb(); if (cur_trans->blocked && may_wait_transaction(root, type)) { @@ -273,8 +241,11 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, } } -got_it: - btrfs_record_root_in_trans(h, root); + if (type != TRANS_JOIN_NOLOCK) + mutex_lock(&root->fs_info->trans_mutex); + record_root_in_trans(h, root); + if (type != TRANS_JOIN_NOLOCK) + mutex_unlock(&root->fs_info->trans_mutex); if (!current->journal_info && type != TRANS_USERSPACE) current->journal_info = h; @@ -286,19 +257,22 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, { return start_transaction(root, num_items, TRANS_START); } -struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root) +struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, + int num_blocks) { return start_transaction(root, 0, TRANS_JOIN); } -struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root) +struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root, + int 
num_blocks) { return start_transaction(root, 0, TRANS_JOIN_NOLOCK); } -struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root) +struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, + int num_blocks) { - return start_transaction(root, 0, TRANS_USERSPACE); + return start_transaction(r, 0, TRANS_USERSPACE); } /* wait for a transaction commit to be fully complete */ @@ -306,13 +280,17 @@ static noinline int wait_for_commit(struct btrfs_root *root, struct btrfs_transaction *commit) { DEFINE_WAIT(wait); + mutex_lock(&root->fs_info->trans_mutex); while (!commit->commit_done) { prepare_to_wait(&commit->commit_wait, &wait, TASK_UNINTERRUPTIBLE); if (commit->commit_done) break; + mutex_unlock(&root->fs_info->trans_mutex); schedule(); + mutex_lock(&root->fs_info->trans_mutex); } + mutex_unlock(&root->fs_info->trans_mutex); finish_wait(&commit->commit_wait, &wait); return 0; } @@ -322,56 +300,59 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid) struct btrfs_transaction *cur_trans = NULL, *t; int ret; + mutex_lock(&root->fs_info->trans_mutex); + ret = 0; if (transid) { if (transid <= root->fs_info->last_trans_committed) - goto out; + goto out_unlock; /* find specified transaction */ - spin_lock(&root->fs_info->trans_lock); list_for_each_entry(t, &root->fs_info->trans_list, list) { if (t->transid == transid) { cur_trans = t; - atomic_inc(&cur_trans->use_count); break; } if (t->transid > transid) break; } - spin_unlock(&root->fs_info->trans_lock); ret = -EINVAL; if (!cur_trans) - goto out; /* bad transid */ + goto out_unlock; /* bad transid */ } else { /* find newest transaction that is committing | committed */ - spin_lock(&root->fs_info->trans_lock); list_for_each_entry_reverse(t, &root->fs_info->trans_list, list) { if (t->in_commit) { if (t->commit_done) - goto out; + goto out_unlock; cur_trans = t; - atomic_inc(&cur_trans->use_count); break; } } - spin_unlock(&root->fs_info->trans_lock); if (!cur_trans) - goto out; /* nothing committing|committed */ + goto out_unlock; /* nothing committing|committed */ } + atomic_inc(&cur_trans->use_count); + mutex_unlock(&root->fs_info->trans_mutex); + wait_for_commit(root, cur_trans); + mutex_lock(&root->fs_info->trans_mutex); put_transaction(cur_trans); ret = 0; -out: +out_unlock: + mutex_unlock(&root->fs_info->trans_mutex); return ret; } void btrfs_throttle(struct btrfs_root *root) { - if (!atomic_read(&root->fs_info->open_ioctl_trans)) + mutex_lock(&root->fs_info->trans_mutex); + if (!root->fs_info->open_ioctl_trans) wait_current_trans(root); + mutex_unlock(&root->fs_info->trans_mutex); } static int should_end_transaction(struct btrfs_trans_handle *trans, @@ -389,7 +370,6 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_transaction *cur_trans = trans->transaction; int updates; - smp_mb(); if (cur_trans->blocked || cur_trans->delayed_refs.flushing) return 1; @@ -408,11 +388,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_fs_info *info = root->fs_info; int count = 0; - if (--trans->use_count) { - trans->block_rsv = trans->orig_rsv; - return 0; - } - while (count < 4) { unsigned long cur = trans->delayed_ref_updates; trans->delayed_ref_updates = 0; @@ -435,11 +410,9 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, btrfs_trans_release_metadata(trans, root); - if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) && - should_end_transaction(trans, root)) { + if (lock && 
!root->fs_info->open_ioctl_trans && + should_end_transaction(trans, root)) trans->transaction->blocked = 1; - smp_wmb(); - } if (lock && cur_trans->blocked && !cur_trans->in_commit) { if (throttle) @@ -730,9 +703,9 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, */ int btrfs_add_dead_root(struct btrfs_root *root) { - spin_lock(&root->fs_info->trans_lock); + mutex_lock(&root->fs_info->trans_mutex); list_add(&root->root_list, &root->fs_info->dead_roots); - spin_unlock(&root->fs_info->trans_lock); + mutex_unlock(&root->fs_info->trans_mutex); return 0; } @@ -748,7 +721,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, int ret; int err = 0; - spin_lock(&fs_info->fs_roots_radix_lock); while (1) { ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix, (void **)gang, 0, @@ -761,7 +733,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, radix_tree_tag_clear(&fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, BTRFS_ROOT_TRANS_TAG); - spin_unlock(&fs_info->fs_roots_radix_lock); btrfs_free_log(trans, root); btrfs_update_reloc_root(trans, root); @@ -782,12 +753,10 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, err = btrfs_update_root(trans, fs_info->tree_root, &root->root_key, &root->root_item); - spin_lock(&fs_info->fs_roots_radix_lock); if (err) break; } } - spin_unlock(&fs_info->fs_roots_radix_lock); return err; } @@ -817,7 +786,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly) btrfs_btree_balance_dirty(info->tree_root, nr); cond_resched(); - if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN) + if (root->fs_info->closing || ret != -EAGAIN) break; } root->defrag_running = 0; @@ -882,7 +851,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, parent = dget_parent(dentry); parent_inode = parent->d_inode; parent_root = BTRFS_I(parent_inode)->root; - btrfs_record_root_in_trans(trans, parent_root); + record_root_in_trans(trans, parent_root); /* * insert the directory item @@ -900,7 +869,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, ret = btrfs_update_inode(trans, parent_root, parent_inode); BUG_ON(ret); - btrfs_record_root_in_trans(trans, root); + record_root_in_trans(trans, root); btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); btrfs_check_and_init_root_item(new_root_item); @@ -998,20 +967,20 @@ static void update_super_roots(struct btrfs_root *root) int btrfs_transaction_in_commit(struct btrfs_fs_info *info) { int ret = 0; - spin_lock(&info->trans_lock); + spin_lock(&info->new_trans_lock); if (info->running_transaction) ret = info->running_transaction->in_commit; - spin_unlock(&info->trans_lock); + spin_unlock(&info->new_trans_lock); return ret; } int btrfs_transaction_blocked(struct btrfs_fs_info *info) { int ret = 0; - spin_lock(&info->trans_lock); + spin_lock(&info->new_trans_lock); if (info->running_transaction) ret = info->running_transaction->blocked; - spin_unlock(&info->trans_lock); + spin_unlock(&info->new_trans_lock); return ret; } @@ -1035,7 +1004,9 @@ static void wait_current_trans_commit_start(struct btrfs_root *root, &wait); break; } + mutex_unlock(&root->fs_info->trans_mutex); schedule(); + mutex_lock(&root->fs_info->trans_mutex); finish_wait(&root->fs_info->transaction_blocked_wait, &wait); } } @@ -1061,7 +1032,9 @@ static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root, 
&wait); break; } + mutex_unlock(&root->fs_info->trans_mutex); schedule(); + mutex_lock(&root->fs_info->trans_mutex); finish_wait(&root->fs_info->transaction_wait, &wait); } @@ -1099,7 +1072,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, INIT_DELAYED_WORK(&ac->work, do_async_commit); ac->root = root; - ac->newtrans = btrfs_join_transaction(root); + ac->newtrans = btrfs_join_transaction(root, 0); if (IS_ERR(ac->newtrans)) { int err = PTR_ERR(ac->newtrans); kfree(ac); @@ -1107,18 +1080,22 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, } /* take transaction reference */ + mutex_lock(&root->fs_info->trans_mutex); cur_trans = trans->transaction; atomic_inc(&cur_trans->use_count); + mutex_unlock(&root->fs_info->trans_mutex); btrfs_end_transaction(trans, root); schedule_delayed_work(&ac->work, 0); /* wait for transaction to start and unblock */ + mutex_lock(&root->fs_info->trans_mutex); if (wait_for_unblock) wait_current_trans_commit_start_and_unblock(root, cur_trans); else wait_current_trans_commit_start(root, cur_trans); put_transaction(cur_trans); + mutex_unlock(&root->fs_info->trans_mutex); return 0; } @@ -1162,41 +1139,38 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, ret = btrfs_run_delayed_refs(trans, root, 0); BUG_ON(ret); - spin_lock(&cur_trans->commit_lock); + mutex_lock(&root->fs_info->trans_mutex); if (cur_trans->in_commit) { - spin_unlock(&cur_trans->commit_lock); atomic_inc(&cur_trans->use_count); + mutex_unlock(&root->fs_info->trans_mutex); btrfs_end_transaction(trans, root); ret = wait_for_commit(root, cur_trans); BUG_ON(ret); + mutex_lock(&root->fs_info->trans_mutex); put_transaction(cur_trans); + mutex_unlock(&root->fs_info->trans_mutex); return 0; } trans->transaction->in_commit = 1; trans->transaction->blocked = 1; - spin_unlock(&cur_trans->commit_lock); wake_up(&root->fs_info->transaction_blocked_wait); - spin_lock(&root->fs_info->trans_lock); if (cur_trans->list.prev != &root->fs_info->trans_list) { prev_trans = list_entry(cur_trans->list.prev, struct btrfs_transaction, list); if (!prev_trans->commit_done) { atomic_inc(&prev_trans->use_count); - spin_unlock(&root->fs_info->trans_lock); + mutex_unlock(&root->fs_info->trans_mutex); wait_for_commit(root, prev_trans); + mutex_lock(&root->fs_info->trans_mutex); put_transaction(prev_trans); - } else { - spin_unlock(&root->fs_info->trans_lock); } - } else { - spin_unlock(&root->fs_info->trans_lock); } if (now < cur_trans->start_time || now - cur_trans->start_time < 1) @@ -1204,12 +1178,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, do { int snap_pending = 0; - joined = cur_trans->num_joined; if (!list_empty(&trans->transaction->pending_snapshots)) snap_pending = 1; WARN_ON(cur_trans != trans->transaction); + mutex_unlock(&root->fs_info->trans_mutex); if (flush_on_commit || snap_pending) { btrfs_start_delalloc_inodes(root, 1); @@ -1232,15 +1206,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, prepare_to_wait(&cur_trans->writer_wait, &wait, TASK_UNINTERRUPTIBLE); + smp_mb(); if (atomic_read(&cur_trans->num_writers) > 1) schedule_timeout(MAX_SCHEDULE_TIMEOUT); else if (should_grow) schedule_timeout(1); + mutex_lock(&root->fs_info->trans_mutex); finish_wait(&cur_trans->writer_wait, &wait); - spin_lock(&root->fs_info->trans_lock); - root->fs_info->trans_no_join = 1; - spin_unlock(&root->fs_info->trans_lock); } while (atomic_read(&cur_trans->num_writers) > 1 || (should_grow && cur_trans->num_joined != joined)); @@ -1285,6 
+1258,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, btrfs_prepare_extent_commit(trans, root); cur_trans = root->fs_info->running_transaction; + spin_lock(&root->fs_info->new_trans_lock); + root->fs_info->running_transaction = NULL; + spin_unlock(&root->fs_info->new_trans_lock); btrfs_set_root_node(&root->fs_info->tree_root->root_item, root->fs_info->tree_root->node); @@ -1305,13 +1281,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, sizeof(root->fs_info->super_copy)); trans->transaction->blocked = 0; - spin_lock(&root->fs_info->trans_lock); - root->fs_info->running_transaction = NULL; - root->fs_info->trans_no_join = 0; - spin_unlock(&root->fs_info->trans_lock); wake_up(&root->fs_info->transaction_wait); + mutex_unlock(&root->fs_info->trans_mutex); ret = btrfs_write_and_wait_transaction(trans, root); BUG_ON(ret); write_ctree_super(trans, root, 0); @@ -1324,21 +1297,22 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, btrfs_finish_extent_commit(trans, root); + mutex_lock(&root->fs_info->trans_mutex); + cur_trans->commit_done = 1; root->fs_info->last_trans_committed = cur_trans->transid; wake_up(&cur_trans->commit_wait); - spin_lock(&root->fs_info->trans_lock); list_del_init(&cur_trans->list); - spin_unlock(&root->fs_info->trans_lock); - put_transaction(cur_trans); put_transaction(cur_trans); trace_btrfs_transaction_commit(root); + mutex_unlock(&root->fs_info->trans_mutex); + btrfs_scrub_continue(root); if (current->journal_info == trans) @@ -1360,9 +1334,9 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root) LIST_HEAD(list); struct btrfs_fs_info *fs_info = root->fs_info; - spin_lock(&fs_info->trans_lock); + mutex_lock(&fs_info->trans_mutex); list_splice_init(&fs_info->dead_roots, &list); - spin_unlock(&fs_info->trans_lock); + mutex_unlock(&fs_info->trans_mutex); while (!list_empty(&list)) { root = list_entry(list.next, struct btrfs_root, root_list); diff --git a/trunk/fs/btrfs/transaction.h b/trunk/fs/btrfs/transaction.h index 02564e6230ac..804c88639e5d 100644 --- a/trunk/fs/btrfs/transaction.h +++ b/trunk/fs/btrfs/transaction.h @@ -28,12 +28,10 @@ struct btrfs_transaction { * transaction can end */ atomic_t num_writers; - atomic_t use_count; unsigned long num_joined; - - spinlock_t commit_lock; int in_commit; + atomic_t use_count; int commit_done; int blocked; struct list_head list; @@ -47,14 +45,13 @@ struct btrfs_transaction { struct btrfs_trans_handle { u64 transid; + u64 block_group; u64 bytes_reserved; - unsigned long use_count; unsigned long blocks_reserved; unsigned long blocks_used; unsigned long delayed_ref_updates; struct btrfs_transaction *transaction; struct btrfs_block_rsv *block_rsv; - struct btrfs_block_rsv *orig_rsv; }; struct btrfs_pending_snapshot { @@ -69,6 +66,19 @@ struct btrfs_pending_snapshot { struct list_head list; }; +static inline void btrfs_set_trans_block_group(struct btrfs_trans_handle *trans, + struct inode *inode) +{ + trans->block_group = BTRFS_I(inode)->block_group; +} + +static inline void btrfs_update_inode_block_group( + struct btrfs_trans_handle *trans, + struct inode *inode) +{ + BTRFS_I(inode)->block_group = trans->block_group; +} + static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans, struct inode *inode) { @@ -82,9 +92,12 @@ int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans, struct btrfs_root *root); struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, int num_items); -struct btrfs_trans_handle 
*btrfs_join_transaction(struct btrfs_root *root); -struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root); -struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root); +struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, + int num_blocks); +struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root, + int num_blocks); +struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, + int num_blocks); int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); diff --git a/trunk/fs/btrfs/volumes.c b/trunk/fs/btrfs/volumes.c index da541dfca2e3..c48214ef5c09 100644 --- a/trunk/fs/btrfs/volumes.c +++ b/trunk/fs/btrfs/volumes.c @@ -504,7 +504,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) BUG_ON(!new_device); memcpy(new_device, device, sizeof(*new_device)); new_device->name = kstrdup(device->name, GFP_NOFS); - BUG_ON(device->name && !new_device->name); + BUG_ON(!new_device->name); new_device->bdev = NULL; new_device->writeable = 0; new_device->in_fs_metadata = 0; diff --git a/trunk/fs/btrfs/xattr.c b/trunk/fs/btrfs/xattr.c index 5366fe452ab0..f3107e4b4d56 100644 --- a/trunk/fs/btrfs/xattr.c +++ b/trunk/fs/btrfs/xattr.c @@ -158,6 +158,8 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans, if (IS_ERR(trans)) return PTR_ERR(trans); + btrfs_set_trans_block_group(trans, inode); + ret = do_setxattr(trans, inode, name, value, size, flags); if (ret) goto out; diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c index e2e4e8d032ee..1ab641f2e78e 100644 --- a/trunk/fs/namei.c +++ b/trunk/fs/namei.c @@ -2579,7 +2579,6 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry) if (error) goto out; - shrink_dcache_parent(dentry); error = dir->i_op->rmdir(dir, dentry); if (error) goto out; @@ -2994,8 +2993,6 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry, if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry)) goto out; - if (target) - shrink_dcache_parent(new_dentry); error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); if (error) goto out; diff --git a/trunk/fs/partitions/check.c b/trunk/fs/partitions/check.c index d545e97d99c3..f82e762eeca2 100644 --- a/trunk/fs/partitions/check.c +++ b/trunk/fs/partitions/check.c @@ -255,7 +255,13 @@ ssize_t part_discard_alignment_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); - return sprintf(buf, "%u\n", p->discard_alignment); + struct gendisk *disk = dev_to_disk(dev); + unsigned int alignment = 0; + + if (disk->queue) + alignment = queue_limit_discard_alignment(&disk->queue->limits, + p->start_sect); + return sprintf(buf, "%u\n", alignment); } ssize_t part_stat_show(struct device *dev, @@ -449,8 +455,6 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, p->start_sect = start; p->alignment_offset = queue_limit_alignment_offset(&disk->queue->limits, start); - p->discard_alignment = - queue_limit_discard_alignment(&disk->queue->limits, start); p->nr_sects = len; p->partno = partno; p->policy = get_disk_ro(disk); diff --git a/trunk/fs/ubifs/io.c b/trunk/fs/ubifs/io.c index 3be645e012c9..166951e0dcd3 100644 --- a/trunk/fs/ubifs/io.c +++ b/trunk/fs/ubifs/io.c @@ -581,7 +581,6 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) ubifs_assert(wbuf->size % c->min_io_size == 0); 
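
The wait loops restored in the fs/btrfs/transaction.c hunks above (wait_current_trans(), wait_for_commit(), wait_current_trans_commit_start() and friends) all follow one pattern: test the condition under trans_mutex, drop the mutex around schedule(), retake it and retest. Sleeping while holding trans_mutex would block the very committer the waiter is waiting for. A minimal user-space sketch of the same idea, using a condition variable that atomically drops the lock while sleeping (illustrative only, made-up names, not kernel code):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
    static int commit_done;                 /* protected by 'lock' */

    static void *waiter(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            while (!commit_done)
                    /* drops 'lock' while asleep and retakes it before
                     * returning, the same unlock/sleep/lock dance the
                     * hunks above do around schedule() */
                    pthread_cond_wait(&done_cv, &lock);
            pthread_mutex_unlock(&lock);
            printf("commit observed\n");
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, waiter, NULL);
            pthread_mutex_lock(&lock);
            commit_done = 1;                /* the "committer" finishes */
            pthread_cond_broadcast(&done_cv);
            pthread_mutex_unlock(&lock);
            pthread_join(t, NULL);
            return 0;
    }

(build with gcc -pthread)
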
ubifs_assert(mutex_is_locked(&wbuf->io_mutex)); ubifs_assert(!c->ro_media && !c->ro_mount); - ubifs_assert(!c->space_fixup); if (c->leb_size - wbuf->offs >= c->max_write_size) ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size)); @@ -760,7 +759,6 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum, ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size); ubifs_assert(!c->ro_media && !c->ro_mount); - ubifs_assert(!c->space_fixup); if (c->ro_error) return -EROFS; diff --git a/trunk/fs/ubifs/journal.c b/trunk/fs/ubifs/journal.c index cef0460f4c54..34b1679e6e3a 100644 --- a/trunk/fs/ubifs/journal.c +++ b/trunk/fs/ubifs/journal.c @@ -669,7 +669,6 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, out_release: release_head(c, BASEHD); - kfree(dent); out_ro: ubifs_ro_mode(c, err); if (last_reference) diff --git a/trunk/fs/ubifs/orphan.c b/trunk/fs/ubifs/orphan.c index a5422fffbd69..bd644bf587a8 100644 --- a/trunk/fs/ubifs/orphan.c +++ b/trunk/fs/ubifs/orphan.c @@ -674,7 +674,7 @@ static int kill_orphans(struct ubifs_info *c) if (IS_ERR(sleb)) { if (PTR_ERR(sleb) == -EUCLEAN) sleb = ubifs_recover_leb(c, lnum, 0, - c->sbuf, -1); + c->sbuf, 0); if (IS_ERR(sleb)) { err = PTR_ERR(sleb); break; diff --git a/trunk/fs/ubifs/recovery.c b/trunk/fs/ubifs/recovery.c index 783d8e0beb76..731d9e2e7b50 100644 --- a/trunk/fs/ubifs/recovery.c +++ b/trunk/fs/ubifs/recovery.c @@ -564,15 +564,19 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb, } /** - * drop_last_group - drop the last group of nodes. + * drop_last_node - drop the last node or group of nodes. * @sleb: scanned LEB information * @offs: offset of dropped nodes is returned here + * @grouped: non-zero if whole group of nodes have to be dropped * * This is a helper function for 'ubifs_recover_leb()' which drops the last - * group of nodes of the scanned LEB. + * node of the scanned LEB or the last group of nodes if @grouped is not zero. + * This function returns %1 if a node was dropped and %0 otherwise. */ -static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs) +static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped) { + int dropped = 0; + while (!list_empty(&sleb->nodes)) { struct ubifs_scan_node *snod; struct ubifs_ch *ch; @@ -581,40 +585,17 @@ static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs) list); ch = snod->node; if (ch->group_type != UBIFS_IN_NODE_GROUP) - break; - - dbg_rcvry("dropping grouped node at %d:%d", - sleb->lnum, snod->offs); - *offs = snod->offs; - list_del(&snod->list); - kfree(snod); - sleb->nodes_cnt -= 1; - } -} - -/** - * drop_last_node - drop the last node. - * @sleb: scanned LEB information - * @offs: offset of dropped nodes is returned here - * @grouped: non-zero if whole group of nodes have to be dropped - * - * This is a helper function for 'ubifs_recover_leb()' which drops the last - * node of the scanned LEB. 
- */ -static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs) -{ - struct ubifs_scan_node *snod; - - if (!list_empty(&sleb->nodes)) { - snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node, - list); - - dbg_rcvry("dropping last node at %d:%d", sleb->lnum, snod->offs); + return dropped; + dbg_rcvry("dropping node at %d:%d", sleb->lnum, snod->offs); *offs = snod->offs; list_del(&snod->list); kfree(snod); sleb->nodes_cnt -= 1; + dropped = 1; + if (!grouped) + break; } + return dropped; } /** @@ -623,8 +604,7 @@ static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs) * @lnum: LEB number * @offs: offset * @sbuf: LEB-sized buffer to use - * @jhead: journal head number this LEB belongs to (%-1 if the LEB does not - * belong to any journal head) + * @grouped: nodes may be grouped for recovery * * This function does a scan of a LEB, but caters for errors that might have * been caused by the unclean unmount from which we are attempting to recover. @@ -632,14 +612,13 @@ static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs) * found, and a negative error code in case of failure. */ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, - int offs, void *sbuf, int jhead) + int offs, void *sbuf, int grouped) { int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit; - int grouped = jhead == -1 ? 0 : c->jheads[jhead].grouped; struct ubifs_scan_leb *sleb; void *buf = sbuf + offs; - dbg_rcvry("%d:%d, jhead %d, grouped %d", lnum, offs, jhead, grouped); + dbg_rcvry("%d:%d", lnum, offs); sleb = ubifs_start_scan(c, lnum, offs, sbuf); if (IS_ERR(sleb)) @@ -656,7 +635,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, * Scan quietly until there is an error from which we cannot * recover */ - ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1); + ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0); if (ret == SCANNED_A_NODE) { /* A valid node, and not a padding node */ struct ubifs_ch *ch = buf; @@ -716,62 +695,59 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, * If nodes are grouped, always drop the incomplete group at * the end. */ - drop_last_group(sleb, &offs); + drop_last_node(sleb, &offs, 1); - if (jhead == GCHD) { - /* - * If this LEB belongs to the GC head then while we are in the - * middle of the same min. I/O unit keep dropping nodes. So - * basically, what we want is to make sure that the last min. - * I/O unit where we saw the corruption is dropped completely - * with all the uncorrupted nodes which may possibly sit there. - * - * In other words, let's name the min. I/O unit where the - * corruption starts B, and the previous min. I/O unit A. The - * below code tries to deal with a situation when half of B - * contains valid nodes or the end of a valid node, and the - * second half of B contains corrupted data or garbage. This - * means that UBIFS had been writing to B just before the power - * cut happened. I do not know how realistic is this scenario - * that half of the min. I/O unit had been written successfully - * and the other half not, but this is possible in our 'failure - * mode emulation' infrastructure at least. - * - * So what is the problem, why we need to drop those nodes? Why - * can't we just clean-up the second half of B by putting a - * padding node there? We can, and this works fine with one - * exception which was reproduced with power cut emulation - * testing and happens extremely rarely. 
- * - * Imagine the file-system is full, we run GC which starts - * moving valid nodes from LEB X to LEB Y (obviously, LEB Y is - * the current GC head LEB). The @c->gc_lnum is -1, which means - * that GC will retain LEB X and will try to continue. Imagine - * that LEB X is currently the dirtiest LEB, and the amount of - * used space in LEB Y is exactly the same as amount of free - * space in LEB X. - * - * And a power cut happens when nodes are moved from LEB X to - * LEB Y. We are here trying to recover LEB Y which is the GC - * head LEB. We find the min. I/O unit B as described above. - * Then we clean-up LEB Y by padding min. I/O unit. And later - * 'ubifs_rcvry_gc_commit()' function fails, because it cannot - * find a dirty LEB which could be GC'd into LEB Y! Even LEB X - * does not match because the amount of valid nodes there does - * not fit the free space in LEB Y any more! And this is - * because of the padding node which we added to LEB Y. The - * user-visible effect of this which I once observed and - * analysed is that we cannot mount the file-system with - * -ENOSPC error. - * - * So obviously, to make sure that situation does not happen we - * should free min. I/O unit B in LEB Y completely and the last - * used min. I/O unit in LEB Y should be A. This is basically - * what the below code tries to do. - */ - while (offs > min_io_unit) - drop_last_node(sleb, &offs); - } + /* + * While we are in the middle of the same min. I/O unit keep dropping + * nodes. So basically, what we want is to make sure that the last min. + * I/O unit where we saw the corruption is dropped completely with all + * the uncorrupted nodes which may possibly sit there. + * + * In other words, let's name the min. I/O unit where the corruption + * starts B, and the previous min. I/O unit A. The below code tries to + * deal with a situation when half of B contains valid nodes or the end + * of a valid node, and the second half of B contains corrupted data or + * garbage. This means that UBIFS had been writing to B just before the + * power cut happened. I do not know how realistic this scenario is + * that half of the min. I/O unit had been written successfully and the + * other half not, but this is possible in our 'failure mode emulation' + * infrastructure at least. + * + * So what is the problem, why we need to drop those nodes? Why can't + * we just clean-up the second half of B by putting a padding node + * there? We can, and this works fine with one exception which was + * reproduced with power cut emulation testing and happens extremely + * rarely. The description follows, but it is worth noting that that is + * only about the GC head, so we could do this trick only if the bud + * belongs to the GC head, but it does not seem to be worth an + * additional "if" statement. + * + * So, imagine the file-system is full, we run GC which is moving valid + * nodes from LEB X to LEB Y (obviously, LEB Y is the current GC head + * LEB). The @c->gc_lnum is -1, which means that GC will retain LEB X + * and will try to continue. Imagine that LEB X is currently the + * dirtiest LEB, and the amount of used space in LEB Y is exactly the + * same as amount of free space in LEB X. + * + * And a power cut happens when nodes are moved from LEB X to LEB Y. We + * are here trying to recover LEB Y which is the GC head LEB. We find + * the min. I/O unit B as described above. Then we clean-up LEB Y by + * padding min. I/O unit. 
And later 'ubifs_rcvry_gc_commit()' function + * fails, because it cannot find a dirty LEB which could be GC'd into + * LEB Y! Even LEB X does not match because the amount of valid nodes + * there does not fit the free space in LEB Y any more! And this is + * because of the padding node which we added to LEB Y. The + * user-visible effect of this which I once observed and analysed is + * that we cannot mount the file-system with -ENOSPC error. + * + * So obviously, to make sure that situation does not happen we should + * free min. I/O unit B in LEB Y completely and the last used min. I/O + * unit in LEB Y should be A. This is basically what the below code + * tries to do. + */ + while (min_io_unit == round_down(offs, c->min_io_size) && + min_io_unit != offs && + drop_last_node(sleb, &offs, grouped)); buf = sbuf + offs; len = c->leb_size - offs; @@ -905,7 +881,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum, } ubifs_scan_destroy(sleb); } - return ubifs_recover_leb(c, lnum, offs, sbuf, -1); + return ubifs_recover_leb(c, lnum, offs, sbuf, 0); } /** diff --git a/trunk/fs/ubifs/replay.c b/trunk/fs/ubifs/replay.c index 5e97161ce4d3..6617280d1679 100644 --- a/trunk/fs/ubifs/replay.c +++ b/trunk/fs/ubifs/replay.c @@ -557,7 +557,8 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b) * these LEBs could possibly be written to at the power cut * time. */ - sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, b->bud->jhead); + sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, + b->bud->jhead != GCHD); else sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0); if (IS_ERR(sleb)) diff --git a/trunk/fs/ubifs/shrinker.c b/trunk/fs/ubifs/shrinker.c index 9e1d05666fed..ca953a945029 100644 --- a/trunk/fs/ubifs/shrinker.c +++ b/trunk/fs/ubifs/shrinker.c @@ -284,11 +284,7 @@ int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc) long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); if (nr == 0) - /* - * Due to the way UBIFS updates the clean znode counter it may - * temporarily be negative. - */ - return clean_zn_cnt >= 0 ? clean_zn_cnt : 1; + return clean_zn_cnt; if (!clean_zn_cnt) { /* diff --git a/trunk/fs/ubifs/super.c b/trunk/fs/ubifs/super.c index b5aeb5a8ebed..1ab0d22e4c94 100644 --- a/trunk/fs/ubifs/super.c +++ b/trunk/fs/ubifs/super.c @@ -811,18 +811,15 @@ static int alloc_wbufs(struct ubifs_info *c) c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback; c->jheads[i].wbuf.jhead = i; - c->jheads[i].grouped = 1; } c->jheads[BASEHD].wbuf.dtype = UBI_SHORTTERM; /* * Garbage Collector head likely contains long-term data and - * does not need to be synchronized by timer. Also GC head nodes are - * not grouped. + * does not need to be synchronized by timer. 
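
The drop loop added at the end of the ubifs_recover_leb() hunk above keeps calling drop_last_node() only while three things hold: offs still rounds down to the min. I/O unit recorded in min_io_unit, offs has not yet reached the start of that unit, and a node was actually dropped. A stand-alone sketch of just the round_down() grouping, with made-up numbers (2048-byte min. I/O unit, corruption seen at offset 5000); the macro below is a simplified power-of-two round_down(), not the kernel one:

    #include <stdio.h>

    #define round_down(x, y) ((x) & ~((y) - 1))     /* y must be a power of two */

    int main(void)
    {
            int min_io_size = 2048;
            int min_io_unit = round_down(5000, min_io_size);        /* 4096 */
            int offs[] = { 5000, 4608, 4096, 4000 };

            for (unsigned int i = 0; i < sizeof(offs) / sizeof(offs[0]); i++)
                    printf("offs=%d unit=%d keep dropping: %s\n",
                           offs[i], round_down(offs[i], min_io_size),
                           (round_down(offs[i], min_io_size) == min_io_unit &&
                            offs[i] != min_io_unit) ? "yes" : "no");
            return 0;
    }

Dropping stops as soon as offs either leaves the corrupted unit (4000) or lands exactly on its start (4096), which is what the while condition in the hunk above expresses.
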
*/ c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM; c->jheads[GCHD].wbuf.no_timer = 1; - c->jheads[GCHD].grouped = 0; return 0; } @@ -1287,25 +1284,12 @@ static int mount_ubifs(struct ubifs_info *c) if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) { ubifs_msg("recovery needed"); c->need_recovery = 1; - } - - if (c->need_recovery && !c->ro_mount) { - err = ubifs_recover_inl_heads(c, c->sbuf); - if (err) - goto out_master; - } - - err = ubifs_lpt_init(c, 1, !c->ro_mount); - if (err) - goto out_master; - - if (!c->ro_mount && c->space_fixup) { - err = ubifs_fixup_free_space(c); - if (err) - goto out_master; - } - - if (!c->ro_mount) { + if (!c->ro_mount) { + err = ubifs_recover_inl_heads(c, c->sbuf); + if (err) + goto out_master; + } + } else if (!c->ro_mount) { /* * Set the "dirty" flag so that if we reboot uncleanly we * will notice this immediately on the next mount. @@ -1313,9 +1297,13 @@ static int mount_ubifs(struct ubifs_info *c) c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY); err = ubifs_write_master(c); if (err) - goto out_lpt; + goto out_master; } + err = ubifs_lpt_init(c, 1, !c->ro_mount); + if (err) + goto out_lpt; + err = dbg_check_idx_size(c, c->bi.old_idx_sz); if (err) goto out_lpt; @@ -1408,6 +1396,12 @@ static int mount_ubifs(struct ubifs_info *c) } else ubifs_assert(c->lst.taken_empty_lebs > 0); + if (!c->ro_mount && c->space_fixup) { + err = ubifs_fixup_free_space(c); + if (err) + goto out_infos; + } + err = dbg_check_filesystem(c); if (err) goto out_infos; diff --git a/trunk/fs/ubifs/tnc.c b/trunk/fs/ubifs/tnc.c index 91b4213dde84..8119b1fd8d94 100644 --- a/trunk/fs/ubifs/tnc.c +++ b/trunk/fs/ubifs/tnc.c @@ -2876,13 +2876,12 @@ static void tnc_destroy_cnext(struct ubifs_info *c) */ void ubifs_tnc_close(struct ubifs_info *c) { + long clean_freed; + tnc_destroy_cnext(c); if (c->zroot.znode) { - long n; - - ubifs_destroy_tnc_subtree(c->zroot.znode); - n = atomic_long_read(&c->clean_zn_cnt); - atomic_long_sub(n, &ubifs_clean_zn_cnt); + clean_freed = ubifs_destroy_tnc_subtree(c->zroot.znode); + atomic_long_sub(clean_freed, &ubifs_clean_zn_cnt); } kfree(c->gap_lebs); kfree(c->ilebs); diff --git a/trunk/fs/ubifs/ubifs.h b/trunk/fs/ubifs/ubifs.h index f79983d6f860..a70d7b4ffb25 100644 --- a/trunk/fs/ubifs/ubifs.h +++ b/trunk/fs/ubifs/ubifs.h @@ -722,14 +722,12 @@ struct ubifs_bud { * struct ubifs_jhead - journal head. * @wbuf: head's write-buffer * @buds_list: list of bud LEBs belonging to this journal head - * @grouped: non-zero if UBIFS groups nodes when writing to this journal head * * Note, the @buds list is protected by the @c->buds_lock. 
*/ struct ubifs_jhead { struct ubifs_wbuf wbuf; struct list_head buds_list; - unsigned int grouped:1; }; /** @@ -1744,7 +1742,7 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum); int ubifs_recover_master_node(struct ubifs_info *c); int ubifs_write_rcvrd_mst_node(struct ubifs_info *c); struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, - int offs, void *sbuf, int jhead); + int offs, void *sbuf, int grouped); struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf); int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf); diff --git a/trunk/include/asm-generic/unistd.h b/trunk/include/asm-generic/unistd.h index 4f76959397fa..ae90e0f63995 100644 --- a/trunk/include/asm-generic/unistd.h +++ b/trunk/include/asm-generic/unistd.h @@ -683,11 +683,9 @@ __SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime) __SYSCALL(__NR_syncfs, sys_syncfs) #define __NR_setns 268 __SYSCALL(__NR_setns, sys_setns) -#define __NR_sendmmsg 269 -__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg) #undef __NR_syscalls -#define __NR_syscalls 270 +#define __NR_syscalls 269 /* * All syscalls below here should go away really, diff --git a/trunk/include/linux/blkdev.h b/trunk/include/linux/blkdev.h index 1a23722e8878..ae9091a68480 100644 --- a/trunk/include/linux/blkdev.h +++ b/trunk/include/linux/blkdev.h @@ -1282,8 +1282,8 @@ queue_max_integrity_segments(struct request_queue *q) #define blk_get_integrity(a) (0) #define blk_integrity_compare(a, b) (0) #define blk_integrity_register(a, b) (0) -#define blk_integrity_unregister(a) do { } while (0) -#define blk_queue_max_integrity_segments(a, b) do { } while (0) +#define blk_integrity_unregister(a) do { } while (0); +#define blk_queue_max_integrity_segments(a, b) do { } while (0); #define queue_max_integrity_segments(a) (0) #define blk_integrity_merge_rq(a, b, c) (0) #define blk_integrity_merge_bio(a, b, c) (0) diff --git a/trunk/include/linux/dma_remapping.h b/trunk/include/linux/dma_remapping.h index bbd8661b3473..5619f8522738 100644 --- a/trunk/include/linux/dma_remapping.h +++ b/trunk/include/linux/dma_remapping.h @@ -9,12 +9,8 @@ #define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT) #define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK) -#define VTD_STRIDE_SHIFT (9) -#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT) - #define DMA_PTE_READ (1) #define DMA_PTE_WRITE (2) -#define DMA_PTE_LARGE_PAGE (1 << 7) #define DMA_PTE_SNP (1 << 11) #define CONTEXT_TT_MULTI_LEVEL 0 diff --git a/trunk/include/linux/genhd.h b/trunk/include/linux/genhd.h index 300d7582006e..b78956b3c2e7 100644 --- a/trunk/include/linux/genhd.h +++ b/trunk/include/linux/genhd.h @@ -100,7 +100,6 @@ struct hd_struct { sector_t start_sect; sector_t nr_sects; sector_t alignment_offset; - unsigned int discard_alignment; struct device __dev; struct kobject *holder_dir; int policy, partno; diff --git a/trunk/include/linux/ieee80211.h b/trunk/include/linux/ieee80211.h index bf56b6f78270..b2eee5879883 100644 --- a/trunk/include/linux/ieee80211.h +++ b/trunk/include/linux/ieee80211.h @@ -1003,12 +1003,8 @@ struct ieee80211_ht_info { #define WLAN_CAPABILITY_ESS (1<<0) #define WLAN_CAPABILITY_IBSS (1<<1) -/* - * A mesh STA sets the ESS and IBSS capability bits to zero. - * however, this holds true for p2p probe responses (in the p2p_find - * phase) as well. 
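
One detail in the include/linux/blkdev.h hunk above: the stubbed-out blk_integrity_unregister() and blk_queue_max_integrity_segments() macros come back with a trailing semicolon inside the do { } while (0) body. The usual reason such stubs are spelled without that semicolon is that the extra ';' breaks callers that use the macro in an if/else. A stand-alone illustration with hypothetical macro names (not from the patch):

    #include <stdio.h>

    /* safe form: the caller supplies the trailing semicolon */
    #define NOOP_OK(x)      do { } while (0)

    /*
     * Unsafe form for comparison (do not define it this way):
     *
     *     #define NOOP_BAD(x)  do { } while (0);
     *
     * "if (flag) NOOP_BAD(1); else ..." then expands to
     * "if (flag) do { } while (0); ; else ..." and the compiler
     * rejects the dangling 'else'.
     */

    int main(void)
    {
            int flag = 0;

            if (flag)
                    NOOP_OK(1);
            else
                    puts("else branch");    /* compiles and runs fine */
            return 0;
    }
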
- */ -#define WLAN_CAPABILITY_IS_STA_BSS(cap) \ +/* A mesh STA sets the ESS and IBSS capability bits to zero */ +#define WLAN_CAPABILITY_IS_MBSS(cap) \ (!((cap) & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS))) #define WLAN_CAPABILITY_CF_POLLABLE (1<<2) diff --git a/trunk/include/linux/if_packet.h b/trunk/include/linux/if_packet.h index 6d66ce1791a9..72bfa5a034dd 100644 --- a/trunk/include/linux/if_packet.h +++ b/trunk/include/linux/if_packet.h @@ -70,7 +70,6 @@ struct tpacket_auxdata { #define TP_STATUS_COPY 0x2 #define TP_STATUS_LOSING 0x4 #define TP_STATUS_CSUMNOTREADY 0x8 -#define TP_STATUS_VLAN_VALID 0x10 /* auxdata has valid tp_vlan_tci */ /* Tx ring - header status */ #define TP_STATUS_AVAILABLE 0x0 diff --git a/trunk/include/linux/mtd/physmap.h b/trunk/include/linux/mtd/physmap.h index e5f21d293c70..d40bfa1d9c91 100644 --- a/trunk/include/linux/mtd/physmap.h +++ b/trunk/include/linux/mtd/physmap.h @@ -19,7 +19,6 @@ #include struct map_info; -struct platform_device; struct physmap_flash_data { unsigned int width; diff --git a/trunk/include/linux/tty_ldisc.h b/trunk/include/linux/tty_ldisc.h index ff7dc08696a8..5b07792ccb46 100644 --- a/trunk/include/linux/tty_ldisc.h +++ b/trunk/include/linux/tty_ldisc.h @@ -76,7 +76,7 @@ * tty device. It is solely the responsibility of the line * discipline to handle poll requests. * - * void (*receive_buf)(struct tty_struct *, const unsigned char *cp, + * unsigned int (*receive_buf)(struct tty_struct *, const unsigned char *cp, * char *fp, int count); * * This function is called by the low-level tty driver to send @@ -84,7 +84,8 @@ * processing. is a pointer to the buffer of input * character received by the device. is a pointer to a * pointer of flag bytes which indicate whether a character was - * received with a parity error, etc. + * received with a parity error, etc. Returns the amount of bytes + * received. * * void (*write_wakeup)(struct tty_struct *); * @@ -140,8 +141,8 @@ struct tty_ldisc_ops { /* * The following routines are called from below. */ - void (*receive_buf)(struct tty_struct *, const unsigned char *cp, - char *fp, int count); + unsigned int (*receive_buf)(struct tty_struct *, + const unsigned char *cp, char *fp, int count); void (*write_wakeup)(struct tty_struct *); void (*dcd_change)(struct tty_struct *, unsigned int, struct pps_event_time *); diff --git a/trunk/include/linux/virtio.h b/trunk/include/linux/virtio.h index 710885749605..aff5b4f74041 100644 --- a/trunk/include/linux/virtio.h +++ b/trunk/include/linux/virtio.h @@ -51,13 +51,6 @@ struct virtqueue { * This re-enables callbacks; it returns "false" if there are pending * buffers in the queue, to detect a possible race between the driver * checking for more work, and enabling callbacks. - * virtqueue_enable_cb_delayed: restart callbacks after disable_cb. - * vq: the struct virtqueue we're talking about. - * This re-enables callbacks but hints to the other side to delay - * interrupts until most of the available buffers have been processed; - * it returns "false" if there are many pending buffers in the queue, - * to detect a possible race between the driver checking for more work, - * and enabling callbacks. * virtqueue_detach_unused_buf: detach first unused buffer * vq: the struct virtqueue we're talking about. 
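
The tty_ldisc.h hunk above changes the line-discipline receive_buf() hook from returning void to returning an unsigned int count of the bytes it accepted, so the caller can tell how much of the buffer was actually consumed. A rough user-space analogue of that contract (hypothetical types and names, not the kernel API): a consumer callback that reports how much it took, letting the producer re-queue the remainder:

    #include <stdio.h>
    #include <string.h>

    /* callback returns how many of 'count' bytes it accepted */
    typedef size_t (*receive_fn)(const char *cp, size_t count);

    static char ring[8];
    static size_t fill;

    static size_t demo_receive(const char *cp, size_t count)
    {
            size_t room = sizeof(ring) - fill;
            size_t take = count < room ? count : room;

            memcpy(ring + fill, cp, take);
            fill += take;
            return take;                    /* may be less than 'count' */
    }

    static void push(receive_fn rx, const char *buf, size_t len)
    {
            while (len) {
                    size_t done = rx(buf, len);

                    if (!done) {
                            /* consumer full; a real driver would retry later */
                            printf("%zu bytes left unconsumed\n", len);
                            return;
                    }
                    buf += done;
                    len -= done;
            }
            puts("all bytes consumed");
    }

    int main(void)
    {
            push(demo_receive, "hello, line discipline", 22);
            return 0;
    }
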
* Returns NULL or the "data" token handed to add_buf @@ -93,8 +86,6 @@ void virtqueue_disable_cb(struct virtqueue *vq); bool virtqueue_enable_cb(struct virtqueue *vq); -bool virtqueue_enable_cb_delayed(struct virtqueue *vq); - void *virtqueue_detach_unused_buf(struct virtqueue *vq); /** diff --git a/trunk/include/linux/virtio_9p.h b/trunk/include/linux/virtio_9p.h index 277c4ad44e84..e68b439b2860 100644 --- a/trunk/include/linux/virtio_9p.h +++ b/trunk/include/linux/virtio_9p.h @@ -1,30 +1,7 @@ #ifndef _LINUX_VIRTIO_9P_H #define _LINUX_VIRTIO_9P_H /* This header is BSD licensed so anyone can use the definitions to implement - * compatible drivers/servers. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of IBM nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. */ + * compatible drivers/servers. */ #include #include #include diff --git a/trunk/include/linux/virtio_balloon.h b/trunk/include/linux/virtio_balloon.h index 652dc8bea921..a50ecd1b81a2 100644 --- a/trunk/include/linux/virtio_balloon.h +++ b/trunk/include/linux/virtio_balloon.h @@ -1,30 +1,7 @@ #ifndef _LINUX_VIRTIO_BALLOON_H #define _LINUX_VIRTIO_BALLOON_H /* This header is BSD licensed so anyone can use the definitions to implement - * compatible drivers/servers. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of IBM nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. */ + * compatible drivers/servers. */ #include #include diff --git a/trunk/include/linux/virtio_blk.h b/trunk/include/linux/virtio_blk.h index e0edb40ca7aa..167720d695ed 100644 --- a/trunk/include/linux/virtio_blk.h +++ b/trunk/include/linux/virtio_blk.h @@ -1,30 +1,7 @@ #ifndef _LINUX_VIRTIO_BLK_H #define _LINUX_VIRTIO_BLK_H /* This header is BSD licensed so anyone can use the definitions to implement - * compatible drivers/servers. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of IBM nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. */ + * compatible drivers/servers. */ #include #include #include diff --git a/trunk/include/linux/virtio_config.h b/trunk/include/linux/virtio_config.h index 39c88c5ad19d..800617b4ddd5 100644 --- a/trunk/include/linux/virtio_config.h +++ b/trunk/include/linux/virtio_config.h @@ -1,30 +1,7 @@ #ifndef _LINUX_VIRTIO_CONFIG_H #define _LINUX_VIRTIO_CONFIG_H /* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so - * anyone can use the definitions to implement compatible drivers/servers. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of IBM nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. */ + * anyone can use the definitions to implement compatible drivers/servers. */ /* Virtio devices use a standardized configuration space to define their * features and pass configuration information, but each implementation can diff --git a/trunk/include/linux/virtio_console.h b/trunk/include/linux/virtio_console.h index bdf4b0034739..e4d333543a33 100644 --- a/trunk/include/linux/virtio_console.h +++ b/trunk/include/linux/virtio_console.h @@ -5,31 +5,7 @@ #include /* * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so - * anyone can use the definitions to implement compatible drivers/servers: - * - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of IBM nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. + * anyone can use the definitions to implement compatible drivers/servers. 
* * Copyright (C) Red Hat, Inc., 2009, 2010, 2011 * Copyright (C) Amit Shah , 2009, 2010, 2011 diff --git a/trunk/include/linux/virtio_ids.h b/trunk/include/linux/virtio_ids.h index 85bb0bb66ffc..06660c0a78d7 100644 --- a/trunk/include/linux/virtio_ids.h +++ b/trunk/include/linux/virtio_ids.h @@ -5,29 +5,7 @@ * * This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of IBM nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. */ + */ #define VIRTIO_ID_NET 1 /* virtio net */ #define VIRTIO_ID_BLOCK 2 /* virtio block */ diff --git a/trunk/include/linux/virtio_net.h b/trunk/include/linux/virtio_net.h index 136040bba3e3..085e42298ce5 100644 --- a/trunk/include/linux/virtio_net.h +++ b/trunk/include/linux/virtio_net.h @@ -1,30 +1,7 @@ #ifndef _LINUX_VIRTIO_NET_H #define _LINUX_VIRTIO_NET_H /* This header is BSD licensed so anyone can use the definitions to implement - * compatible drivers/servers. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of IBM nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. */ + * compatible drivers/servers. */ #include #include #include diff --git a/trunk/include/linux/virtio_pci.h b/trunk/include/linux/virtio_pci.h index ea66f3f60d63..9a3d7c48c622 100644 --- a/trunk/include/linux/virtio_pci.h +++ b/trunk/include/linux/virtio_pci.h @@ -11,29 +11,6 @@ * * This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of IBM nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. */ #ifndef _LINUX_VIRTIO_PCI_H diff --git a/trunk/include/linux/virtio_ring.h b/trunk/include/linux/virtio_ring.h index 4a32cb6da425..e4d144b132b5 100644 --- a/trunk/include/linux/virtio_ring.h +++ b/trunk/include/linux/virtio_ring.h @@ -7,29 +7,6 @@ * This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of IBM nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * * Copyright Rusty Russell IBM Corporation 2007. */ #include @@ -52,12 +29,6 @@ /* We support indirect buffer descriptors */ #define VIRTIO_RING_F_INDIRECT_DESC 28 -/* The Guest publishes the used index for which it expects an interrupt - * at the end of the avail ring. Host should ignore the avail->flags field. */ -/* The Host publishes the avail index for which it expects a kick - * at the end of the used ring. Guest should ignore the used->flags field. */ -#define VIRTIO_RING_F_EVENT_IDX 29 - /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ struct vring_desc { /* Address (guest-physical). */ @@ -112,7 +83,6 @@ struct vring { * __u16 avail_flags; * __u16 avail_idx; * __u16 available[num]; - * __u16 used_event_idx; * * // Padding to the next align boundary. * char pad[]; @@ -121,14 +91,8 @@ struct vring { * __u16 used_flags; * __u16 used_idx; * struct vring_used_elem used[num]; - * __u16 avail_event_idx; * }; */ -/* We publish the used event index at the end of the available ring, and vice - * versa. They are at the end for backwards compatibility. */ -#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num]) -#define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num]) - static inline void vring_init(struct vring *vr, unsigned int num, void *p, unsigned long align) { @@ -143,21 +107,7 @@ static inline unsigned vring_size(unsigned int num, unsigned long align) { return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num) + align - 1) & ~(align - 1)) - + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num; -} - -/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */ -/* Assuming a given event_idx value from the other size, if - * we have just incremented index from old to new_idx, - * should we trigger an event? */ -static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old) -{ - /* Note: Xen has similar logic for notification hold-off - * in include/xen/interface/io/ring.h with req_event and req_prod - * corresponding to event_idx + 1 and new_idx respectively. - * Note also that req_event and req_prod in Xen start at 1, - * event indexes in virtio start at 0. 
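
The include/linux/virtio_ring.h hunk here removes the VIRTIO_RING_F_EVENT_IDX machinery: the feature bit, the vring_used_event()/vring_avail_event() macros, one __u16 accounted for in vring_size(), and the vring_need_event() helper whose return expression continues just below. That helper decides, with 16-bit wrap-around arithmetic, whether the index range (old, new_idx] crossed the other side's event_idx. A throwaway user-space check of the same expression (the harness and sample values are made up; the comparison itself is taken from the deleted lines):

    #include <stdint.h>
    #include <stdio.h>

    /* same comparison as the vring_need_event() being deleted */
    static int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
    {
            return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
    }

    int main(void)
    {
            /* the index moved from old=10 to new_idx=12 */
            printf("%d\n", vring_need_event(9, 12, 10));    /* 0: passed earlier */
            printf("%d\n", vring_need_event(10, 12, 10));   /* 1: just crossed */
            printf("%d\n", vring_need_event(11, 12, 10));   /* 1: just crossed */
            printf("%d\n", vring_need_event(12, 12, 10));   /* 0: not reached yet */
            /* the 16-bit casts keep the test correct across wrap-around */
            printf("%d\n", vring_need_event(65535, 2, 65530)); /* 1 */
            return 0;
    }
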
*/ - return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old); + + sizeof(__u16) * 2 + sizeof(struct vring_used_elem) * num; } #ifdef __KERNEL__ diff --git a/trunk/include/net/sctp/command.h b/trunk/include/net/sctp/command.h index dd6847e5d6e4..2b447646ce4b 100644 --- a/trunk/include/net/sctp/command.h +++ b/trunk/include/net/sctp/command.h @@ -107,7 +107,6 @@ typedef enum { SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */ SCTP_CMD_SEND_MSG, /* Send the whole use message */ SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */ - SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/ SCTP_CMD_LAST } sctp_verb_t; diff --git a/trunk/include/net/sctp/structs.h b/trunk/include/net/sctp/structs.h index 7df327a6d564..795f4886e111 100644 --- a/trunk/include/net/sctp/structs.h +++ b/trunk/include/net/sctp/structs.h @@ -1993,7 +1993,7 @@ void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc); struct sctp_chunk *sctp_assoc_lookup_asconf_ack( const struct sctp_association *asoc, __be32 serial); -void sctp_asconf_queue_teardown(struct sctp_association *asoc); + int sctp_cmp_addr_exact(const union sctp_addr *ss1, const union sctp_addr *ss2); diff --git a/trunk/include/trace/events/net.h b/trunk/include/trace/events/net.h index f99645d05a8f..5f247f5ffc56 100644 --- a/trunk/include/trace/events/net.h +++ b/trunk/include/trace/events/net.h @@ -12,24 +12,22 @@ TRACE_EVENT(net_dev_xmit, TP_PROTO(struct sk_buff *skb, - int rc, - struct net_device *dev, - unsigned int skb_len), + int rc), - TP_ARGS(skb, rc, dev, skb_len), + TP_ARGS(skb, rc), TP_STRUCT__entry( __field( void *, skbaddr ) __field( unsigned int, len ) __field( int, rc ) - __string( name, dev->name ) + __string( name, skb->dev->name ) ), TP_fast_assign( __entry->skbaddr = skb; - __entry->len = skb_len; + __entry->len = skb->len; __entry->rc = rc; - __assign_str(name, dev->name); + __assign_str(name, skb->dev->name); ), TP_printk("dev=%s skbaddr=%p len=%u rc=%d", diff --git a/trunk/kernel/rcutree.c b/trunk/kernel/rcutree.c index 89419ff92e99..77a7671dd147 100644 --- a/trunk/kernel/rcutree.c +++ b/trunk/kernel/rcutree.c @@ -1648,6 +1648,7 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu) if (IS_ERR(t)) return PTR_ERR(t); kthread_bind(t, cpu); + set_task_state(t, TASK_INTERRUPTIBLE); per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL); per_cpu(rcu_cpu_kthread_task, cpu) = t; @@ -1755,6 +1756,7 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp, if (IS_ERR(t)) return PTR_ERR(t); raw_spin_lock_irqsave(&rnp->lock, flags); + set_task_state(t, TASK_INTERRUPTIBLE); rnp->node_kthread_task = t; raw_spin_unlock_irqrestore(&rnp->lock, flags); sp.sched_priority = 99; @@ -1763,8 +1765,6 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp, return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index); } -static void rcu_wake_one_boost_kthread(struct rcu_node *rnp); - /* * Spawn all kthreads -- called as soon as the scheduler is running. 
*/ @@ -1772,30 +1772,18 @@ static int __init rcu_spawn_kthreads(void) { int cpu; struct rcu_node *rnp; - struct task_struct *t; rcu_kthreads_spawnable = 1; for_each_possible_cpu(cpu) { per_cpu(rcu_cpu_has_work, cpu) = 0; - if (cpu_online(cpu)) { + if (cpu_online(cpu)) (void)rcu_spawn_one_cpu_kthread(cpu); - t = per_cpu(rcu_cpu_kthread_task, cpu); - if (t) - wake_up_process(t); - } } rnp = rcu_get_root(rcu_state); (void)rcu_spawn_one_node_kthread(rcu_state, rnp); - if (rnp->node_kthread_task) - wake_up_process(rnp->node_kthread_task); if (NUM_RCU_NODES > 1) { - rcu_for_each_leaf_node(rcu_state, rnp) { + rcu_for_each_leaf_node(rcu_state, rnp) (void)rcu_spawn_one_node_kthread(rcu_state, rnp); - t = rnp->node_kthread_task; - if (t) - wake_up_process(t); - rcu_wake_one_boost_kthread(rnp); - } } return 0; } @@ -2200,14 +2188,14 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible) raw_spin_unlock_irqrestore(&rsp->onofflock, flags); } -static void __cpuinit rcu_prepare_cpu(int cpu) +static void __cpuinit rcu_online_cpu(int cpu) { rcu_init_percpu_data(cpu, &rcu_sched_state, 0); rcu_init_percpu_data(cpu, &rcu_bh_state, 0); rcu_preempt_init_percpu_data(cpu); } -static void __cpuinit rcu_prepare_kthreads(int cpu) +static void __cpuinit rcu_online_kthreads(int cpu) { struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); struct rcu_node *rnp = rdp->mynode; @@ -2220,31 +2208,6 @@ static void __cpuinit rcu_prepare_kthreads(int cpu) } } -/* - * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state, - * but the RCU threads are woken on demand, and if demand is low this - * could be a while triggering the hung task watchdog. - * - * In order to avoid this, poke all tasks once the CPU is fully - * up and running. - */ -static void __cpuinit rcu_online_kthreads(int cpu) -{ - struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); - struct rcu_node *rnp = rdp->mynode; - struct task_struct *t; - - t = per_cpu(rcu_cpu_kthread_task, cpu); - if (t) - wake_up_process(t); - - t = rnp->node_kthread_task; - if (t) - wake_up_process(t); - - rcu_wake_one_boost_kthread(rnp); -} - /* * Handle CPU online/offline notification events. 
*/ @@ -2258,11 +2221,10 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: - rcu_prepare_cpu(cpu); - rcu_prepare_kthreads(cpu); + rcu_online_cpu(cpu); + rcu_online_kthreads(cpu); break; case CPU_ONLINE: - rcu_online_kthreads(cpu); case CPU_DOWN_FAILED: rcu_node_kthread_setaffinity(rnp, -1); rcu_cpu_kthread_setrt(cpu, 1); diff --git a/trunk/kernel/rcutree_plugin.h b/trunk/kernel/rcutree_plugin.h index c8bff3099a89..a767b7dac365 100644 --- a/trunk/kernel/rcutree_plugin.h +++ b/trunk/kernel/rcutree_plugin.h @@ -1295,6 +1295,7 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, if (IS_ERR(t)) return PTR_ERR(t); raw_spin_lock_irqsave(&rnp->lock, flags); + set_task_state(t, TASK_INTERRUPTIBLE); rnp->boost_kthread_task = t; raw_spin_unlock_irqrestore(&rnp->lock, flags); sp.sched_priority = RCU_KTHREAD_PRIO; @@ -1302,12 +1303,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, return 0; } -static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) -{ - if (rnp->boost_kthread_task) - wake_up_process(rnp->boost_kthread_task); -} - #else /* #ifdef CONFIG_RCU_BOOST */ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) @@ -1331,10 +1326,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, return 0; } -static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) -{ -} - #endif /* #else #ifdef CONFIG_RCU_BOOST */ #ifndef CONFIG_SMP diff --git a/trunk/lib/Kconfig.debug b/trunk/lib/Kconfig.debug index dd373c8ee943..28afa4c5333c 100644 --- a/trunk/lib/Kconfig.debug +++ b/trunk/lib/Kconfig.debug @@ -697,7 +697,7 @@ config DEBUG_BUGVERBOSE bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT depends on BUG depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \ - FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || TILE + FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 default y help Say Y here to make BUG() panics output the file name and line number diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c index 4e8985acdab8..a4e1db3f1981 100644 --- a/trunk/mm/page_alloc.c +++ b/trunk/mm/page_alloc.c @@ -2247,6 +2247,10 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, if (should_fail_alloc_page(gfp_mask, order)) return NULL; +#ifndef CONFIG_ZONE_DMA + if (WARN_ON_ONCE(gfp_mask & __GFP_DMA)) + return NULL; +#endif /* * Check the zones suitable for the gfp_mask contain at least one diff --git a/trunk/net/8021q/vlan_dev.c b/trunk/net/8021q/vlan_dev.c index 7ea5cf9ea08a..f247f5bff88d 100644 --- a/trunk/net/8021q/vlan_dev.c +++ b/trunk/net/8021q/vlan_dev.c @@ -165,7 +165,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, u64_stats_update_begin(&stats->syncp); stats->tx_packets++; stats->tx_bytes += len; - u64_stats_update_end(&stats->syncp); + u64_stats_update_begin(&stats->syncp); } else { this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped); } diff --git a/trunk/net/bluetooth/l2cap_core.c b/trunk/net/bluetooth/l2cap_core.c index e64a1c2df238..a86f9ba4f05c 100644 --- a/trunk/net/bluetooth/l2cap_core.c +++ b/trunk/net/bluetooth/l2cap_core.c @@ -906,7 +906,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr if (c->psm == psm) { /* Exact match. 
*/ if (!bacmp(&bt_sk(sk)->src, src)) { - read_unlock(&chan_list_lock); + read_unlock_bh(&chan_list_lock); return c; } diff --git a/trunk/net/caif/chnl_net.c b/trunk/net/caif/chnl_net.c index adbb424403d4..649ebacaf6bc 100644 --- a/trunk/net/caif/chnl_net.c +++ b/trunk/net/caif/chnl_net.c @@ -139,14 +139,17 @@ static void close_work(struct work_struct *work) struct chnl_net *dev = NULL; struct list_head *list_node; struct list_head *_tmp; - - rtnl_lock(); + /* May be called with or without RTNL lock held */ + int islocked = rtnl_is_locked(); + if (!islocked) + rtnl_lock(); list_for_each_safe(list_node, _tmp, &chnl_net_list) { dev = list_entry(list_node, struct chnl_net, list_field); if (dev->state == CAIF_SHUTDOWN) dev_close(dev->netdev); } - rtnl_unlock(); + if (!islocked) + rtnl_unlock(); } static DECLARE_WORK(close_worker, close_work); diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c index 939307891e71..c7e305d13b71 100644 --- a/trunk/net/core/dev.c +++ b/trunk/net/core/dev.c @@ -2096,7 +2096,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, { const struct net_device_ops *ops = dev->netdev_ops; int rc = NETDEV_TX_OK; - unsigned int skb_len; if (likely(!skb->next)) { u32 features; @@ -2147,9 +2146,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, } } - skb_len = skb->len; rc = ops->ndo_start_xmit(skb, dev); - trace_net_dev_xmit(skb, rc, dev, skb_len); + trace_net_dev_xmit(skb, rc); if (rc == NETDEV_TX_OK) txq_trans_update(txq); return rc; @@ -2169,9 +2167,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, if (dev->priv_flags & IFF_XMIT_DST_RELEASE) skb_dst_drop(nskb); - skb_len = nskb->len; rc = ops->ndo_start_xmit(nskb, dev); - trace_net_dev_xmit(nskb, rc, dev, skb_len); + trace_net_dev_xmit(nskb, rc); if (unlikely(rc != NETDEV_TX_OK)) { if (rc & ~NETDEV_TX_MASK) goto out_kfree_gso_skb; diff --git a/trunk/net/ipv4/af_inet.c b/trunk/net/ipv4/af_inet.c index 9c1926027a26..cc1463156cd0 100644 --- a/trunk/net/ipv4/af_inet.c +++ b/trunk/net/ipv4/af_inet.c @@ -465,9 +465,6 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (addr_len < sizeof(struct sockaddr_in)) goto out; - if (addr->sin_family != AF_INET) - goto out; - chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); /* Not specified by any standard per-se, however it breaks too diff --git a/trunk/net/ipv4/ip_options.c b/trunk/net/ipv4/ip_options.c index ec93335901dd..c3118e1cd3bb 100644 --- a/trunk/net/ipv4/ip_options.c +++ b/trunk/net/ipv4/ip_options.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -351,7 +350,7 @@ int ip_options_compile(struct net *net, goto error; } if (optptr[2] <= optlen) { - unsigned char *timeptr = NULL; + __be32 *timeptr = NULL; if (optptr[2]+3 > optptr[1]) { pp_ptr = optptr + 2; goto error; @@ -360,7 +359,7 @@ int ip_options_compile(struct net *net, case IPOPT_TS_TSONLY: opt->ts = optptr - iph; if (skb) - timeptr = &optptr[optptr[2]-1]; + timeptr = (__be32*)&optptr[optptr[2]-1]; opt->ts_needtime = 1; optptr[2] += 4; break; @@ -372,7 +371,7 @@ int ip_options_compile(struct net *net, opt->ts = optptr - iph; if (rt) { memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); - timeptr = &optptr[optptr[2]+3]; + timeptr = (__be32*)&optptr[optptr[2]+3]; } opt->ts_needaddr = 1; opt->ts_needtime = 1; @@ -390,7 +389,7 @@ int ip_options_compile(struct net *net, if (inet_addr_type(net, addr) == RTN_UNICAST) break; if (skb) - timeptr = &optptr[optptr[2]+3]; + timeptr = 
(__be32*)&optptr[optptr[2]+3]; } opt->ts_needtime = 1; optptr[2] += 8; @@ -404,10 +403,10 @@ int ip_options_compile(struct net *net, } if (timeptr) { struct timespec tv; - u32 midtime; + __be32 midtime; getnstimeofday(&tv); - midtime = (tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC; - put_unaligned_be32(midtime, timeptr); + midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC); + memcpy(timeptr, &midtime, sizeof(__be32)); opt->is_changed = 1; } } else { diff --git a/trunk/net/mac80211/mlme.c b/trunk/net/mac80211/mlme.c index 456cccf26b51..4f6b2675e41d 100644 --- a/trunk/net/mac80211/mlme.c +++ b/trunk/net/mac80211/mlme.c @@ -232,9 +232,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type)); } - ieee80211_stop_queues_by_reason(&sdata->local->hw, - IEEE80211_QUEUE_STOP_REASON_CSA); - /* channel_type change automatically detected */ ieee80211_hw_config(local, 0); @@ -248,9 +245,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, rcu_read_unlock(); } - ieee80211_wake_queues_by_reason(&sdata->local->hw, - IEEE80211_QUEUE_STOP_REASON_CSA); - ht_opmode = le16_to_cpu(hti->operation_mode); /* if bss configuration changed store the new one */ @@ -1095,7 +1089,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, local->hw.conf.flags &= ~IEEE80211_CONF_PS; config_changed |= IEEE80211_CONF_CHANGE_PS; } - local->ps_sdata = NULL; ieee80211_hw_config(local, config_changed); diff --git a/trunk/net/mac80211/scan.c b/trunk/net/mac80211/scan.c index 58ffa7d069c7..27af6723cb5e 100644 --- a/trunk/net/mac80211/scan.c +++ b/trunk/net/mac80211/scan.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/trunk/net/packet/af_packet.c b/trunk/net/packet/af_packet.c index ba248d93399a..925f715686a5 100644 --- a/trunk/net/packet/af_packet.c +++ b/trunk/net/packet/af_packet.c @@ -798,12 +798,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, getnstimeofday(&ts); h.h2->tp_sec = ts.tv_sec; h.h2->tp_nsec = ts.tv_nsec; - if (vlan_tx_tag_present(skb)) { - h.h2->tp_vlan_tci = vlan_tx_tag_get(skb); - status |= TP_STATUS_VLAN_VALID; - } else { - h.h2->tp_vlan_tci = 0; - } + h.h2->tp_vlan_tci = vlan_tx_tag_get(skb); hdrlen = sizeof(*h.h2); break; default: @@ -1730,12 +1725,8 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, aux.tp_snaplen = skb->len; aux.tp_mac = 0; aux.tp_net = skb_network_offset(skb); - if (vlan_tx_tag_present(skb)) { - aux.tp_vlan_tci = vlan_tx_tag_get(skb); - aux.tp_status |= TP_STATUS_VLAN_VALID; - } else { - aux.tp_vlan_tci = 0; - } + aux.tp_vlan_tci = vlan_tx_tag_get(skb); + put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); } diff --git a/trunk/net/sctp/associola.c b/trunk/net/sctp/associola.c index 4a62888f2e43..525f97c467e9 100644 --- a/trunk/net/sctp/associola.c +++ b/trunk/net/sctp/associola.c @@ -444,7 +444,15 @@ void sctp_association_free(struct sctp_association *asoc) asoc->peer.transport_count = 0; - sctp_asconf_queue_teardown(asoc); + /* Free any cached ASCONF_ACK chunk. */ + sctp_assoc_free_asconf_acks(asoc); + + /* Free the ASCONF queue. */ + sctp_assoc_free_asconf_queue(asoc); + + /* Free any cached ASCONF chunk. 
*/ + if (asoc->addip_last_asconf) + sctp_chunk_free(asoc->addip_last_asconf); /* AUTH - Free the endpoint shared keys */ sctp_auth_destroy_keys(&asoc->endpoint_shared_keys); @@ -1638,16 +1646,3 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack( return NULL; } - -void sctp_asconf_queue_teardown(struct sctp_association *asoc) -{ - /* Free any cached ASCONF_ACK chunk. */ - sctp_assoc_free_asconf_acks(asoc); - - /* Free the ASCONF queue. */ - sctp_assoc_free_asconf_queue(asoc); - - /* Free any cached ASCONF chunk. */ - if (asoc->addip_last_asconf) - sctp_chunk_free(asoc->addip_last_asconf); -} diff --git a/trunk/net/sctp/sm_sideeffect.c b/trunk/net/sctp/sm_sideeffect.c index 534c2e5feb05..d612ca1ca6c0 100644 --- a/trunk/net/sctp/sm_sideeffect.c +++ b/trunk/net/sctp/sm_sideeffect.c @@ -1670,9 +1670,6 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, case SCTP_CMD_SEND_NEXT_ASCONF: sctp_cmd_send_asconf(asoc); break; - case SCTP_CMD_PURGE_ASCONF_QUEUE: - sctp_asconf_queue_teardown(asoc); - break; default: pr_warn("Impossible command: %u, %p\n", cmd->verb, cmd->obj.ptr); diff --git a/trunk/net/sctp/sm_statefuns.c b/trunk/net/sctp/sm_statefuns.c index a297283154d5..7f4a4f8368ee 100644 --- a/trunk/net/sctp/sm_statefuns.c +++ b/trunk/net/sctp/sm_statefuns.c @@ -1718,21 +1718,11 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep, return SCTP_DISPOSITION_CONSUME; } - /* For now, stop pending T3-rtx and SACK timers, fail any unsent/unacked - * data. Consider the optional choice of resending of this data. + /* For now, fail any unsent/unacked data. Consider the optional + * choice of resending of this data. */ - sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL()); - sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, - SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL()); - /* Stop pending T4-rto timer, teardown ASCONF queue, ASCONF-ACK queue - * and ASCONF-ACK cache. 
- */ - sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, - SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); - sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL()); - repl = sctp_make_cookie_ack(new_asoc, chunk); if (!repl) goto nomem; diff --git a/trunk/net/wireless/nl80211.c b/trunk/net/wireless/nl80211.c index 88a565f130a5..ec83f413a7ed 100644 --- a/trunk/net/wireless/nl80211.c +++ b/trunk/net/wireless/nl80211.c @@ -3406,12 +3406,12 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) i = 0; if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) { nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) { - request->ssids[i].ssid_len = nla_len(attr); if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) { err = -EINVAL; goto out_free; } memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr)); + request->ssids[i].ssid_len = nla_len(attr); i++; } } @@ -3572,7 +3572,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) { nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) { - request->ssids[i].ssid_len = nla_len(attr); if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) { err = -EINVAL; @@ -3580,6 +3579,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, } memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr)); + request->ssids[i].ssid_len = nla_len(attr); i++; } } diff --git a/trunk/net/wireless/scan.c b/trunk/net/wireless/scan.c index 7a6c67667d70..73a441d237b5 100644 --- a/trunk/net/wireless/scan.c +++ b/trunk/net/wireless/scan.c @@ -267,35 +267,13 @@ static bool is_bss(struct cfg80211_bss *a, return memcmp(ssidie + 2, ssid, ssid_len) == 0; } -static bool is_mesh_bss(struct cfg80211_bss *a) -{ - const u8 *ie; - - if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability)) - return false; - - ie = cfg80211_find_ie(WLAN_EID_MESH_ID, - a->information_elements, - a->len_information_elements); - if (!ie) - return false; - - ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG, - a->information_elements, - a->len_information_elements); - if (!ie) - return false; - - return true; -} - static bool is_mesh(struct cfg80211_bss *a, const u8 *meshid, size_t meshidlen, const u8 *meshcfg) { const u8 *ie; - if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability)) + if (!WLAN_CAPABILITY_IS_MBSS(a->capability)) return false; ie = cfg80211_find_ie(WLAN_EID_MESH_ID, @@ -333,7 +311,7 @@ static int cmp_bss(struct cfg80211_bss *a, if (a->channel != b->channel) return b->channel->center_freq - a->channel->center_freq; - if (is_mesh_bss(a) && is_mesh_bss(b)) { + if (WLAN_CAPABILITY_IS_MBSS(a->capability | b->capability)) { r = cmp_ies(WLAN_EID_MESH_ID, a->information_elements, a->len_information_elements, @@ -479,6 +457,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, struct cfg80211_internal_bss *res) { struct cfg80211_internal_bss *found = NULL; + const u8 *meshid, *meshcfg; /* * The reference to "res" is donated to this function. 
@@ -491,6 +470,22 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, res->ts = jiffies; + if (WLAN_CAPABILITY_IS_MBSS(res->pub.capability)) { + /* must be mesh, verify */ + meshid = cfg80211_find_ie(WLAN_EID_MESH_ID, + res->pub.information_elements, + res->pub.len_information_elements); + meshcfg = cfg80211_find_ie(WLAN_EID_MESH_CONFIG, + res->pub.information_elements, + res->pub.len_information_elements); + if (!meshid || !meshcfg || + meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) { + /* bogus mesh */ + kref_put(&res->ref, bss_release); + return NULL; + } + } + spin_lock_bh(&dev->bss_lock); found = rb_find_bss(dev, res); diff --git a/trunk/security/apparmor/lsm.c b/trunk/security/apparmor/lsm.c index ec1bcecf2cda..ae3a698415e6 100644 --- a/trunk/security/apparmor/lsm.c +++ b/trunk/security/apparmor/lsm.c @@ -593,8 +593,7 @@ static int apparmor_setprocattr(struct task_struct *task, char *name, sa.aad.op = OP_SETPROCATTR; sa.aad.info = name; sa.aad.error = -EINVAL; - return aa_audit(AUDIT_APPARMOR_DENIED, - __aa_current_profile(), GFP_KERNEL, + return aa_audit(AUDIT_APPARMOR_DENIED, NULL, GFP_KERNEL, &sa, NULL); } } else if (strcmp(name, "exec") == 0) { diff --git a/trunk/sound/soc/codecs/cx20442.c b/trunk/sound/soc/codecs/cx20442.c index d68ea532cc7f..f8c663dcff02 100644 --- a/trunk/sound/soc/codecs/cx20442.c +++ b/trunk/sound/soc/codecs/cx20442.c @@ -262,14 +262,14 @@ static int v253_hangup(struct tty_struct *tty) } /* Line discipline .receive_buf() */ -static void v253_receive(struct tty_struct *tty, - const unsigned char *cp, char *fp, int count) +static unsigned int v253_receive(struct tty_struct *tty, + const unsigned char *cp, char *fp, int count) { struct snd_soc_codec *codec = tty->disc_data; struct cx20442_priv *cx20442; if (!codec) - return; + return count; cx20442 = snd_soc_codec_get_drvdata(codec); @@ -281,6 +281,8 @@ static void v253_receive(struct tty_struct *tty, codec->hw_write = (hw_write_t)tty->ops->write; codec->card->pop_time = 1; } + + return count; } /* Line discipline .write_wakeup() */ diff --git a/trunk/tools/testing/ktest/ktest.pl b/trunk/tools/testing/ktest/ktest.pl index cef28e6632b9..1fd29b2daa92 100755 --- a/trunk/tools/testing/ktest/ktest.pl +++ b/trunk/tools/testing/ktest/ktest.pl @@ -788,7 +788,7 @@ sub wait_for_input sub reboot_to { if ($reboot_type eq "grub") { - run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch && reboot)'"; + run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch; reboot)'"; return; } @@ -1480,7 +1480,7 @@ sub process_config_ignore { or dodie "Failed to read $config"; while () { - if (/^((CONFIG\S*)=.*)/) { + if (/^(.*?(CONFIG\S*)(=.*| is not set))/) { $config_ignore{$2} = $1; } } @@ -1638,7 +1638,7 @@ sub run_config_bisect { if (!$found) { # try the other half doprint "Top half produced no set configs, trying bottom half\n"; - @tophalf = @start_list[$half + 1 .. $#start_list]; + @tophalf = @start_list[$half .. $#start_list]; create_config @tophalf; read_current_config \%current_config; foreach my $config (@tophalf) { @@ -1690,7 +1690,7 @@ sub run_config_bisect { # remove half the configs we are looking at and see if # they are good. 
$half = int($#start_list / 2); - } while ($#start_list > 0); + } while ($half > 0); # we found a single config, try it again unless we are running manually diff --git a/trunk/tools/virtio/virtio_test.c b/trunk/tools/virtio/virtio_test.c index 74d3331bdaf9..df0c6d2c3860 100644 --- a/trunk/tools/virtio/virtio_test.c +++ b/trunk/tools/virtio/virtio_test.c @@ -197,14 +197,6 @@ const struct option longopts[] = { .name = "help", .val = 'h', }, - { - .name = "event-idx", - .val = 'E', - }, - { - .name = "no-event-idx", - .val = 'e', - }, { .name = "indirect", .val = 'I', @@ -219,17 +211,13 @@ const struct option longopts[] = { static void help() { - fprintf(stderr, "Usage: virtio_test [--help]" - " [--no-indirect]" - " [--no-event-idx]" - "\n"); + fprintf(stderr, "Usage: virtio_test [--help] [--no-indirect]\n"); } int main(int argc, char **argv) { struct vdev_info dev; - unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | - (1ULL << VIRTIO_RING_F_EVENT_IDX); + unsigned long long features = 1ULL << VIRTIO_RING_F_INDIRECT_DESC; int o; for (;;) { @@ -240,9 +228,6 @@ int main(int argc, char **argv) case '?': help(); exit(2); - case 'e': - features &= ~(1ULL << VIRTIO_RING_F_EVENT_IDX); - break; case 'h': help(); goto done;