From 29e1a5a77dba2a00d49cdcd9ca57edada15bec04 Mon Sep 17 00:00:00 2001
From: Jason Wessel
Date: Tue, 19 Jul 2011 15:43:16 -0500
Subject: [PATCH]

--- yaml ---
r: 255037
b: refs/heads/master
c: 33d8881af5584fb7994f6b3d17fc11dcaf07b3b2
h: refs/heads/master
i:
  255035: 7c4f4662b5af132b7bd9b50344d457e7dc245d54
v: v3
---
 [refs]                                        |   2 +-
 trunk/Documentation/x86/boot.txt              |   2 +-
 trunk/arch/arm/mach-davinci/board-dm365-evm.c |   4 +-
 trunk/arch/arm/mach-davinci/gpio.c            |  21 +-
 trunk/arch/arm/mach-davinci/irq.c             |   6 -
 trunk/arch/sparc/include/asm/ptrace.h         |   1 +
 trunk/arch/x86/Kconfig                        |   2 +-
 trunk/arch/x86/kernel/reboot.c                |  24 --
 trunk/drivers/gpu/drm/i915/i915_drv.h         |   5 +-
 trunk/drivers/gpu/drm/i915/i915_gem.c         |  71 +++---
 trunk/drivers/gpu/drm/i915/i915_gem_tiling.c  |   4 +-
 trunk/drivers/gpu/drm/i915/intel_display.c    |  15 +-
 trunk/fs/cifs/cifsfs.c                        | 100 +++++---
 trunk/fs/cifs/file.c                          |   2 +-
 trunk/fs/dcache.c                             |   2 +
 trunk/fs/fscache/page.c                       |  14 +-
 trunk/fs/libfs.c                              |   2 +-
 trunk/fs/namei.c                              |   7 +-
 trunk/include/linux/sched.h                   |  17 +-
 trunk/kernel/rcutree_plugin.h                 |  53 ++--
 trunk/kernel/sched.c                          | 233 +++--------------
 trunk/kernel/sched_fair.c                     |  46 ++--
 trunk/kernel/sched_features.h                 |   2 -
 trunk/kernel/signal.c                         |  19 +-
 trunk/kernel/softirq.c                        |  12 +-
 trunk/mm/vmscan.c                             |   3 +-
 trunk/net/ceph/ceph_fs.c                      |  17 +-
 27 files changed, 232 insertions(+), 454 deletions(-)

diff --git a/[refs] b/[refs]
index c5b3140073e5..2ed2df9a97c4 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2bafc7a275436da4a1ece4ecfcbacb82a0560efb
+refs/heads/master: 33d8881af5584fb7994f6b3d17fc11dcaf07b3b2
diff --git a/trunk/Documentation/x86/boot.txt b/trunk/Documentation/x86/boot.txt
index 7c3a8801b7ce..9b7221a86df2 100644
--- a/trunk/Documentation/x86/boot.txt
+++ b/trunk/Documentation/x86/boot.txt
@@ -674,7 +674,7 @@ Protocol:	2.10+
 
 Field name:	init_size
 Type:		read
-Offset/size:	0x260/4
+Offset/size:	0x25c/4
 
 This field indicates the amount of linear contiguous memory starting
 at the kernel runtime start address that the kernel needs before it
diff --git a/trunk/arch/arm/mach-davinci/board-dm365-evm.c b/trunk/arch/arm/mach-davinci/board-dm365-evm.c
index 09a87e61ffcf..c67f684ee3e5 100644
--- a/trunk/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/trunk/arch/arm/mach-davinci/board-dm365-evm.c
@@ -520,7 +520,7 @@ static void __init evm_init_cpld(void)
 	 */
 	if (have_imager()) {
 		label = "HD imager";
-		mux |= 2;
+		mux |= 1;
 
 		/* externally mux MMC1/ENET/AIC33 to imager */
 		mux |= BIT(6) | BIT(5) | BIT(3);
@@ -540,7 +540,7 @@ static void __init evm_init_cpld(void)
 
 	resets &= ~BIT(1);
 	if (have_tvp7002()) {
-		mux |= 1;
+		mux |= 2;
 		resets &= ~BIT(2);
 		label = "tvp7002 HD";
 	} else {
diff --git a/trunk/arch/arm/mach-davinci/gpio.c b/trunk/arch/arm/mach-davinci/gpio.c
index cafbe13a82a5..e7221398e5af 100644
--- a/trunk/arch/arm/mach-davinci/gpio.c
+++ b/trunk/arch/arm/mach-davinci/gpio.c
@@ -254,10 +254,8 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 {
 	struct davinci_gpio_regs __iomem *g;
 	u32 mask = 0xffff;
-	struct davinci_gpio_controller *d;
 
-	d = (struct davinci_gpio_controller *)irq_desc_get_handler_data(desc);
-	g = (struct davinci_gpio_regs __iomem *)d->regs;
+	g = (__force struct davinci_gpio_regs __iomem *) irq_desc_get_handler_data(desc);
 
 	/* we only care about one bank */
 	if (irq & 1)
@@ -276,14 +274,11 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 		if (!status)
 			break;
 		__raw_writel(status, &g->intstat);
-
-		/* now demux them to the right lowlevel handler */
-		n = d->irq_base;
-		if (irq & 1) {
-			n += 16;
+		if (irq & 1)
 			status >>= 16;
-		}
 
+		/* now demux them to the right lowlevel handler */
+		n = (int)irq_get_handler_data(irq);
 		while (status) {
 			res = ffs(status);
 			n += res;
@@ -429,13 +424,7 @@ static int __init davinci_gpio_irq_setup(void)
 
 		/* set up all irqs in this bank */
 		irq_set_chained_handler(bank_irq, gpio_irq_handler);
-
-		/*
-		 * Each chip handles 32 gpios, and each irq bank consists of 16
-		 * gpio irqs. Pass the irq bank's corresponding controller to
-		 * the chained irq handler.
-		 */
-		irq_set_handler_data(bank_irq, &chips[gpio / 32]);
+		irq_set_handler_data(bank_irq, (__force void *)g);
 
 		for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
 			irq_set_chip(irq, &gpio_irqchip);
diff --git a/trunk/arch/arm/mach-davinci/irq.c b/trunk/arch/arm/mach-davinci/irq.c
index 952dc126c390..d8c1af025931 100644
--- a/trunk/arch/arm/mach-davinci/irq.c
+++ b/trunk/arch/arm/mach-davinci/irq.c
@@ -52,12 +52,6 @@ davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
 	struct irq_chip_type *ct;
 
 	gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq);
-	if (!gc) {
-		pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
-			__func__, irq_start);
-		return;
-	}
-
 	ct = gc->chip_types;
 	ct->chip.irq_ack = irq_gc_ack_set_bit;
 	ct->chip.irq_mask = irq_gc_mask_clr_bit;
diff --git a/trunk/arch/sparc/include/asm/ptrace.h b/trunk/arch/sparc/include/asm/ptrace.h
index c7ad3fe2b252..b928b31424b1 100644
--- a/trunk/arch/sparc/include/asm/ptrace.h
+++ b/trunk/arch/sparc/include/asm/ptrace.h
@@ -205,6 +205,7 @@ do {	current_thread_info()->syscall_noerror = 1; \
 } while (0)
 #define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
 #define instruction_pointer(regs) ((regs)->tpc)
+#define instruction_pointer_set(regs, val) ((regs)->tpc = (val))
 #define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP])
 #define regs_return_value(regs) ((regs)->u_regs[UREG_I0])
 #ifdef CONFIG_SMP
diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig
index 37357a599dca..da349723d411 100644
--- a/trunk/arch/x86/Kconfig
+++ b/trunk/arch/x86/Kconfig
@@ -1170,7 +1170,7 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
 config AMD_NUMA
 	def_bool y
 	prompt "Old style AMD Opteron NUMA detection"
-	depends on X86_64 && NUMA && PCI
+	depends on NUMA && PCI
 	---help---
 	  Enable AMD NUMA node topology detection.  You should say Y here if
 	  you have a multi processor AMD system. This uses an old method to
diff --git a/trunk/arch/x86/kernel/reboot.c b/trunk/arch/x86/kernel/reboot.c
index 9242436e9937..4f0d46fefa7f 100644
--- a/trunk/arch/x86/kernel/reboot.c
+++ b/trunk/arch/x86/kernel/reboot.c
@@ -419,30 +419,6 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
 		},
 	},
-	{	/* Handle problems with rebooting on the Latitude E6320. */
-		.callback = set_pci_reboot,
-		.ident = "Dell Latitude E6320",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
-		},
-	},
-	{	/* Handle problems with rebooting on the Latitude E5420. */
-		.callback = set_pci_reboot,
-		.ident = "Dell Latitude E5420",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
-		},
-	},
-	{	/* Handle problems with rebooting on the Latitude E6420. */
-		.callback = set_pci_reboot,
-		.ident = "Dell Latitude E6420",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
-		},
-	},
 	{ }
 };
 
diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.h b/trunk/drivers/gpu/drm/i915/i915_drv.h
index ce7914c4c044..f245c588ae95 100644
--- a/trunk/drivers/gpu/drm/i915/i915_drv.h
+++ b/trunk/drivers/gpu/drm/i915/i915_drv.h
@@ -262,7 +262,6 @@ enum intel_pch {
 };
 
 #define QUIRK_PIPEA_FORCE (1<<0)
-#define QUIRK_LVDS_SSC_DISABLE (1<<1)
 
 struct intel_fbdev;
 
@@ -1195,9 +1194,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
-				    uint32_t size,
-				    int tiling_mode);
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
 
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c
index a087e1bf0c2f..5c0d1247f453 100644
--- a/trunk/drivers/gpu/drm/i915/i915_gem.c
+++ b/trunk/drivers/gpu/drm/i915/i915_gem.c
@@ -1374,24 +1374,25 @@ i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
 }
 
 static uint32_t
-i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
+i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
 {
-	uint32_t gtt_size;
+	struct drm_device *dev = obj->base.dev;
+	uint32_t size;
 
 	if (INTEL_INFO(dev)->gen >= 4 ||
-	    tiling_mode == I915_TILING_NONE)
-		return size;
+	    obj->tiling_mode == I915_TILING_NONE)
+		return obj->base.size;
 
 	/* Previous chips need a power-of-two fence region when tiling */
 	if (INTEL_INFO(dev)->gen == 3)
-		gtt_size = 1024*1024;
+		size = 1024*1024;
 	else
-		gtt_size = 512*1024;
+		size = 512*1024;
 
-	while (gtt_size < size)
-		gtt_size <<= 1;
+	while (size < obj->base.size)
+		size <<= 1;
 
-	return gtt_size;
+	return size;
 }
 
 /**
@@ -1402,52 +1403,59 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
  * potential fence register mapping.
  */
 static uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev,
-			   uint32_t size,
-			   int tiling_mode)
+i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
 {
+	struct drm_device *dev = obj->base.dev;
+
 	/*
 	 * Minimum alignment is 4k (GTT page size), but might be greater
 	 * if a fence register is needed for the object.
 	 */
 	if (INTEL_INFO(dev)->gen >= 4 ||
-	    tiling_mode == I915_TILING_NONE)
+	    obj->tiling_mode == I915_TILING_NONE)
 		return 4096;
 
 	/*
 	 * Previous chips need to be aligned to the size of the smallest
 	 * fence register that can contain the object.
 	 */
-	return i915_gem_get_gtt_size(dev, size, tiling_mode);
+	return i915_gem_get_gtt_size(obj);
 }
 
 /**
  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
  *					 unfenced object
- * @dev: the device
- * @size: size of the object
- * @tiling_mode: tiling mode of the object
+ * @obj: object to check
  *
  * Return the required GTT alignment for an object, only taking into account
  * unfenced tiled surface requirements.
  */
 uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
-				    uint32_t size,
-				    int tiling_mode)
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
 {
+	struct drm_device *dev = obj->base.dev;
+	int tile_height;
+
 	/*
 	 * Minimum alignment is 4k (GTT page size) for sane hw.
 	 */
 	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
-	    tiling_mode == I915_TILING_NONE)
+	    obj->tiling_mode == I915_TILING_NONE)
 		return 4096;
 
-	/* Previous hardware however needs to be aligned to a power-of-two
-	 * tile height. The simplest method for determining this is to reuse
-	 * the power-of-tile object size.
+	/*
+	 * Older chips need unfenced tiled buffers to be aligned to the left
+	 * edge of an even tile row (where tile rows are counted as if the bo is
+	 * placed in a fenced gtt region).
 	 */
-	return i915_gem_get_gtt_size(dev, size, tiling_mode);
+	if (IS_GEN2(dev))
+		tile_height = 16;
+	else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+		tile_height = 32;
+	else
+		tile_height = 8;
+
+	return tile_height * obj->stride * 2;
 }
 
 int
@@ -2736,16 +2744,9 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 		return -EINVAL;
 	}
 
-	fence_size = i915_gem_get_gtt_size(dev,
-					   obj->base.size,
-					   obj->tiling_mode);
-	fence_alignment = i915_gem_get_gtt_alignment(dev,
-						     obj->base.size,
-						     obj->tiling_mode);
-	unfenced_alignment =
-		i915_gem_get_unfenced_gtt_alignment(dev,
-						    obj->base.size,
-						    obj->tiling_mode);
+	fence_size = i915_gem_get_gtt_size(obj);
+	fence_alignment = i915_gem_get_gtt_alignment(obj);
+	unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
 
 	if (alignment == 0)
 		alignment = map_and_fenceable ? fence_alignment :
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c b/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c
index 99c4faa59d8f..82d70fd9e933 100644
--- a/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -348,9 +348,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		/* Rebind if we need a change of alignment */
 		if (!obj->map_and_fenceable) {
 			u32 unfenced_alignment =
-				i915_gem_get_unfenced_gtt_alignment(dev,
-								    obj->base.size,
-								    args->tiling_mode);
+				i915_gem_get_unfenced_gtt_alignment(obj);
 			if (obj->gtt_offset & (unfenced_alignment - 1))
 				ret = i915_gem_object_unbind(obj);
 		}
diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c
index 0f1c799afea1..21b6f93fe919 100644
--- a/trunk/drivers/gpu/drm/i915/intel_display.c
+++ b/trunk/drivers/gpu/drm/i915/intel_display.c
@@ -4305,8 +4305,7 @@ static void intel_update_watermarks(struct drm_device *dev)
 
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 {
-	return dev_priv->lvds_use_ssc && i915_panel_use_ssc
-		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
+	return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
 }
 
 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
@@ -7811,15 +7810,6 @@ static void quirk_pipea_force (struct drm_device *dev)
 	DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
 }
 
-/*
- * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
- */
-static void quirk_ssc_force_disable(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
-}
-
 struct intel_quirk {
 	int device;
 	int subsystem_vendor;
@@ -7848,9 +7838,6 @@ struct intel_quirk intel_quirks[] = {
 	/* 855 & before need to leave pipe A & dpll A up */
 	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
 	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
-
-	/* Lenovo U160 cannot use SSC on LVDS */
-	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
diff --git a/trunk/fs/cifs/cifsfs.c b/trunk/fs/cifs/cifsfs.c
index bc4b12ca537b..3e2989976297 100644
--- a/trunk/fs/cifs/cifsfs.c
+++ b/trunk/fs/cifs/cifsfs.c
@@ -35,7 +35,6 @@
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
-#include <linux/namei.h>
 #include <net/ipv6.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
@@ -543,12 +542,14 @@ static const struct super_operations cifs_super_ops = {
 static struct dentry *
 cifs_get_root(struct smb_vol *vol, struct super_block *sb)
 {
-	struct dentry *dentry;
+	int xid, rc;
+	struct inode *inode;
+	struct qstr name;
+	struct dentry *dparent = NULL, *dchild = NULL, *alias;
 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
-	char *full_path = NULL;
-	char *s, *p;
+	unsigned int i, full_len, len;
+	char *full_path = NULL, *pstart;
 	char sep;
-	int xid;
 
 	full_path = cifs_build_path_to_root(vol, cifs_sb,
 					    cifs_sb_master_tcon(cifs_sb));
@@ -559,32 +560,73 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
 	xid = GetXid();
 	sep = CIFS_DIR_SEP(cifs_sb);
-	dentry = dget(sb->s_root);
-	p = s = full_path;
-
-	do {
-		struct inode *dir = dentry->d_inode;
-		struct dentry *child;
-
-		/* skip separators */
-		while (*s == sep)
-			s++;
-		if (!*s)
-			break;
-		p = s++;
-		/* next separator */
-		while (*s && *s != sep)
-			s++;
-
-		mutex_lock(&dir->i_mutex);
-		child = lookup_one_len(p, dentry, s - p);
-		mutex_unlock(&dir->i_mutex);
-		dput(dentry);
-		dentry = child;
-	} while (!IS_ERR(dentry));
+	dparent = dget(sb->s_root);
+	full_len = strlen(full_path);
+	full_path[full_len] = sep;
+	pstart = full_path + 1;
+
+	for (i = 1, len = 0; i <= full_len; i++) {
+		if (full_path[i] != sep || !len) {
+			len++;
+			continue;
+		}
+
+		full_path[i] = 0;
+		cFYI(1, "get dentry for %s", pstart);
+
+		name.name = pstart;
+		name.len = len;
+		name.hash = full_name_hash(pstart, len);
+		dchild = d_lookup(dparent, &name);
+		if (dchild == NULL) {
+			cFYI(1, "not exists");
+			dchild = d_alloc(dparent, &name);
+			if (dchild == NULL) {
+				dput(dparent);
+				dparent = ERR_PTR(-ENOMEM);
+				goto out;
+			}
+		}
+
+		cFYI(1, "get inode");
+		if (dchild->d_inode == NULL) {
+			cFYI(1, "not exists");
+			inode = NULL;
+			if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
+				rc = cifs_get_inode_info_unix(&inode, full_path,
+							      sb, xid);
+			else
+				rc = cifs_get_inode_info(&inode, full_path,
+							 NULL, sb, xid, NULL);
+			if (rc) {
+				dput(dchild);
+				dput(dparent);
+				dparent = ERR_PTR(rc);
+				goto out;
+			}
+			alias = d_materialise_unique(dchild, inode);
+			if (alias != NULL) {
+				dput(dchild);
+				if (IS_ERR(alias)) {
+					dput(dparent);
+					dparent = ERR_PTR(-EINVAL); /* XXX */
+					goto out;
+				}
+				dchild = alias;
+			}
+		}
+		cFYI(1, "parent %p, child %p", dparent, dchild);
+
+		dput(dparent);
+		dparent = dchild;
+		len = 0;
+		pstart = full_path + i + 1;
+		full_path[i] = sep;
+	}
+out:
 	_FreeXid(xid);
 	kfree(full_path);
-	return dentry;
+	return dparent;
 }
 
 static int cifs_set_super(struct super_block *sb, void *data)
diff --git a/trunk/fs/cifs/file.c b/trunk/fs/cifs/file.c
index a9b4a24f2a16..bb71471a4d9d 100644
--- a/trunk/fs/cifs/file.c
+++ b/trunk/fs/cifs/file.c
@@ -1737,7 +1737,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
 		io_parms.pid = pid;
 		io_parms.tcon = pTcon;
 		io_parms.offset = *poffset;
-		io_parms.length = cur_len;
+		io_parms.length = len;
 		rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
 				 &read_data, &buf_type);
 		pSMBr = (struct smb_com_read_rsp *)read_data;
diff --git a/trunk/fs/dcache.c b/trunk/fs/dcache.c
index fbdcbca40725..6e4ea6d87774 100644
--- a/trunk/fs/dcache.c
+++ b/trunk/fs/dcache.c
@@ -1813,6 +1813,8 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
 		tname = dentry->d_name.name;
 		i = dentry->d_inode;
 		prefetch(tname);
+		if (i)
+			prefetch(i);
 		/*
 		 * This seqcount check is required to ensure name and
 		 * len are loaded atomically, so as not to walk off the
diff --git a/trunk/fs/fscache/page.c b/trunk/fs/fscache/page.c
index 3f7a59bfa7ad..2f343b4d7a7d 100644
--- a/trunk/fs/fscache/page.c
+++ b/trunk/fs/fscache/page.c
@@ -976,12 +976,16 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
 	pagevec_init(&pvec, 0);
 	next = 0;
 
-	do {
-		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
-			break;
+	while (next <= (loff_t)-1 &&
+	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)
+	       ) {
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
-			next = page->index;
+			pgoff_t page_index = page->index;
+
+			ASSERTCMP(page_index, >=, next);
+			next = page_index + 1;
+
 			if (PageFsCache(page)) {
 				__fscache_wait_on_page_write(cookie, page);
 				__fscache_uncache_page(cookie, page);
@@ -989,7 +993,7 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
 		}
 		pagevec_release(&pvec);
 		cond_resched();
-	} while (++next);
+	}
 
 	_leave("");
 }
diff --git a/trunk/fs/libfs.c b/trunk/fs/libfs.c
index 275ca4749a2e..c88eab55aec9 100644
--- a/trunk/fs/libfs.c
+++ b/trunk/fs/libfs.c
@@ -822,7 +822,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
 		goto out;
 
 	attr->set_buf[size] = '\0';
-	val = simple_strtoll(attr->set_buf, NULL, 0);
+	val = simple_strtol(attr->set_buf, NULL, 0);
 	ret = attr->set(attr->data, val);
 	if (ret == 0)
 		ret = len; /* on success, claim we got the whole input */
diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c
index 14ab8d3f2f0c..5c867dd1c0b3 100644
--- a/trunk/fs/namei.c
+++ b/trunk/fs/namei.c
@@ -942,6 +942,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
 	 * Don't forget we might have a non-mountpoint managed dentry
 	 * that wants to block transit.
 	 */
+	*inode = path->dentry->d_inode;
 	if (unlikely(managed_dentry_might_block(path->dentry)))
 		return false;
 
@@ -954,12 +955,6 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
 		path->mnt = mounted;
 		path->dentry = mounted->mnt_root;
 		nd->seq = read_seqcount_begin(&path->dentry->d_seq);
-		/*
-		 * Update the inode too. We don't need to re-check the
-		 * dentry sequence number here after this d_inode read,
-		 * because a mount-point is always pinned.
-		 */
-		*inode = path->dentry->d_inode;
 	}
 	return true;
 }
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index 14a6c7b545de..496770a96487 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -844,7 +844,6 @@ enum cpu_idle_type {
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
 #define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
-#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
 
 enum powersavings_balance_level {
 	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
@@ -894,21 +893,16 @@ static inline int sd_power_saving_flags(void)
 	return 0;
 }
 
-struct sched_group_power {
+struct sched_group {
+	struct sched_group *next;	/* Must be a circular list */
 	atomic_t ref;
+
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
 	 * single CPU.
 	 */
-	unsigned int power, power_orig;
-};
-
-struct sched_group {
-	struct sched_group *next;	/* Must be a circular list */
-	atomic_t ref;
-
+	unsigned int cpu_power, cpu_power_orig;
 	unsigned int group_weight;
-	struct sched_group_power *sgp;
 
 	/*
 	 * The CPUs this group covers.
@@ -1260,9 +1254,6 @@ struct task_struct {
 #ifdef CONFIG_PREEMPT_RCU
 	int rcu_read_lock_nesting;
 	char rcu_read_unlock_special;
-#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
-	int rcu_boosted;
-#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
 	struct list_head rcu_node_entry;
 #endif /* #ifdef CONFIG_PREEMPT_RCU */
 #ifdef CONFIG_TREE_PREEMPT_RCU
diff --git a/trunk/kernel/rcutree_plugin.h b/trunk/kernel/rcutree_plugin.h
index 8aafbb80b8b0..75113cb7c4fb 100644
--- a/trunk/kernel/rcutree_plugin.h
+++ b/trunk/kernel/rcutree_plugin.h
@@ -68,7 +68,6 @@ struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
-static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -148,7 +147,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 
-	if (t->rcu_read_lock_nesting > 0 &&
+	if (t->rcu_read_lock_nesting &&
 	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
@@ -191,14 +190,6 @@ static void rcu_preempt_note_context_switch(int cpu)
 			rnp->gp_tasks = &t->rcu_node_entry;
 		}
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	} else if (t->rcu_read_lock_nesting < 0 &&
-		   t->rcu_read_unlock_special) {
-
-		/*
-		 * Complete exit from RCU read-side critical section on
-		 * behalf of preempted instance of __rcu_read_unlock().
-		 */
-		rcu_read_unlock_special(t);
 	}
 
 	/*
@@ -293,7 +284,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static noinline void rcu_read_unlock_special(struct task_struct *t)
+static void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
@@ -318,7 +309,7 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 	}
 
 	/* Hardware IRQ handlers cannot block. */
-	if (in_irq() || in_serving_softirq()) {
+	if (in_irq()) {
 		local_irq_restore(flags);
 		return;
 	}
@@ -351,11 +342,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
 		if (&t->rcu_node_entry == rnp->boost_tasks)
 			rnp->boost_tasks = np;
-		/* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
-		if (t->rcu_boosted) {
-			special |= RCU_READ_UNLOCK_BOOSTED;
-			t->rcu_boosted = 0;
-		}
 #endif /* #ifdef CONFIG_RCU_BOOST */
 		t->rcu_blocked_node = NULL;
 
@@ -372,6 +358,7 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
 		/* Unboost if we were boosted. */
 		if (special & RCU_READ_UNLOCK_BOOSTED) {
+			t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
 			rt_mutex_unlock(t->rcu_boost_mutex);
 			t->rcu_boost_mutex = NULL;
 		}
@@ -400,22 +387,13 @@ void __rcu_read_unlock(void)
 	struct task_struct *t = current;
 
 	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
-	if (t->rcu_read_lock_nesting != 1)
-		--t->rcu_read_lock_nesting;
-	else {
-		t->rcu_read_lock_nesting = INT_MIN;
-		barrier();  /* assign before ->rcu_read_unlock_special load */
-		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-			rcu_read_unlock_special(t);
-		barrier();  /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
-	}
+	--t->rcu_read_lock_nesting;
+	barrier();  /* decrement before load of ->rcu_read_unlock_special */
+	if (t->rcu_read_lock_nesting == 0 &&
+	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+		rcu_read_unlock_special(t);
 #ifdef CONFIG_PROVE_LOCKING
-	{
-		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
-		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
-	}
+	WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
@@ -611,8 +589,7 @@ static void rcu_preempt_check_callbacks(int cpu)
 		rcu_preempt_qs(cpu);
 		return;
 	}
-	if (t->rcu_read_lock_nesting > 0 &&
-	    per_cpu(rcu_preempt_data, cpu).qs_pending)
+	if (per_cpu(rcu_preempt_data, cpu).qs_pending)
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 
@@ -718,12 +695,9 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	for (;;) {
-		if (!sync_rcu_preempt_exp_done(rnp)) {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		if (!sync_rcu_preempt_exp_done(rnp))
 			break;
-		}
 		if (rnp->parent == NULL) {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 			wake_up(&sync_rcu_preempt_exp_wq);
 			break;
 		}
@@ -733,6 +707,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 		raw_spin_lock(&rnp->lock); /* irqs already disabled */
 		rnp->expmask &= ~mask;
 	}
+	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 /*
@@ -1199,7 +1174,7 @@ static int rcu_boost(struct rcu_node *rnp)
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&mtx, t);
 	t->rcu_boost_mutex = &mtx;
-	t->rcu_boosted = 1;
+	t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
 	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index fde6ff903525..3dc716f6d8ad 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -2544,9 +2544,13 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 }
 
 #ifdef CONFIG_SMP
-static void sched_ttwu_do_pending(struct task_struct *list)
+static void sched_ttwu_pending(void)
 {
 	struct rq *rq = this_rq();
+	struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+	if (!list)
+		return;
 
 	raw_spin_lock(&rq->lock);
 
@@ -2559,45 +2563,9 @@ static void sched_ttwu_do_pending(struct task_struct *list)
 	raw_spin_unlock(&rq->lock);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void sched_ttwu_pending(void)
-{
-	struct rq *rq = this_rq();
-	struct task_struct *list = xchg(&rq->wake_list, NULL);
-
-	if (!list)
-		return;
-
-	sched_ttwu_do_pending(list);
-}
-
-#endif /* CONFIG_HOTPLUG_CPU */
-
 void scheduler_ipi(void)
 {
-	struct rq *rq = this_rq();
-	struct task_struct *list = xchg(&rq->wake_list, NULL);
-
-	if (!list)
-		return;
-
-	/*
-	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
-	 * traditionally all their work was done from the interrupt return
-	 * path. Now that we actually do some work, we need to make sure
-	 * we do call them.
-	 *
-	 * Some archs already do call them, luckily irq_enter/exit nest
-	 * properly.
-	 *
-	 * Arguably we should visit all archs and update all handlers,
-	 * however a fair share of IPIs are still resched only so this would
-	 * somewhat pessimize the simple resched case.
-	 */
-	irq_enter();
-	sched_ttwu_do_pending(list);
-	irq_exit();
+	sched_ttwu_pending();
 }
 
 static void ttwu_queue_remote(struct task_struct *p, int cpu)
@@ -6589,7 +6557,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 			break;
 		}
 
-		if (!group->sgp->power) {
+		if (!group->cpu_power) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: domain->cpu_power not "
 					"set\n");
@@ -6613,9 +6581,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 		printk(KERN_CONT " %s", str);
-		if (group->sgp->power != SCHED_POWER_SCALE) {
+		if (group->cpu_power != SCHED_POWER_SCALE) {
 			printk(KERN_CONT " (cpu_power = %d)",
-				group->sgp->power);
+				group->cpu_power);
 		}
 
 		group = group->next;
@@ -6806,39 +6774,11 @@ static struct root_domain *alloc_rootdomain(void)
 	return rd;
 }
 
-static void free_sched_groups(struct sched_group *sg, int free_sgp)
-{
-	struct sched_group *tmp, *first;
-
-	if (!sg)
-		return;
-
-	first = sg;
-	do {
-		tmp = sg->next;
-
-		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
-			kfree(sg->sgp);
-
-		kfree(sg);
-		sg = tmp;
-	} while (sg != first);
-}
-
 static void free_sched_domain(struct rcu_head *rcu)
 {
 	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
-
-	/*
-	 * If its an overlapping domain it has private groups, iterate and
-	 * nuke them all.
-	 */
-	if (sd->flags & SD_OVERLAP) {
-		free_sched_groups(sd->groups, 1);
-	} else if (atomic_dec_and_test(&sd->groups->ref)) {
-		kfree(sd->groups->sgp);
+	if (atomic_dec_and_test(&sd->groups->ref))
 		kfree(sd->groups);
-	}
 	kfree(sd);
 }
 
@@ -7005,7 +6945,6 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 struct sd_data {
 	struct sched_domain **__percpu sd;
 	struct sched_group **__percpu sg;
-	struct sched_group_power **__percpu sgp;
 };
 
 struct s_data {
@@ -7025,73 +6964,15 @@ struct sched_domain_topology_level;
 typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 
-#define SDTL_OVERLAP		0x01
-
 struct sched_domain_topology_level {
 	sched_domain_init_f init;
 	sched_domain_mask_f  mask;
-	int		    flags;
 	struct sd_data      data;
 };
 
-static int
-build_overlap_sched_groups(struct sched_domain *sd, int cpu)
-{
-	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
-	const struct cpumask *span = sched_domain_span(sd);
-	struct cpumask *covered = sched_domains_tmpmask;
-	struct sd_data *sdd = sd->private;
-	struct sched_domain *child;
-	int i;
-
-	cpumask_clear(covered);
-
-	for_each_cpu(i, span) {
-		struct cpumask *sg_span;
-
-		if (cpumask_test_cpu(i, covered))
-			continue;
-
-		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
-				GFP_KERNEL, cpu_to_node(i));
-
-		if (!sg)
-			goto fail;
-
-		sg_span = sched_group_cpus(sg);
-
-		child = *per_cpu_ptr(sdd->sd, i);
-		if (child->child) {
-			child = child->child;
-			cpumask_copy(sg_span, sched_domain_span(child));
-		} else
-			cpumask_set_cpu(i, sg_span);
-
-		cpumask_or(covered, covered, sg_span);
-
-		sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
-		atomic_inc(&sg->sgp->ref);
-
-		if (cpumask_test_cpu(cpu, sg_span))
-			groups = sg;
-
-		if (!first)
-			first = sg;
-		if (last)
-			last->next = sg;
-		last = sg;
-		last->next = first;
-	}
-	sd->groups = groups;
-
-	return 0;
-
-fail:
-	free_sched_groups(first, 0);
-
-	return -ENOMEM;
-}
-
+/*
+ * Assumes the sched_domain tree is fully constructed
+ */
 static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 {
 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
@@ -7100,24 +6981,24 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 	if (child)
 		cpu = cpumask_first(sched_domain_span(child));
 
-	if (sg) {
+	if (sg)
 		*sg = *per_cpu_ptr(sdd->sg, cpu);
-		(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
-		atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
-	}
 
 	return cpu;
 }
 
 /*
+ * build_sched_groups takes the cpumask we wish to span, and a pointer
+ * to a function which identifies what group(along with sched group) a CPU
+ * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
+ * (due to the fact that we keep track of groups covered with a struct cpumask).
+ *
  * build_sched_groups will build a circular linked list of the groups
  * covered by the given span, and will set each group's ->cpumask correctly,
  * and ->cpu_power to 0.
- *
- * Assumes the sched_domain tree is fully constructed
  */
-static int
-build_sched_groups(struct sched_domain *sd, int cpu)
+static void
+build_sched_groups(struct sched_domain *sd)
 {
 	struct sched_group *first = NULL, *last = NULL;
 	struct sd_data *sdd = sd->private;
@@ -7125,12 +7006,6 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 	struct cpumask *covered;
 	int i;
 
-	get_group(cpu, sdd, &sd->groups);
-	atomic_inc(&sd->groups->ref);
-
-	if (cpu != cpumask_first(sched_domain_span(sd)))
-		return 0;
-
 	lockdep_assert_held(&sched_domains_mutex);
 	covered = sched_domains_tmpmask;
 
@@ -7145,7 +7020,7 @@ build_sched_groups(struct sched_domain *sd)
 			continue;
 
 		cpumask_clear(sched_group_cpus(sg));
-		sg->sgp->power = 0;
+		sg->cpu_power = 0;
 
 		for_each_cpu(j, span) {
 			if (get_group(j, sdd, NULL) != group)
@@ -7162,8 +7037,6 @@ build_sched_groups(struct sched_domain *sd)
 		last = sg;
 	}
 	last->next = first;
-
-	return 0;
 }
 
 /*
@@ -7178,18 +7051,13 @@ build_sched_groups(struct sched_domain *sd, int cpu)
  */
 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 {
-	struct sched_group *sg = sd->groups;
+	WARN_ON(!sd || !sd->groups);
 
-	WARN_ON(!sd || !sg);
-
-	do {
-		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
-		sg = sg->next;
-	} while (sg != sd->groups);
-
-	if (cpu != group_first_cpu(sg))
+	if (cpu != group_first_cpu(sd->groups))
 		return;
 
+	sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+
 	update_group_power(sd, cpu);
 }
 
@@ -7309,15 +7177,15 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 static void claim_allocations(int cpu, struct sched_domain *sd)
 {
 	struct sd_data *sdd = sd->private;
+	struct sched_group *sg = sd->groups;
 
 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
 
-	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
+	if (cpu == cpumask_first(sched_group_cpus(sg))) {
+		WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
-
-	if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
-		*per_cpu_ptr(sdd->sgp, cpu) = NULL;
+	}
 }
 
 #ifdef CONFIG_SCHED_SMT
@@ -7342,7 +7210,7 @@ static struct sched_domain_topology_level default_topology[] = {
 #endif
 	{ sd_init_CPU, cpu_cpu_mask, },
 #ifdef CONFIG_NUMA
-	{ sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
+	{ sd_init_NODE, cpu_node_mask, },
 	{ sd_init_ALLNODES, cpu_allnodes_mask, },
 #endif
 	{ NULL, },
@@ -7366,14 +7234,9 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 		if (!sdd->sg)
 			return -ENOMEM;
 
-		sdd->sgp = alloc_percpu(struct sched_group_power *);
-		if (!sdd->sgp)
-			return -ENOMEM;
-
 		for_each_cpu(j, cpu_map) {
 			struct sched_domain *sd;
 			struct sched_group *sg;
-			struct sched_group_power *sgp;
 
 			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
 					GFP_KERNEL, cpu_to_node(j));
@@ -7388,13 +7251,6 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 				return -ENOMEM;
 
 			*per_cpu_ptr(sdd->sg, j) = sg;
-
-			sgp = kzalloc_node(sizeof(struct sched_group_power),
-					GFP_KERNEL, cpu_to_node(j));
-			if (!sgp)
-				return -ENOMEM;
-
-			*per_cpu_ptr(sdd->sgp, j) = sgp;
 		}
 	}
 
@@ -7410,15 +7266,11 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		struct sd_data *sdd = &tl->data;
 
 		for_each_cpu(j, cpu_map) {
-			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
-			if (sd && (sd->flags & SD_OVERLAP))
-				free_sched_groups(sd->groups, 0);
+			kfree(*per_cpu_ptr(sdd->sd, j));
 			kfree(*per_cpu_ptr(sdd->sg, j));
-			kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
 		free_percpu(sdd->sd);
 		free_percpu(sdd->sg);
-		free_percpu(sdd->sgp);
 	}
 }
 
@@ -7464,13 +7316,8 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 		struct sched_domain_topology_level *tl;
 
 		sd = NULL;
-		for (tl = sched_domain_topology; tl->init; tl++) {
+		for (tl = sched_domain_topology; tl->init; tl++)
 			sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
-			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
-				sd->flags |= SD_OVERLAP;
-			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
-				break;
-		}
 
 		while (sd->child)
 			sd = sd->child;
@@ -7482,13 +7329,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 	/* Build the groups for the domains */
 	for_each_cpu(i, cpu_map) {
 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
 			sd->span_weight = cpumask_weight(sched_domain_span(sd));
-			if (sd->flags & SD_OVERLAP) {
-				if (build_overlap_sched_groups(sd, i))
-					goto error;
-			} else {
-				if (build_sched_groups(sd, i))
-					goto error;
-			}
+			get_group(i, sd->private, &sd->groups);
+			atomic_inc(&sd->groups->ref);
+
+			if (i != cpumask_first(sched_domain_span(sd)))
+				continue;
+
+			build_sched_groups(sd);
 		}
 	}
diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c
index c768588e180b..433491c2dc8f 100644
--- a/trunk/kernel/sched_fair.c
+++ b/trunk/kernel/sched_fair.c
@@ -1585,7 +1585,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		}
 
 		/* Adjust by relative CPU power of the group */
-		avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
+		avg_load = (avg_load * SCHED_POWER_SCALE) / group->cpu_power;
 
 		if (local_group) {
 			this_load = avg_load;
@@ -2631,7 +2631,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 		power >>= SCHED_POWER_SHIFT;
 	}
 
-	sdg->sgp->power_orig = power;
+	sdg->cpu_power_orig = power;
 
 	if (sched_feat(ARCH_POWER))
 		power *= arch_scale_freq_power(sd, cpu);
@@ -2647,7 +2647,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 		power = 1;
 
 	cpu_rq(cpu)->cpu_power = power;
-	sdg->sgp->power = power;
+	sdg->cpu_power = power;
 }
 
 static void update_group_power(struct sched_domain *sd, int cpu)
@@ -2665,11 +2665,11 @@ static void update_group_power(struct sched_domain *sd, int cpu)
 
 	group = child->groups;
 	do {
-		power += group->sgp->power;
+		power += group->cpu_power;
 		group = group->next;
 	} while (group != child->groups);
 
-	sdg->sgp->power = power;
+	sdg->cpu_power = power;
 }
 
 /*
@@ -2691,7 +2691,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 	/*
 	 * If ~90% of the cpu_power is still there, we're good.
 	 */
-	if (group->sgp->power * 32 > group->sgp->power_orig * 29)
+	if (group->cpu_power * 32 > group->cpu_power_orig * 29)
 		return 1;
 
 	return 0;
@@ -2771,7 +2771,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	}
 
 	/* Adjust by relative CPU power of the group */
-	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
+	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->cpu_power;
 
 	/*
 	 * Consider the group unbalanced when the imbalance is larger
@@ -2788,7 +2788,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
 		sgs->group_imb = 1;
 
-	sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
+	sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power,
 						SCHED_POWER_SCALE);
 	if (!sgs->group_capacity)
 		sgs->group_capacity = fix_small_capacity(sd, group);
@@ -2877,7 +2877,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 			return;
 
 		sds->total_load += sgs.group_load;
-		sds->total_pwr += sg->sgp->power;
+		sds->total_pwr += sg->cpu_power;
 
 		/*
 		 * In case the child domain prefers tasks go to siblings
@@ -2962,7 +2962,7 @@ static int check_asym_packing(struct sched_domain *sd,
 	if (this_cpu > busiest_cpu)
 		return 0;
 
-	*imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
+	*imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power,
 				       SCHED_POWER_SCALE);
 	return 1;
 }
@@ -2993,7 +2993,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 
 	scaled_busy_load_per_task = sds->busiest_load_per_task
 					 * SCHED_POWER_SCALE;
-	scaled_busy_load_per_task /= sds->busiest->sgp->power;
+	scaled_busy_load_per_task /= sds->busiest->cpu_power;
 
 	if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
 			(scaled_busy_load_per_task * imbn)) {
@@ -3007,28 +3007,28 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 	 * moving them.
 	 */
 
-	pwr_now += sds->busiest->sgp->power *
+	pwr_now += sds->busiest->cpu_power *
 			min(sds->busiest_load_per_task, sds->max_load);
-	pwr_now += sds->this->sgp->power *
+	pwr_now += sds->this->cpu_power *
 			min(sds->this_load_per_task, sds->this_load);
 	pwr_now /= SCHED_POWER_SCALE;
 
 	/* Amount of load we'd subtract */
 	tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
-		sds->busiest->sgp->power;
+		sds->busiest->cpu_power;
 	if (sds->max_load > tmp)
-		pwr_move += sds->busiest->sgp->power *
+		pwr_move += sds->busiest->cpu_power *
 			min(sds->busiest_load_per_task, sds->max_load - tmp);
 
 	/* Amount of load we'd add */
-	if (sds->max_load * sds->busiest->sgp->power <
+	if (sds->max_load * sds->busiest->cpu_power <
 		sds->busiest_load_per_task * SCHED_POWER_SCALE)
-		tmp = (sds->max_load * sds->busiest->sgp->power) /
-			sds->this->sgp->power;
+		tmp = (sds->max_load * sds->busiest->cpu_power) /
+			sds->this->cpu_power;
 	else
 		tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
-			sds->this->sgp->power;
-	pwr_move += sds->this->sgp->power *
+			sds->this->cpu_power;
+	pwr_move += sds->this->cpu_power *
 		min(sds->this_load_per_task, sds->this_load + tmp);
 	pwr_move /= SCHED_POWER_SCALE;
 
@@ -3074,7 +3074,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 
 		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
 
-		load_above_capacity /= sds->busiest->sgp->power;
+		load_above_capacity /= sds->busiest->cpu_power;
 	}
 
 	/*
@@ -3090,8 +3090,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 	max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
 
 	/* How much load to actually move to equalise the imbalance */
-	*imbalance = min(max_pull * sds->busiest->sgp->power,
-		(sds->avg_load - sds->this_load) * sds->this->sgp->power)
+	*imbalance = min(max_pull * sds->busiest->cpu_power,
+		(sds->avg_load - sds->this_load) * sds->this->cpu_power)
 			/ SCHED_POWER_SCALE;
 
 	/*
diff --git a/trunk/kernel/sched_features.h b/trunk/kernel/sched_features.h
index 1e7066d76c26..be40f7371ee1 100644
--- a/trunk/kernel/sched_features.h
+++ b/trunk/kernel/sched_features.h
@@ -70,5 +70,3 @@ SCHED_FEAT(NONIRQ_POWER, 1)
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
 SCHED_FEAT(TTWU_QUEUE, 1)
-
-SCHED_FEAT(FORCE_SD_OVERLAP, 0)
diff --git a/trunk/kernel/signal.c b/trunk/kernel/signal.c
index 415d85d6f6c6..ff7678603328 100644
--- a/trunk/kernel/signal.c
+++ b/trunk/kernel/signal.c
@@ -1178,25 +1178,18 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 {
 	struct sighand_struct *sighand;
 
+	rcu_read_lock();
 	for (;;) {
-		local_irq_save(*flags);
-		rcu_read_lock();
 		sighand = rcu_dereference(tsk->sighand);
-		if (unlikely(sighand == NULL)) {
-			rcu_read_unlock();
-			local_irq_restore(*flags);
+		if (unlikely(sighand == NULL))
 			break;
-		}
 
-		spin_lock(&sighand->siglock);
-		if (likely(sighand == tsk->sighand)) {
-			rcu_read_unlock();
+		spin_lock_irqsave(&sighand->siglock, *flags);
+		if (likely(sighand == tsk->sighand))
 			break;
-		}
-		spin_unlock(&sighand->siglock);
-		rcu_read_unlock();
-		local_irq_restore(*flags);
+		spin_unlock_irqrestore(&sighand->siglock, *flags);
 	}
+	rcu_read_unlock();
 
 	return sighand;
 }
diff --git a/trunk/kernel/softirq.c b/trunk/kernel/softirq.c
index fca82c32042b..40cf63ddd4b3 100644
--- a/trunk/kernel/softirq.c
+++ b/trunk/kernel/softirq.c
@@ -315,24 +315,16 @@ static inline void invoke_softirq(void)
 {
 	if (!force_irqthreads)
 		__do_softirq();
-	else {
-		__local_bh_disable((unsigned long)__builtin_return_address(0),
-				SOFTIRQ_OFFSET);
+	else
 		wakeup_softirqd();
-		__local_bh_enable(SOFTIRQ_OFFSET);
-	}
 }
 #else
 static inline void invoke_softirq(void)
 {
 	if (!force_irqthreads)
 		do_softirq();
-	else {
-		__local_bh_disable((unsigned long)__builtin_return_address(0),
-				SOFTIRQ_OFFSET);
+	else
 		wakeup_softirqd();
-		__local_bh_enable(SOFTIRQ_OFFSET);
-	}
 }
 #endif
 
diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c
index d036e59d302b..5ed24b94c5e6 100644
--- a/trunk/mm/vmscan.c
+++ b/trunk/mm/vmscan.c
@@ -2310,8 +2310,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
 	for (i = 0; i <= classzone_idx; i++)
 		present_pages += pgdat->node_zones[i].present_pages;
 
-	/* A special case here: if zone has no page, we think it's balanced */
-	return balanced_pages >= (present_pages >> 2);
+	return balanced_pages > (present_pages >> 2);
 }
 
 /* is kswapd sleeping prematurely? */
diff --git a/trunk/net/ceph/ceph_fs.c b/trunk/net/ceph/ceph_fs.c
index 41466ccb972a..a3a3a31d3c37 100644
--- a/trunk/net/ceph/ceph_fs.c
+++ b/trunk/net/ceph/ceph_fs.c
@@ -36,19 +36,16 @@ int ceph_flags_to_mode(int flags)
 	if ((flags & O_DIRECTORY) == O_DIRECTORY)
 		return CEPH_FILE_MODE_PIN;
 #endif
+	if ((flags & O_APPEND) == O_APPEND)
+		flags |= O_WRONLY;
 
-	switch (flags & O_ACCMODE) {
-	case O_WRONLY:
+	if ((flags & O_ACCMODE) == O_RDWR)
+		mode = CEPH_FILE_MODE_RDWR;
+	else if ((flags & O_ACCMODE) == O_WRONLY)
 		mode = CEPH_FILE_MODE_WR;
-		break;
-	case O_RDONLY:
+	else
 		mode = CEPH_FILE_MODE_RD;
-		break;
-	case O_RDWR:
-	case O_ACCMODE: /* this is what the VFS does */
-		mode = CEPH_FILE_MODE_RDWR;
-		break;
-	}
+
 #ifdef O_LAZY
 	if (flags & O_LAZY)
 		mode |= CEPH_FILE_MODE_LAZY;