diff --git a/[refs] b/[refs] index 37480c5af7ae..feddb9151dc9 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: a16d9d25c364dc0f4f9e4af7dad829e7ced07e47 +refs/heads/master: 760a9a30adc558a75916a13902f38c6792fa8c4b diff --git a/trunk/Documentation/watchdog/src/watchdog-test.c b/trunk/Documentation/watchdog/src/watchdog-test.c index 3da822967ee0..73ff5cc93e05 100644 --- a/trunk/Documentation/watchdog/src/watchdog-test.c +++ b/trunk/Documentation/watchdog/src/watchdog-test.c @@ -31,7 +31,7 @@ static void keep_alive(void) * or "-e" to enable the card. */ -static void term(int sig) +void term(int sig) { close(fd); fprintf(stderr, "Stopping watchdog ticks...\n"); diff --git a/trunk/arch/parisc/include/asm/atomic.h b/trunk/arch/parisc/include/asm/atomic.h index af9cf30ed474..6c6defc24619 100644 --- a/trunk/arch/parisc/include/asm/atomic.h +++ b/trunk/arch/parisc/include/asm/atomic.h @@ -141,7 +141,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) #define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0) -#define ATOMIC_INIT(i) { (i) } +#define ATOMIC_INIT(i) ((atomic_t) { (i) }) #define smp_mb__before_atomic_dec() smp_mb() #define smp_mb__after_atomic_dec() smp_mb() @@ -150,7 +150,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) #ifdef CONFIG_64BIT -#define ATOMIC64_INIT(i) { (i) } +#define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) static __inline__ s64 __atomic64_add_return(s64 i, atomic64_t *v) diff --git a/trunk/arch/parisc/kernel/process.c b/trunk/arch/parisc/kernel/process.c index 2c05a9292a81..d4b94b395c16 100644 --- a/trunk/arch/parisc/kernel/process.c +++ b/trunk/arch/parisc/kernel/process.c @@ -309,7 +309,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp, cregs->ksp = (unsigned long)stack + (pregs->gr[21] & (THREAD_SIZE - 1)); cregs->gr[30] = usp; - if (personality(p->personality) == PER_HPUX) { + if (p->personality == PER_HPUX) { #ifdef CONFIG_HPUX cregs->kpc = (unsigned long) &hpux_child_return; #else diff --git a/trunk/arch/parisc/kernel/sys_parisc.c b/trunk/arch/parisc/kernel/sys_parisc.c index 7426e40699bd..c9b932260f47 100644 --- a/trunk/arch/parisc/kernel/sys_parisc.c +++ b/trunk/arch/parisc/kernel/sys_parisc.c @@ -225,12 +225,12 @@ long parisc_personality(unsigned long personality) long err; if (personality(current->personality) == PER_LINUX32 - && personality(personality) == PER_LINUX) - personality = (personality & ~PER_MASK) | PER_LINUX32; + && personality == PER_LINUX) + personality = PER_LINUX32; err = sys_personality(personality); - if (personality(err) == PER_LINUX32) - err = (err & ~PER_MASK) | PER_LINUX; + if (err == PER_LINUX32) + err = PER_LINUX; return err; } diff --git a/trunk/arch/s390/include/asm/elf.h b/trunk/arch/s390/include/asm/elf.h index 9b94a160fe7f..32e8449640fa 100644 --- a/trunk/arch/s390/include/asm/elf.h +++ b/trunk/arch/s390/include/asm/elf.h @@ -180,8 +180,7 @@ extern char elf_platform[]; #define ELF_PLATFORM (elf_platform) #ifndef CONFIG_64BIT -#define SET_PERSONALITY(ex) \ - set_personality(PER_LINUX | (current->personality & (~PER_MASK))) +#define SET_PERSONALITY(ex) set_personality(PER_LINUX) #else /* CONFIG_64BIT */ #define SET_PERSONALITY(ex) \ do { \ diff --git a/trunk/arch/s390/include/asm/posix_types.h b/trunk/arch/s390/include/asm/posix_types.h index bf2a2ad2f800..7bcc14e395f0 100644 --- a/trunk/arch/s390/include/asm/posix_types.h +++ b/trunk/arch/s390/include/asm/posix_types.h @@ -13,7 +13,6 @@ */ typedef unsigned long __kernel_size_t; -typedef long 
__kernel_ssize_t; #define __kernel_size_t __kernel_size_t typedef unsigned short __kernel_old_dev_t; @@ -26,6 +25,7 @@ typedef unsigned short __kernel_mode_t; typedef unsigned short __kernel_ipc_pid_t; typedef unsigned short __kernel_uid_t; typedef unsigned short __kernel_gid_t; +typedef int __kernel_ssize_t; typedef int __kernel_ptrdiff_t; #else /* __s390x__ */ @@ -35,6 +35,7 @@ typedef unsigned int __kernel_mode_t; typedef int __kernel_ipc_pid_t; typedef unsigned int __kernel_uid_t; typedef unsigned int __kernel_gid_t; +typedef long __kernel_ssize_t; typedef long __kernel_ptrdiff_t; typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ diff --git a/trunk/arch/s390/include/asm/smp.h b/trunk/arch/s390/include/asm/smp.h index ce26ac3cb162..a0a8340daafa 100644 --- a/trunk/arch/s390/include/asm/smp.h +++ b/trunk/arch/s390/include/asm/smp.h @@ -44,7 +44,6 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data) } static inline int smp_find_processor_id(int address) { return 0; } -static inline int smp_store_status(int cpu) { return 0; } static inline int smp_vcpu_scheduled(int cpu) { return 1; } static inline void smp_yield_cpu(int cpu) { } static inline void smp_yield(void) { } diff --git a/trunk/drivers/gpu/drm/drm_crtc.c b/trunk/drivers/gpu/drm/drm_crtc.c index 6fbfc244748f..08a7aa722d6b 100644 --- a/trunk/drivers/gpu/drm/drm_crtc.c +++ b/trunk/drivers/gpu/drm/drm_crtc.c @@ -1981,7 +1981,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags)) + if (!req->flags) return -EINVAL; mutex_lock(&dev->mode_config.mutex); diff --git a/trunk/drivers/gpu/drm/drm_edid.c b/trunk/drivers/gpu/drm/drm_edid.c index b7ee230572b7..a8743c399e83 100644 --- a/trunk/drivers/gpu/drm/drm_edid.c +++ b/trunk/drivers/gpu/drm/drm_edid.c @@ -87,9 +87,6 @@ static struct edid_quirk { int product_id; u32 quirks; } edid_quirk_list[] = { - /* ASUS VW222S */ - { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING }, - /* Acer AL1706 */ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, /* Acer F51 */ diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_display.c b/trunk/drivers/gpu/drm/gma500/psb_intel_display.c index 8033526bb53b..30dc22a7156c 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/trunk/drivers/gpu/drm/gma500/psb_intel_display.c @@ -1362,9 +1362,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe, (struct drm_connector **) (psb_intel_crtc + 1); psb_intel_crtc->mode_set.num_connectors = 0; psb_intel_cursor_init(dev, psb_intel_crtc); - - /* Set to true so that the pipe is forced off on initial config. */ - psb_intel_crtc->active = true; } int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c index 60815b861ec2..d9a5372ec56f 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -72,7 +72,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 * entries. For aliasing ppgtt support we just steal them at the end for * now. 
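Review note: the comment above gives the constant this hunk hardcodes: a global GTT of 512*1024 entries whose tail is carved out for the PPGTT page directory. A throwaway userspace model of that arithmetic (I915_PPGTT_PD_ENTRIES taken as 512 here, which is an assumption for illustration, not something this diff states):

    /* toy model, not driver code; the PD entry count is assumed */
    #include <stdio.h>

    #define GTT_TOTAL_ENTRIES     (512 * 1024)  /* fixed size used by the hunk */
    #define I915_PPGTT_PD_ENTRIES 512           /* assumption for illustration */

    int main(void)
    {
        unsigned first_pd_entry = GTT_TOTAL_ENTRIES - I915_PPGTT_PD_ENTRIES;

        /* the last slots of the global GTT hold the PPGTT PDEs */
        printf("PDEs live in GTT entries [%u, %u)\n",
               first_pd_entry, (unsigned)GTT_TOTAL_ENTRIES);
        return 0;
    }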
*/ - first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES; + first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES; ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); if (!ppgtt) diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index 2dfa6cf4886b..a69a3d0d3acf 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -1384,7 +1384,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); - WARN(hdmi_pipe_enabled(dev_priv, pipe, val), + WARN(hdmi_pipe_enabled(dev_priv, val, pipe), "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", reg, pipe_name(pipe)); @@ -1404,13 +1404,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, reg = PCH_ADPA; val = I915_READ(reg); - WARN(adpa_pipe_enabled(dev_priv, pipe, val), + WARN(adpa_pipe_enabled(dev_priv, val, pipe), "PCH VGA enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); reg = PCH_LVDS; val = I915_READ(reg); - WARN(lvds_pipe_enabled(dev_priv, pipe, val), + WARN(lvds_pipe_enabled(dev_priv, val, pipe), "PCH LVDS enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); @@ -1872,7 +1872,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); - if (hdmi_pipe_enabled(dev_priv, pipe, val)) { + if (hdmi_pipe_enabled(dev_priv, val, pipe)) { DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", reg, pipe); I915_WRITE(reg, val & ~PORT_ENABLE); @@ -1894,12 +1894,12 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, reg = PCH_ADPA; val = I915_READ(reg); - if (adpa_pipe_enabled(dev_priv, pipe, val)) + if (adpa_pipe_enabled(dev_priv, val, pipe)) I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); reg = PCH_LVDS; val = I915_READ(reg); - if (lvds_pipe_enabled(dev_priv, pipe, val)) { + if (lvds_pipe_enabled(dev_priv, val, pipe)) { DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); I915_WRITE(reg, val & ~LVDS_PORT_EN); POSTING_READ(reg); diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c index e9a6f6aaed85..e05c0d3e3440 100644 --- a/trunk/drivers/gpu/drm/i915/intel_lvds.c +++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c @@ -780,14 +780,6 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"), }, }, - { - .callback = intel_no_lvds_dmi_callback, - .ident = "Gigabyte GA-D525TUD", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), - DMI_MATCH(DMI_BOARD_NAME, "D525TUD"), - }, - }, { } /* terminating entry */ }; diff --git a/trunk/drivers/gpu/drm/i915/intel_sprite.c b/trunk/drivers/gpu/drm/i915/intel_sprite.c index 7644f31a3778..cc8df4de2d92 100644 --- a/trunk/drivers/gpu/drm/i915/intel_sprite.c +++ b/trunk/drivers/gpu/drm/i915/intel_sprite.c @@ -60,11 +60,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, switch (fb->pixel_format) { case DRM_FORMAT_XBGR8888: - sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; + sprctl |= SPRITE_FORMAT_RGBX888; pixel_size = 4; break; case DRM_FORMAT_XRGB8888: - sprctl |= SPRITE_FORMAT_RGBX888; + sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; pixel_size = 4; break; case DRM_FORMAT_YUYV: diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_state.c b/trunk/drivers/gpu/drm/nouveau/nouveau_state.c index 
c61014442aa9..1866dbb49979 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_state.c @@ -736,11 +736,9 @@ nouveau_card_init(struct drm_device *dev) } break; case NV_C0: - if (!(nv_rd32(dev, 0x022500) & 0x00000200)) - nvc0_copy_create(dev, 1); + nvc0_copy_create(dev, 1); case NV_D0: - if (!(nv_rd32(dev, 0x022500) & 0x00000100)) - nvc0_copy_create(dev, 0); + nvc0_copy_create(dev, 0); break; default: break; diff --git a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c index 2817101fb167..f4d4505fe831 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c @@ -258,6 +258,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) radeon_crtc->enabled = true; /* adjust pm to dpms changes BEFORE enabling crtcs */ radeon_pm_compute_clocks(rdev); + /* disable crtc pair power gating before programming */ if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) atombios_powergate_crtc(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_ENABLE); @@ -277,8 +278,25 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_DISABLE); radeon_crtc->enabled = false; - if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) - atombios_powergate_crtc(crtc, ATOM_ENABLE); + /* power gating is per-pair */ + if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) { + struct drm_crtc *other_crtc; + struct radeon_crtc *other_radeon_crtc; + list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) { + other_radeon_crtc = to_radeon_crtc(other_crtc); + if (((radeon_crtc->crtc_id == 0) && (other_radeon_crtc->crtc_id == 1)) || + ((radeon_crtc->crtc_id == 1) && (other_radeon_crtc->crtc_id == 0)) || + ((radeon_crtc->crtc_id == 2) && (other_radeon_crtc->crtc_id == 3)) || + ((radeon_crtc->crtc_id == 3) && (other_radeon_crtc->crtc_id == 2)) || + ((radeon_crtc->crtc_id == 4) && (other_radeon_crtc->crtc_id == 5)) || + ((radeon_crtc->crtc_id == 5) && (other_radeon_crtc->crtc_id == 4))) { + /* if both crtcs in the pair are off, enable power gating */ + if (other_radeon_crtc->enabled == false) + atombios_powergate_crtc(crtc, ATOM_ENABLE); + break; + } + } + } /* adjust pm to dpms changes AFTER disabling crtcs */ radeon_pm_compute_clocks(rdev); break; @@ -1664,22 +1682,9 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_atom_ss ss; - int i; atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); - for (i = 0; i < rdev->num_crtc; i++) { - if (rdev->mode_info.crtcs[i] && - rdev->mode_info.crtcs[i]->enabled && - i != radeon_crtc->crtc_id && - radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) { - /* one other crtc is using this pll don't turn - * off the pll - */ - goto done; - } - } - switch (radeon_crtc->pll_id) { case ATOM_PPLL1: case ATOM_PPLL2: @@ -1696,7 +1701,6 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) default: break; } -done: radeon_crtc->pll_id = -1; } diff --git a/trunk/drivers/gpu/drm/radeon/atombios_dp.c b/trunk/drivers/gpu/drm/radeon/atombios_dp.c index 3623b98ed3fe..7712cf5ab33b 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_dp.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_dp.c @@ -577,25 +577,30 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder, struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = 
to_radeon_connector(connector); int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; - u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector); - u8 tmp; if (!ASIC_IS_DCE4(rdev)) return panel_mode; - if (dp_bridge != ENCODER_OBJECT_ID_NONE) { - /* DP bridge chips */ - tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); - if (tmp & 1) - panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; - else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || - (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) + if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == + ENCODER_OBJECT_ID_NUTMEG) + panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; + else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == + ENCODER_OBJECT_ID_TRAVIS) { + u8 id[6]; + int i; + for (i = 0; i < 6; i++) + id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i); + if (id[0] == 0x73 && + id[1] == 0x69 && + id[2] == 0x76 && + id[3] == 0x61 && + id[4] == 0x72 && + id[5] == 0x54) panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; else - panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { - /* eDP */ - tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); + u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); if (tmp & 1) panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; } diff --git a/trunk/drivers/gpu/drm/radeon/atombios_encoders.c b/trunk/drivers/gpu/drm/radeon/atombios_encoders.c index 6e8803a1170c..f9bc27fe269a 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1379,8 +1379,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); struct radeon_connector *radeon_connector = NULL; struct radeon_connector_atom_dig *radeon_dig_connector = NULL; @@ -1392,37 +1390,19 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) switch (mode) { case DRM_MODE_DPMS_ON: - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { - if (!connector) - dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; - else - dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector); - - /* setup and enable the encoder */ - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); - atombios_dig_encoder_setup(encoder, - ATOM_ENCODER_CMD_SETUP_PANEL_MODE, - dig->panel_mode); - if (ext_encoder) { - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); + /* some early dce3.2 boards have a bug in their transmitter control table */ + if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) || + ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { + if (ASIC_IS_DCE6(rdev)) { + /* It seems we need to call ATOM_ENCODER_CMD_SETUP again + * before reenabling encoder on DPMS ON, otherwise we never + * get picture + */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); } atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - } else if (ASIC_IS_DCE4(rdev)) { - /* setup and enable the encoder */ - 
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); - /* enable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); } else { - /* setup and enable the encoder and transmitter */ - atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - /* some early dce3.2 boards have a bug in their transmitter control table */ - if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730)) - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { @@ -1440,19 +1420,10 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { - /* disable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - } else if (ASIC_IS_DCE4(rdev)) { - /* disable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - } else { - /* disable the encoder and transmitter */ + else atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); - } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); @@ -1769,34 +1740,13 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder *test_encoder; - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + struct radeon_encoder_atom_dig *dig; uint32_t dig_enc_in_use = 0; - if (ASIC_IS_DCE6(rdev)) { - /* DCE6 */ - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - if (dig->linkb) - return 1; - else - return 0; - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - if (dig->linkb) - return 3; - else - return 2; - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - if (dig->linkb) - return 5; - else - return 4; - break; - } - } else if (ASIC_IS_DCE4(rdev)) { - /* DCE4/5 */ - if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) { + /* DCE4/5 */ + if (ASIC_IS_DCE4(rdev)) { + dig = radeon_encoder->enc_priv; + if (ASIC_IS_DCE41(rdev)) { /* ontario follows DCE4 */ if (rdev->family == CHIP_PALM) { if (dig->linkb) @@ -1898,12 +1848,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); radeon_encoder->pixel_clock = adjusted_mode->clock; - /* need to call this here rather than in 
prepare() since we need some crtc info */ - radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); - if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) { if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) atombios_yuv_setup(encoder, true); @@ -1922,7 +1870,38 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - /* handled in dpms */ + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + + if (!connector) + dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; + else + dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector); + + /* setup and enable the encoder */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + atombios_dig_encoder_setup(encoder, + ATOM_ENCODER_CMD_SETUP_PANEL_MODE, + dig->panel_mode); + } else if (ASIC_IS_DCE4(rdev)) { + /* disable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + /* setup and enable the encoder */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + + /* enable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); + } else { + /* disable the encoder and transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); + + /* setup and enable the encoder and transmitter */ + atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); + } break; case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_DVO1: @@ -1943,6 +1922,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, break; } + if (ext_encoder) { + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) + atombios_external_encoder_setup(encoder, ext_encoder, + EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); + else + atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); + } + atombios_apply_encoder_quirks(encoder, adjusted_mode); if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { @@ -2129,6 +2116,7 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) } radeon_atom_output_lock(encoder, true); + radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); if (connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -2149,7 +2137,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) static void radeon_atom_encoder_commit(struct drm_encoder *encoder) { - /* need to call this here as we need the crtc set up */ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON); radeon_atom_output_lock(encoder, false); } @@ -2190,7 +2177,14 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - /* handled in dpms */ + if (ASIC_IS_DCE4(rdev)) + /* disable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + else { + /* disable the encoder and transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + 
atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); + } break; case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_DVO1: diff --git a/trunk/drivers/gpu/drm/radeon/r600_cs.c b/trunk/drivers/gpu/drm/radeon/r600_cs.c index f37676d7f217..ab74e6b149e7 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_cs.c +++ b/trunk/drivers/gpu/drm/radeon/r600_cs.c @@ -63,7 +63,6 @@ struct r600_cs_track { u32 cb_color_size_idx[8]; /* unused */ u32 cb_target_mask; u32 cb_shader_mask; /* unused */ - bool is_resolve; u32 cb_color_size[8]; u32 vgt_strmout_en; u32 vgt_strmout_buffer_en; @@ -316,15 +315,7 @@ static void r600_cs_track_init(struct r600_cs_track *track) track->cb_color_bo[i] = NULL; track->cb_color_bo_offset[i] = 0xFFFFFFFF; track->cb_color_bo_mc[i] = 0xFFFFFFFF; - track->cb_color_frag_bo[i] = NULL; - track->cb_color_frag_offset[i] = 0xFFFFFFFF; - track->cb_color_tile_bo[i] = NULL; - track->cb_color_tile_offset[i] = 0xFFFFFFFF; - track->cb_color_mask[i] = 0xFFFFFFFF; - } - track->is_resolve = false; - track->nsamples = 16; - track->log_nsamples = 4; + } track->cb_target_mask = 0xFFFFFFFF; track->cb_shader_mask = 0xFFFFFFFF; track->cb_dirty = true; @@ -361,8 +352,6 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) volatile u32 *ib = p->ib.ptr; unsigned array_mode; u32 format; - /* When resolve is used, the second colorbuffer has always 1 sample. */ - unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples; size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; format = G_0280A0_FORMAT(track->cb_color_info[i]); @@ -386,7 +375,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) array_check.group_size = track->group_size; array_check.nbanks = track->nbanks; array_check.npipes = track->npipes; - array_check.nsamples = nsamples; + array_check.nsamples = track->nsamples; array_check.blocksize = r600_fmt_get_blocksize(format); if (r600_get_array_mode_alignment(&array_check, &pitch_align, &height_align, &depth_align, &base_align)) { @@ -432,7 +421,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) /* check offset */ tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * - r600_fmt_get_blocksize(format) * nsamples; + r600_fmt_get_blocksize(format) * track->nsamples; switch (array_mode) { default: case V_0280A0_ARRAY_LINEAR_GENERAL: @@ -803,12 +792,6 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) */ if (track->cb_dirty) { tmp = track->cb_target_mask; - - /* We must check both colorbuffers for RESOLVE. 
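Review note: the comment being deleted here explains the check that goes with it: with RESOLVE_BOX the second colorbuffer is written even when cb_target_mask leaves it unmasked, so CB0 and CB1 must both be validated. A standalone sketch of that mask walk (4 enable bits per colorbuffer, as in the surrounding code; validate_cb() is a hypothetical stand-in for r600_cs_track_validate_cb()):

    #include <stdbool.h>
    #include <stdio.h>

    static void validate_cb(int i) { printf("validate cb %d\n", i); }

    static void check_colorbuffers(unsigned cb_target_mask, bool is_resolve)
    {
        unsigned tmp = cb_target_mask;

        /* RESOLVE reads CB0 and writes CB1, so force their nibbles on */
        if (is_resolve)
            tmp |= 0xff;

        for (int i = 0; i < 8; i++)
            if ((tmp >> (i * 4)) & 0xF)   /* any component enabled? */
                validate_cb(i);
    }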
*/ - if (track->is_resolve) { - tmp |= 0xff; - } - for (i = 0; i < 8; i++) { if ((tmp >> (i * 4)) & 0xF) { /* at least one component is enabled */ @@ -1298,11 +1281,6 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) track->nsamples = 1 << tmp; track->cb_dirty = true; break; - case R_028808_CB_COLOR_CONTROL: - tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx)); - track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX; - track->cb_dirty = true; - break; case R_0280A0_CB_COLOR0_INFO: case R_0280A4_CB_COLOR1_INFO: case R_0280A8_CB_COLOR2_INFO: @@ -1438,7 +1416,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case R_028118_CB_COLOR6_MASK: case R_02811C_CB_COLOR7_MASK: tmp = (reg - R_028100_CB_COLOR0_MASK) / 4; - track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx); + track->cb_color_mask[tmp] = ib[idx]; if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { track->cb_dirty = true; } diff --git a/trunk/drivers/gpu/drm/radeon/r600d.h b/trunk/drivers/gpu/drm/radeon/r600d.h index fa6f37099ba9..bdb69a63062f 100644 --- a/trunk/drivers/gpu/drm/radeon/r600d.h +++ b/trunk/drivers/gpu/drm/radeon/r600d.h @@ -66,14 +66,6 @@ #define CC_RB_BACKEND_DISABLE 0x98F4 #define BACKEND_DISABLE(x) ((x) << 16) -#define R_028808_CB_COLOR_CONTROL 0x28808 -#define S_028808_SPECIAL_OP(x) (((x) & 0x7) << 4) -#define G_028808_SPECIAL_OP(x) (((x) >> 4) & 0x7) -#define C_028808_SPECIAL_OP 0xFFFFFF8F -#define V_028808_SPECIAL_NORMAL 0x00 -#define V_028808_SPECIAL_DISABLE 0x01 -#define V_028808_SPECIAL_RESOLVE_BOX 0x07 - #define CB_COLOR0_BASE 0x28040 #define CB_COLOR1_BASE 0x28044 #define CB_COLOR2_BASE 0x28048 diff --git a/trunk/drivers/gpu/drm/radeon/radeon_device.c b/trunk/drivers/gpu/drm/radeon/radeon_device.c index 7a3daebd732d..d2e243867ac6 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_device.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_device.c @@ -1051,7 +1051,7 @@ int radeon_device_init(struct radeon_device *rdev, if (rdev->flags & RADEON_IS_AGP) rdev->need_dma32 = true; if ((rdev->flags & RADEON_IS_PCI) && - (rdev->family <= CHIP_RS740)) + (rdev->family < CHIP_RS400)) rdev->need_dma32 = true; dma_bits = rdev->need_dma32 ? 
32 : 40; @@ -1346,15 +1346,12 @@ int radeon_gpu_reset(struct radeon_device *rdev) for (i = 0; i < RADEON_NUM_RINGS; ++i) { radeon_ring_restore(rdev, &rdev->ring[i], ring_sizes[i], ring_data[i]); - ring_sizes[i] = 0; - ring_data[i] = NULL; } r = radeon_ib_ring_tests(rdev); if (r) { dev_err(rdev->dev, "ib ring test failed (%d).\n", r); if (saved) { - saved = false; radeon_suspend(rdev); goto retry; } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_drv.c b/trunk/drivers/gpu/drm/radeon/radeon_drv.c index 8c593ea82c41..27d22d709c90 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_drv.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_drv.c @@ -63,10 +63,9 @@ * 2.19.0 - r600-eg: MSAA textures * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query * 2.21.0 - r600-r700: FMASK and CMASK - * 2.22.0 - r600 only: RESOLVE_BOX allowed */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 22 +#define KMS_DRIVER_MINOR 21 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 b/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 index 20bfbda7b3f1..f93e45d869f4 100644 --- a/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 +++ b/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 @@ -744,6 +744,7 @@ r600 0x9400 0x00028C38 CB_CLRCMP_DST 0x00028C3C CB_CLRCMP_MSK 0x00028C34 CB_CLRCMP_SRC +0x00028808 CB_COLOR_CONTROL 0x0002842C CB_FOG_BLUE 0x00028428 CB_FOG_GREEN 0x00028424 CB_FOG_RED diff --git a/trunk/drivers/hwmon/asus_atk0110.c b/trunk/drivers/hwmon/asus_atk0110.c index 4ee578948723..351d1f4593e7 100644 --- a/trunk/drivers/hwmon/asus_atk0110.c +++ b/trunk/drivers/hwmon/asus_atk0110.c @@ -34,12 +34,6 @@ static const struct dmi_system_id __initconst atk_force_new_if[] = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58") } - }, { - /* Old interface reads the same sensor for fan0 and fan1 */ - .ident = "Asus M5A78L", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "M5A78L") - } }, { } }; diff --git a/trunk/drivers/s390/block/dasd_eckd.c b/trunk/drivers/s390/block/dasd_eckd.c index 2fb2b9ea97ec..40a826a7295f 100644 --- a/trunk/drivers/s390/block/dasd_eckd.c +++ b/trunk/drivers/s390/block/dasd_eckd.c @@ -3804,7 +3804,7 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) case BIODASDSYMMIO: return dasd_symm_io(device, argp); default: - return -ENOTTY; + return -ENOIOCTLCMD; } } diff --git a/trunk/drivers/s390/block/dasd_ioctl.c b/trunk/drivers/s390/block/dasd_ioctl.c index 654c6921a6d4..cceae70279f6 100644 --- a/trunk/drivers/s390/block/dasd_ioctl.c +++ b/trunk/drivers/s390/block/dasd_ioctl.c @@ -498,9 +498,12 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode, break; default: /* if the discipline has an ioctl method try it. 
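Review note on the dasd ioctl hunks: after this change the discipline signals "not my ioctl" with the kernel-internal ENOIOCTLCMD, and the dispatcher translates it at the userspace boundary (the other side of the diff used ENOTTY instead). A toy model of that convention, with discipline_ioctl() as a hypothetical stand-in for base->discipline->ioctl():

    #include <errno.h>

    #define ENOIOCTLCMD 515   /* kernel-internal "no such ioctl here" code */

    static int discipline_ioctl(unsigned int cmd)
    {
        (void)cmd;
        return -ENOIOCTLCMD;
    }

    static int dasd_ioctl_model(unsigned int cmd)
    {
        int rc = discipline_ioctl(cmd);

        /* ENOIOCTLCMD must never leak to userspace; the dispatcher
         * folds it back to EINVAL at the boundary */
        if (rc == -ENOIOCTLCMD)
            rc = -EINVAL;
        return rc;
    }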
*/ - rc = -ENOTTY; - if (base->discipline->ioctl) + if (base->discipline->ioctl) { rc = base->discipline->ioctl(block, cmd, argp); + if (rc == -ENOIOCTLCMD) + rc = -EINVAL; + } else + rc = -EINVAL; } dasd_put_device(base); return rc; diff --git a/trunk/drivers/watchdog/da9052_wdt.c b/trunk/drivers/watchdog/da9052_wdt.c index f7abbaeebcaf..3f75129eb0a9 100644 --- a/trunk/drivers/watchdog/da9052_wdt.c +++ b/trunk/drivers/watchdog/da9052_wdt.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include diff --git a/trunk/fs/btrfs/backref.c b/trunk/fs/btrfs/backref.c index ff6475f409d6..a256f3b2a845 100644 --- a/trunk/fs/btrfs/backref.c +++ b/trunk/fs/btrfs/backref.c @@ -1438,10 +1438,10 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, ret = extent_from_logical(fs_info, logical, path, &found_key); btrfs_release_path(path); + if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) + ret = -EINVAL; if (ret < 0) return ret; - if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) - return -EINVAL; extent_item_pos = logical - found_key.objectid; ret = iterate_extent_inodes(fs_info, found_key.objectid, diff --git a/trunk/fs/btrfs/compression.c b/trunk/fs/btrfs/compression.c index 43d1c5a3a030..86eff48dab78 100644 --- a/trunk/fs/btrfs/compression.c +++ b/trunk/fs/btrfs/compression.c @@ -818,7 +818,6 @@ static void free_workspace(int type, struct list_head *workspace) btrfs_compress_op[idx]->free_workspace(workspace); atomic_dec(alloc_workspace); wake: - smp_mb(); if (waitqueue_active(workspace_wait)) wake_up(workspace_wait); } diff --git a/trunk/fs/btrfs/ctree.c b/trunk/fs/btrfs/ctree.c index 6d183f60d63a..9d7621f271ff 100644 --- a/trunk/fs/btrfs/ctree.c +++ b/trunk/fs/btrfs/ctree.c @@ -420,6 +420,12 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, } spin_unlock(&fs_info->tree_mod_seq_lock); + /* + * we removed the lowest blocker from the blocker list, so there may be + * more processible delayed refs. + */ + wake_up(&fs_info->tree_mod_seq_wait); + /* * anything that's lower than the lowest existing (read: blocked) * sequence number can be removed from the tree. 
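Review note: the invariant stated just above (everything below the oldest live sequence number may be pruned from the tree mod log) is simple to state in isolation. A minimal model with tree_mod_seq_list reduced to a plain array:

    #include <stdio.h>

    /* the oldest live blocker bounds what may be pruned */
    static unsigned long gc_horizon(const unsigned long *live, int n)
    {
        unsigned long min = ~0UL;   /* empty list: everything is prunable */

        for (int i = 0; i < n; i++)
            if (live[i] < min)
                min = live[i];
        return min;
    }

    int main(void)
    {
        unsigned long live[] = { 42, 17, 99 };

        printf("prune ops with seq < %lu\n", gc_horizon(live, 3)); /* 17 */
        return 0;
    }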
@@ -625,9 +631,6 @@ __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb) u32 nritems; int ret; - if (btrfs_header_level(eb) == 0) - return; - nritems = btrfs_header_nritems(eb); for (i = nritems - 1; i >= 0; i--) { ret = tree_mod_log_insert_key_locked(fs_info, eb, i, diff --git a/trunk/fs/btrfs/ctree.h b/trunk/fs/btrfs/ctree.h index 0d195b507660..4bab807227ad 100644 --- a/trunk/fs/btrfs/ctree.h +++ b/trunk/fs/btrfs/ctree.h @@ -1252,6 +1252,7 @@ struct btrfs_fs_info { atomic_t tree_mod_seq; struct list_head tree_mod_seq_list; struct seq_list tree_mod_seq_elem; + wait_queue_head_t tree_mod_seq_wait; /* this protects tree_mod_log */ rwlock_t tree_mod_log_lock; @@ -3191,7 +3192,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, struct bio *bio, u32 *dst); int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, - struct bio *bio, u64 logical_offset); + struct bio *bio, u64 logical_offset, u32 *dst); int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 objectid, u64 pos, diff --git a/trunk/fs/btrfs/delayed-inode.c b/trunk/fs/btrfs/delayed-inode.c index 07d5eeb1e6f1..335605c8ceab 100644 --- a/trunk/fs/btrfs/delayed-inode.c +++ b/trunk/fs/btrfs/delayed-inode.c @@ -512,8 +512,8 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item) rb_erase(&delayed_item->rb_node, root); delayed_item->delayed_node->count--; - if (atomic_dec_return(&delayed_root->items) < - BTRFS_DELAYED_BACKGROUND && + atomic_dec(&delayed_root->items); + if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND && waitqueue_active(&delayed_root->wait)) wake_up(&delayed_root->wait); } @@ -1028,10 +1028,9 @@ static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans, btrfs_release_delayed_item(prev); ret = 0; btrfs_release_path(path); - if (curr) { - mutex_unlock(&node->mutex); + if (curr) goto do_again; - } else + else goto delete_fail; } @@ -1056,7 +1055,8 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node) delayed_node->count--; delayed_root = delayed_node->root->fs_info->delayed_root; - if (atomic_dec_return(&delayed_root->items) < + atomic_dec(&delayed_root->items); + if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND && waitqueue_active(&delayed_root->wait)) wake_up(&delayed_root->wait); diff --git a/trunk/fs/btrfs/delayed-ref.c b/trunk/fs/btrfs/delayed-ref.c index ae9411773397..da7419ed01bb 100644 --- a/trunk/fs/btrfs/delayed-ref.c +++ b/trunk/fs/btrfs/delayed-ref.c @@ -38,14 +38,17 @@ static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2, struct btrfs_delayed_tree_ref *ref1) { - if (ref1->root < ref2->root) - return -1; - if (ref1->root > ref2->root) - return 1; - if (ref1->parent < ref2->parent) - return -1; - if (ref1->parent > ref2->parent) - return 1; + if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) { + if (ref1->root < ref2->root) + return -1; + if (ref1->root > ref2->root) + return 1; + } else { + if (ref1->parent < ref2->parent) + return -1; + if (ref1->parent > ref2->parent) + return 1; + } return 0; } @@ -82,8 +85,7 @@ static int comp_data_refs(struct btrfs_delayed_data_ref *ref2, * type of the delayed backrefs and content of delayed backrefs. 
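Review note: for orientation, comp_entry() below imposes a lexicographic order on delayed refs: bytenr first, then ref type, then (unconditionally, after this change) the sequence number, then the type-specific fields. A compacted model with the payload comparison elided:

    /* compacted model of the delayed-ref ordering; payload compare elided */
    struct ref_key {
        unsigned long long bytenr;
        int                type;
        unsigned long long seq;
    };

    static int cmp_u64(unsigned long long a, unsigned long long b)
    {
        return (a < b) ? -1 : (a > b) ? 1 : 0;
    }

    static int comp_entry_model(const struct ref_key *a, const struct ref_key *b)
    {
        int c;

        if ((c = cmp_u64(a->bytenr, b->bytenr)))
            return c;
        if ((c = (a->type > b->type) - (a->type < b->type)))
            return c;
        /* refs separated by a sequence barrier must stay distinct */
        return cmp_u64(a->seq, b->seq);
    }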
*/ static int comp_entry(struct btrfs_delayed_ref_node *ref2, - struct btrfs_delayed_ref_node *ref1, - bool compare_seq) + struct btrfs_delayed_ref_node *ref1) { if (ref1->bytenr < ref2->bytenr) return -1; @@ -100,12 +102,10 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2, if (ref1->type > ref2->type) return 1; /* merging of sequenced refs is not allowed */ - if (compare_seq) { - if (ref1->seq < ref2->seq) - return -1; - if (ref1->seq > ref2->seq) - return 1; - } + if (ref1->seq < ref2->seq) + return -1; + if (ref1->seq > ref2->seq) + return 1; if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY || ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) { return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2), @@ -139,7 +139,7 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root, entry = rb_entry(parent_node, struct btrfs_delayed_ref_node, rb_node); - cmp = comp_entry(entry, ins, 1); + cmp = comp_entry(entry, ins); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) @@ -233,114 +233,6 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, return 0; } -static void inline drop_delayed_ref(struct btrfs_trans_handle *trans, - struct btrfs_delayed_ref_root *delayed_refs, - struct btrfs_delayed_ref_node *ref) -{ - rb_erase(&ref->rb_node, &delayed_refs->root); - ref->in_tree = 0; - btrfs_put_delayed_ref(ref); - delayed_refs->num_entries--; - if (trans->delayed_ref_updates) - trans->delayed_ref_updates--; -} - -static int merge_ref(struct btrfs_trans_handle *trans, - struct btrfs_delayed_ref_root *delayed_refs, - struct btrfs_delayed_ref_node *ref, u64 seq) -{ - struct rb_node *node; - int merged = 0; - int mod = 0; - int done = 0; - - node = rb_prev(&ref->rb_node); - while (node) { - struct btrfs_delayed_ref_node *next; - - next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); - node = rb_prev(node); - if (next->bytenr != ref->bytenr) - break; - if (seq && next->seq >= seq) - break; - if (comp_entry(ref, next, 0)) - continue; - - if (ref->action == next->action) { - mod = next->ref_mod; - } else { - if (ref->ref_mod < next->ref_mod) { - struct btrfs_delayed_ref_node *tmp; - - tmp = ref; - ref = next; - next = tmp; - done = 1; - } - mod = -next->ref_mod; - } - - merged++; - drop_delayed_ref(trans, delayed_refs, next); - ref->ref_mod += mod; - if (ref->ref_mod == 0) { - drop_delayed_ref(trans, delayed_refs, ref); - break; - } else { - /* - * You can't have multiples of the same ref on a tree - * block. 
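Review note: the merge arithmetic deleted in this hunk folds two adjacent refs for the same extent into one. Stripped of the swap the kernel performs when the drop outweighs the add, the bookkeeping reduces to this sketch:

    /* toy fold of ref b into ref a; action: +1 for add, -1 for drop */
    struct dref {
        int action;
        int ref_mod;
    };

    /* returns the surviving ref_mod; 0 means both refs cancel and the
     * caller drops 'a' as well */
    static int fold_refs(struct dref *a, const struct dref *b)
    {
        if (a->action == b->action)
            a->ref_mod += b->ref_mod;   /* same direction: accumulate */
        else
            a->ref_mod -= b->ref_mod;   /* add vs drop: cancel */
        return a->ref_mod;
    }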
- */ - WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY || - ref->type == BTRFS_SHARED_BLOCK_REF_KEY); - } - - if (done) - break; - node = rb_prev(&ref->rb_node); - } - - return merged; -} - -void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, - struct btrfs_delayed_ref_root *delayed_refs, - struct btrfs_delayed_ref_head *head) -{ - struct rb_node *node; - u64 seq = 0; - - spin_lock(&fs_info->tree_mod_seq_lock); - if (!list_empty(&fs_info->tree_mod_seq_list)) { - struct seq_list *elem; - - elem = list_first_entry(&fs_info->tree_mod_seq_list, - struct seq_list, list); - seq = elem->seq; - } - spin_unlock(&fs_info->tree_mod_seq_lock); - - node = rb_prev(&head->node.rb_node); - while (node) { - struct btrfs_delayed_ref_node *ref; - - ref = rb_entry(node, struct btrfs_delayed_ref_node, - rb_node); - if (ref->bytenr != head->node.bytenr) - break; - - /* We can't merge refs that are outside of our seq count */ - if (seq && ref->seq >= seq) - break; - if (merge_ref(trans, delayed_refs, ref, seq)) - node = rb_prev(&head->node.rb_node); - else - node = rb_prev(node); - } -} - int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_root *delayed_refs, u64 seq) @@ -444,11 +336,18 @@ update_existing_ref(struct btrfs_trans_handle *trans, * every changing the extent allocation tree. */ existing->ref_mod--; - if (existing->ref_mod == 0) - drop_delayed_ref(trans, delayed_refs, existing); - else + if (existing->ref_mod == 0) { + rb_erase(&existing->rb_node, + &delayed_refs->root); + existing->in_tree = 0; + btrfs_put_delayed_ref(existing); + delayed_refs->num_entries--; + if (trans->delayed_ref_updates) + trans->delayed_ref_updates--; + } else { WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY || existing->type == BTRFS_SHARED_BLOCK_REF_KEY); + } } else { WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY || existing->type == BTRFS_SHARED_BLOCK_REF_KEY); @@ -763,6 +662,9 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info, add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr, num_bytes, parent, ref_root, level, action, for_cow); + if (!need_ref_seq(for_cow, ref_root) && + waitqueue_active(&fs_info->tree_mod_seq_wait)) + wake_up(&fs_info->tree_mod_seq_wait); spin_unlock(&delayed_refs->lock); if (need_ref_seq(for_cow, ref_root)) btrfs_qgroup_record_ref(trans, &ref->node, extent_op); @@ -811,6 +713,9 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info, add_delayed_data_ref(fs_info, trans, &ref->node, bytenr, num_bytes, parent, ref_root, owner, offset, action, for_cow); + if (!need_ref_seq(for_cow, ref_root) && + waitqueue_active(&fs_info->tree_mod_seq_wait)) + wake_up(&fs_info->tree_mod_seq_wait); spin_unlock(&delayed_refs->lock); if (need_ref_seq(for_cow, ref_root)) btrfs_qgroup_record_ref(trans, &ref->node, extent_op); @@ -839,6 +744,8 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, num_bytes, BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data); + if (waitqueue_active(&fs_info->tree_mod_seq_wait)) + wake_up(&fs_info->tree_mod_seq_wait); spin_unlock(&delayed_refs->lock); return 0; } diff --git a/trunk/fs/btrfs/delayed-ref.h b/trunk/fs/btrfs/delayed-ref.h index ab5300595847..0d7c90c366b6 100644 --- a/trunk/fs/btrfs/delayed-ref.h +++ b/trunk/fs/btrfs/delayed-ref.h @@ -167,10 +167,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, struct btrfs_delayed_extent_op *extent_op); -void btrfs_merge_delayed_refs(struct 
btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, - struct btrfs_delayed_ref_root *delayed_refs, - struct btrfs_delayed_ref_head *head); struct btrfs_delayed_ref_head * btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); diff --git a/trunk/fs/btrfs/disk-io.c b/trunk/fs/btrfs/disk-io.c index 22e98e04c2ea..62e0cafd6e25 100644 --- a/trunk/fs/btrfs/disk-io.c +++ b/trunk/fs/btrfs/disk-io.c @@ -377,13 +377,9 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, ret = read_extent_buffer_pages(io_tree, eb, start, WAIT_COMPLETE, btree_get_extent, mirror_num); - if (!ret) { - if (!verify_parent_transid(io_tree, eb, + if (!ret && !verify_parent_transid(io_tree, eb, parent_transid, 0)) - break; - else - ret = -EIO; - } + break; /* * This buffer's crc is fine, but its contents are corrupted, so @@ -758,7 +754,9 @@ static void run_one_async_done(struct btrfs_work *work) limit = btrfs_async_submit_limit(fs_info); limit = limit * 2 / 3; - if (atomic_dec_return(&fs_info->nr_async_submits) < limit && + atomic_dec(&fs_info->nr_async_submits); + + if (atomic_read(&fs_info->nr_async_submits) < limit && waitqueue_active(&fs_info->async_submit_wait)) wake_up(&fs_info->async_submit_wait); @@ -2034,6 +2032,8 @@ int open_ctree(struct super_block *sb, fs_info->free_chunk_space = 0; fs_info->tree_mod_log = RB_ROOT; + init_waitqueue_head(&fs_info->tree_mod_seq_wait); + /* readahead state */ INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT); spin_lock_init(&fs_info->reada_lock); @@ -2528,7 +2528,8 @@ int open_ctree(struct super_block *sb, goto fail_trans_kthread; /* do not make disk changes in broken FS */ - if (btrfs_super_log_root(disk_super) != 0) { + if (btrfs_super_log_root(disk_super) != 0 && + !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) { u64 bytenr = btrfs_super_log_root(disk_super); if (fs_devices->rw_devices == 0) { @@ -3188,14 +3189,30 @@ int close_ctree(struct btrfs_root *root) /* clear out the rbtree of defraggable inodes */ btrfs_run_defrag_inodes(fs_info); + /* + * Here come 2 situations when btrfs is broken to flip readonly: + * + * 1. when btrfs flips readonly somewhere else before + * btrfs_commit_super, sb->s_flags has MS_RDONLY flag, + * and btrfs will skip to write sb directly to keep + * ERROR state on disk. + * + * 2. when btrfs flips readonly just in btrfs_commit_super, + * and in such case, btrfs cannot write sb via btrfs_commit_super, + * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag, + * btrfs will cleanup all FS resources first and write sb then. 
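Review note: the two situations this new comment walks through boil down to a small decision at unmount time. A control-flow model with illustrative flag values (the real MS_RDONLY and BTRFS_SUPER_FLAG_ERROR values differ):

    #include <stdio.h>

    #define MS_RDONLY              0x1   /* illustrative values only */
    #define BTRFS_SUPER_FLAG_ERROR 0x2

    static int commit_super(void)       { puts("commit super");       return 0; }
    static int error_commit_super(void) { puts("cleanup + write sb"); return 0; }

    /* model of close_ctree() after this change: the error path writes the
     * superblock itself, so the on-disk ERROR state survives the unmount */
    static void close_ctree_model(unsigned sb_flags, unsigned fs_state)
    {
        if (!(sb_flags & MS_RDONLY))
            commit_super();
        if (fs_state & BTRFS_SUPER_FLAG_ERROR)
            error_commit_super();
    }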
+ */ if (!(fs_info->sb->s_flags & MS_RDONLY)) { ret = btrfs_commit_super(root); if (ret) printk(KERN_ERR "btrfs: commit super ret %d\n", ret); } - if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) - btrfs_error_commit_super(root); + if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { + ret = btrfs_error_commit_super(root); + if (ret) + printk(KERN_ERR "btrfs: commit super ret %d\n", ret); + } btrfs_put_block_group_cache(fs_info); @@ -3417,11 +3434,18 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, if (read_only) return 0; + if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { + printk(KERN_WARNING "warning: mount fs with errors, " + "running btrfsck is recommended\n"); + } + return 0; } -void btrfs_error_commit_super(struct btrfs_root *root) +int btrfs_error_commit_super(struct btrfs_root *root) { + int ret; + mutex_lock(&root->fs_info->cleaner_mutex); btrfs_run_delayed_iputs(root); mutex_unlock(&root->fs_info->cleaner_mutex); @@ -3431,6 +3455,10 @@ void btrfs_error_commit_super(struct btrfs_root *root) /* cleanup FS via transaction */ btrfs_cleanup_transaction(root); + + ret = write_ctree_super(NULL, root, 0); + + return ret; } static void btrfs_destroy_ordered_operations(struct btrfs_root *root) @@ -3754,17 +3782,14 @@ int btrfs_cleanup_transaction(struct btrfs_root *root) /* FIXME: cleanup wait for commit */ t->in_commit = 1; t->blocked = 1; - smp_mb(); if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) wake_up(&root->fs_info->transaction_blocked_wait); t->blocked = 0; - smp_mb(); if (waitqueue_active(&root->fs_info->transaction_wait)) wake_up(&root->fs_info->transaction_wait); t->commit_done = 1; - smp_mb(); if (waitqueue_active(&t->commit_wait)) wake_up(&t->commit_wait); diff --git a/trunk/fs/btrfs/disk-io.h b/trunk/fs/btrfs/disk-io.h index c5b00a735fef..95e147eea239 100644 --- a/trunk/fs/btrfs/disk-io.h +++ b/trunk/fs/btrfs/disk-io.h @@ -54,7 +54,7 @@ int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root *root, int max_mirrors); struct buffer_head *btrfs_read_dev_super(struct block_device *bdev); int btrfs_commit_super(struct btrfs_root *root); -void btrfs_error_commit_super(struct btrfs_root *root); +int btrfs_error_commit_super(struct btrfs_root *root); struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize); struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, diff --git a/trunk/fs/btrfs/extent-tree.c b/trunk/fs/btrfs/extent-tree.c index ba58024d40d3..4e1b153b7c47 100644 --- a/trunk/fs/btrfs/extent-tree.c +++ b/trunk/fs/btrfs/extent-tree.c @@ -2251,16 +2251,6 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, } } - /* - * We need to try and merge add/drops of the same ref since we - * can run into issues with relocate dropping the implicit ref - * and then it being added back again before the drop can - * finish. If we merged anything we need to re-loop so we can - * get a good ref. 
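Review note: the race the deleted comment describes is easy to reproduce in miniature: relocation queues a drop, the ref is re-added, and running the two entries separately lets the refcount touch zero in between. A toy run:

    #include <stdio.h>

    int main(void)
    {
        int actions[] = { -1, +1 };  /* drop queued first, add right behind */
        int refcount = 1, merged_net = 0;

        for (int i = 0; i < 2; i++) {
            merged_net += actions[i];
            refcount   += actions[i];
            if (refcount == 0)
                puts("unmerged: extent hits refcount 0 mid-sequence");
        }
        if (merged_net == 0)
            puts("merged: net change is 0, the extent is never freed");
        return 0;
    }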
- */ - btrfs_merge_delayed_refs(trans, fs_info, delayed_refs, - locked_ref); - /* * locked_ref is the head node, so we have to go one * node back for any delayed ref updates @@ -2328,23 +2318,12 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, ref->in_tree = 0; rb_erase(&ref->rb_node, &delayed_refs->root); delayed_refs->num_entries--; - if (locked_ref) { - /* - * when we play the delayed ref, also correct the - * ref_mod on head - */ - switch (ref->action) { - case BTRFS_ADD_DELAYED_REF: - case BTRFS_ADD_DELAYED_EXTENT: - locked_ref->node.ref_mod -= ref->ref_mod; - break; - case BTRFS_DROP_DELAYED_REF: - locked_ref->node.ref_mod += ref->ref_mod; - break; - default: - WARN_ON(1); - } - } + /* + * we modified num_entries, but as we're currently running + * delayed refs, skip + * wake_up(&delayed_refs->seq_wait); + * here. + */ spin_unlock(&delayed_refs->lock); ret = run_one_delayed_ref(trans, root, ref, extent_op, @@ -2371,6 +2350,22 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, return count; } +static void wait_for_more_refs(struct btrfs_fs_info *fs_info, + struct btrfs_delayed_ref_root *delayed_refs, + unsigned long num_refs, + struct list_head *first_seq) +{ + spin_unlock(&delayed_refs->lock); + pr_debug("waiting for more refs (num %ld, first %p)\n", + num_refs, first_seq); + wait_event(fs_info->tree_mod_seq_wait, + num_refs != delayed_refs->num_entries || + fs_info->tree_mod_seq_list.next != first_seq); + pr_debug("done waiting for more refs (num %ld, first %p)\n", + delayed_refs->num_entries, fs_info->tree_mod_seq_list.next); + spin_lock(&delayed_refs->lock); +} + #ifdef SCRAMBLE_DELAYED_REFS /* * Normally delayed refs get processed in ascending bytenr order. This @@ -2465,11 +2460,13 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_delayed_ref_node *ref; struct list_head cluster; + struct list_head *first_seq = NULL; int ret; u64 delayed_start; int run_all = count == (unsigned long)-1; int run_most = 0; - int loops; + unsigned long num_refs = 0; + int consider_waiting; /* We'll clean this up in btrfs_cleanup_transaction */ if (trans->aborted) @@ -2487,7 +2484,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, delayed_refs = &trans->transaction->delayed_refs; INIT_LIST_HEAD(&cluster); again: - loops = 0; + consider_waiting = 0; spin_lock(&delayed_refs->lock); #ifdef SCRAMBLE_DELAYED_REFS @@ -2515,6 +2512,31 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, if (ret) break; + if (delayed_start >= delayed_refs->run_delayed_start) { + if (consider_waiting == 0) { + /* + * btrfs_find_ref_cluster looped. let's do one + * more cycle. if we don't run any delayed ref + * during that cycle (because we can't because + * all of them are blocked) and if the number of + * refs doesn't change, we avoid busy waiting. + */ + consider_waiting = 1; + num_refs = delayed_refs->num_entries; + first_seq = root->fs_info->tree_mod_seq_list.next; + } else { + wait_for_more_refs(root->fs_info, delayed_refs, + num_refs, first_seq); + /* + * after waiting, things have changed. we + * dropped the lock and someone else might have + * run some refs, built new clusters and so on. + * therefore, we restart staleness detection. 
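Review note: wait_for_more_refs() above is a classic "sleep until either of two observations changes" construction. A userspace analogue, with a condition variable standing in for fs_info->tree_mod_seq_wait (signalled by the ref-add paths in the driver):

    #include <pthread.h>

    struct waiter_state {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        unsigned long   num_entries;   /* delayed_refs->num_entries */
        void           *first_seq;     /* tree_mod_seq_list.next */
    };

    /* block until the ref count or the head of the blocker list moves,
     * instead of spinning on the lock */
    static void wait_for_more_refs_model(struct waiter_state *s,
                                         unsigned long num_refs,
                                         void *first_seq)
    {
        pthread_mutex_lock(&s->lock);
        while (s->num_entries == num_refs && s->first_seq == first_seq)
            pthread_cond_wait(&s->cond, &s->lock);
        pthread_mutex_unlock(&s->lock);
    }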
+ */ + consider_waiting = 0; + } + } + ret = run_clustered_refs(trans, root, &cluster); if (ret < 0) { spin_unlock(&delayed_refs->lock); @@ -2527,26 +2549,9 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, if (count == 0) break; - if (delayed_start >= delayed_refs->run_delayed_start) { - if (loops == 0) { - /* - * btrfs_find_ref_cluster looped. let's do one - * more cycle. if we don't run any delayed ref - * during that cycle (because we can't because - * all of them are blocked), bail out. - */ - loops = 1; - } else { - /* - * no runnable refs left, stop trying - */ - BUG_ON(run_all); - break; - } - } - if (ret) { + if (ret || delayed_refs->run_delayed_start == 0) { /* refs were run, let's reset staleness detection */ - loops = 0; + consider_waiting = 0; } } @@ -3002,16 +3007,17 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group, } spin_unlock(&block_group->lock); - /* - * Try to preallocate enough space based on how big the block group is. - * Keep in mind this has to include any pinned space which could end up - * taking up quite a bit since it's not folded into the other space - * cache. - */ - num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024); + num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024); if (!num_pages) num_pages = 1; + /* + * Just to make absolutely sure we have enough space, we're going to + * preallocate 12 pages worth of space for each block group. In + * practice we ought to use at most 8, but we need extra space so we can + * add our header and have a terminator between the extents and the + * bitmaps. + */ num_pages *= 16; num_pages *= PAGE_CACHE_SIZE; @@ -4565,10 +4571,8 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) if (root->fs_info->quota_enabled) { ret = btrfs_qgroup_reserve(root, num_bytes + nr_extents * root->leafsize); - if (ret) { - mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); + if (ret) return ret; - } } ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); @@ -5290,6 +5294,9 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, rb_erase(&head->node.rb_node, &delayed_refs->root); delayed_refs->num_entries--; + smp_mb(); + if (waitqueue_active(&root->fs_info->tree_mod_seq_wait)) + wake_up(&root->fs_info->tree_mod_seq_wait); /* * we don't take a ref on the node because we're removing it from the diff --git a/trunk/fs/btrfs/extent_io.c b/trunk/fs/btrfs/extent_io.c index 4c878476bb91..45c81bb4ac82 100644 --- a/trunk/fs/btrfs/extent_io.c +++ b/trunk/fs/btrfs/extent_io.c @@ -2330,10 +2330,23 @@ static void end_bio_extent_readpage(struct bio *bio, int err) if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { ret = tree->ops->readpage_end_io_hook(page, start, end, state, mirror); - if (ret) + if (ret) { + /* no IO indicated but software detected errors + * in the block, either checksum errors or + * issues with the contents */ + struct btrfs_root *root = + BTRFS_I(page->mapping->host)->root; + struct btrfs_device *device; + uptodate = 0; - else + device = btrfs_find_device_for_logical( + root, start, mirror); + if (device) + btrfs_dev_stat_inc_and_print(device, + BTRFS_DEV_STAT_CORRUPTION_ERRS); + } else { clean_io_failure(start, page); + } } if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) { diff --git a/trunk/fs/btrfs/file-item.c b/trunk/fs/btrfs/file-item.c index 857d93cd01dc..b45b9de0c21d 100644 --- a/trunk/fs/btrfs/file-item.c +++ b/trunk/fs/btrfs/file-item.c @@ -272,9 +272,9 @@ int 
btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, } int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, - struct bio *bio, u64 offset) + struct bio *bio, u64 offset, u32 *dst) { - return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1); + return __btrfs_lookup_bio_sums(root, inode, bio, offset, dst, 1); } int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c index ec154f954646..6e8f416773d4 100644 --- a/trunk/fs/btrfs/inode.c +++ b/trunk/fs/btrfs/inode.c @@ -1008,7 +1008,9 @@ static noinline void async_cow_submit(struct btrfs_work *work) nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> PAGE_CACHE_SHIFT; - if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) < + atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages); + + if (atomic_read(&root->fs_info->async_delalloc_pages) < 5 * 1024 * 1024 && waitqueue_active(&root->fs_info->async_submit_wait)) wake_up(&root->fs_info->async_submit_wait); @@ -1883,11 +1885,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) trans = btrfs_join_transaction_nolock(root); else trans = btrfs_join_transaction(root); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - trans = NULL; - goto out; - } + if (IS_ERR(trans)) + return PTR_ERR(trans); trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_update_inode_fallback(trans, root, inode); if (ret) /* -ENOMEM or corruption */ @@ -3175,7 +3174,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, btrfs_i_size_write(dir, dir->i_size - name_len * 2); inode_inc_iversion(dir); dir->i_mtime = dir->i_ctime = CURRENT_TIME; - ret = btrfs_update_inode_fallback(trans, root, dir); + ret = btrfs_update_inode(trans, root, dir); if (ret) btrfs_abort_transaction(trans, root, ret); out: @@ -5775,112 +5774,18 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, return ret; } -static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, - struct extent_state **cached_state, int writing) -{ - struct btrfs_ordered_extent *ordered; - int ret = 0; - - while (1) { - lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, - 0, cached_state); - /* - * We're concerned with the entire range that we're going to be - * doing DIO to, so we need to make sure theres no ordered - * extents in this range. - */ - ordered = btrfs_lookup_ordered_range(inode, lockstart, - lockend - lockstart + 1); - - /* - * We need to make sure there are no buffered pages in this - * range either, we could have raced between the invalidate in - * generic_file_direct_write and locking the extent. The - * invalidate needs to happen so that reads after a write do not - * get stale data. - */ - if (!ordered && (!writing || - !test_range_bit(&BTRFS_I(inode)->io_tree, - lockstart, lockend, EXTENT_UPTODATE, 0, - *cached_state))) - break; - - unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, - cached_state, GFP_NOFS); - - if (ordered) { - btrfs_start_ordered_extent(inode, ordered, 1); - btrfs_put_ordered_extent(ordered); - } else { - /* Screw you mmap */ - ret = filemap_write_and_wait_range(inode->i_mapping, - lockstart, - lockend); - if (ret) - break; - - /* - * If we found a page that couldn't be invalidated just - * fall back to buffered. 
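Review note: the lock_extent_direct() loop being deleted here has a simple shape: take the extent lock, leave the loop only when neither an ordered extent nor stale pagecache overlaps the DIO range, otherwise drop the lock, flush, and retry; if a page cannot be invalidated (mmap pinning it), fall back to buffered I/O. A sketch with hypothetical helpers in place of the extent-io calls:

    #include <errno.h>
    #include <stdbool.h>

    /* all helpers are hypothetical stand-ins for the extent-io calls */
    static void lock_range(void)            {}
    static void unlock_range(void)          {}
    static bool ordered_in_range(void)      { return false; }
    static bool buffered_in_range(void)     { return false; }
    static int  flush_and_invalidate(void)  { return 0; }

    static int lock_extent_direct_model(void)
    {
        for (;;) {
            lock_range();
            if (!ordered_in_range() && !buffered_in_range())
                return 0;          /* locked, safe to issue direct I/O */
            unlock_range();
            if (flush_and_invalidate())
                return -ENOTBLK;   /* pinned page: fall back to buffered */
        }
    }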
diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c
index ec154f954646..6e8f416773d4 100644
--- a/trunk/fs/btrfs/inode.c
+++ b/trunk/fs/btrfs/inode.c
@@ -1008,7 +1008,9 @@ static noinline void async_cow_submit(struct btrfs_work *work)
 	nr_pages = (async_cow->end - async_cow->start +
 		    PAGE_CACHE_SIZE) >> PAGE_CACHE_SHIFT;
 
-	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
+	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
+
+	if (atomic_read(&root->fs_info->async_delalloc_pages) <
 	    5 * 1024 * 1024 &&
 	    waitqueue_active(&root->fs_info->async_submit_wait))
 		wake_up(&root->fs_info->async_submit_wait);
@@ -1883,11 +1885,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 		trans = btrfs_join_transaction_nolock(root);
 	else
 		trans = btrfs_join_transaction(root);
-	if (IS_ERR(trans)) {
-		ret = PTR_ERR(trans);
-		trans = NULL;
-		goto out;
-	}
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 	ret = btrfs_update_inode_fallback(trans, root, inode);
 	if (ret) /* -ENOMEM or corruption */
@@ -3175,7 +3174,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
 	inode_inc_iversion(dir);
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
-	ret = btrfs_update_inode_fallback(trans, root, dir);
+	ret = btrfs_update_inode(trans, root, dir);
 	if (ret)
 		btrfs_abort_transaction(trans, root, ret);
 out:
@@ -5775,112 +5774,18 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
-			      struct extent_state **cached_state, int writing)
-{
-	struct btrfs_ordered_extent *ordered;
-	int ret = 0;
-
-	while (1) {
-		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-				 0, cached_state);
-		/*
-		 * We're concerned with the entire range that we're going to be
-		 * doing DIO to, so we need to make sure theres no ordered
-		 * extents in this range.
-		 */
-		ordered = btrfs_lookup_ordered_range(inode, lockstart,
-						     lockend - lockstart + 1);
-
-		/*
-		 * We need to make sure there are no buffered pages in this
-		 * range either, we could have raced between the invalidate in
-		 * generic_file_direct_write and locking the extent.  The
-		 * invalidate needs to happen so that reads after a write do not
-		 * get stale data.
-		 */
-		if (!ordered && (!writing ||
-		    !test_range_bit(&BTRFS_I(inode)->io_tree,
-				    lockstart, lockend, EXTENT_UPTODATE, 0,
-				    *cached_state)))
-			break;
-
-		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-				     cached_state, GFP_NOFS);
-
-		if (ordered) {
-			btrfs_start_ordered_extent(inode, ordered, 1);
-			btrfs_put_ordered_extent(ordered);
-		} else {
-			/* Screw you mmap */
-			ret = filemap_write_and_wait_range(inode->i_mapping,
-							   lockstart,
-							   lockend);
-			if (ret)
-				break;
-
-			/*
-			 * If we found a page that couldn't be invalidated just
-			 * fall back to buffered.
-			 */
-			ret = invalidate_inode_pages2_range(inode->i_mapping,
-					lockstart >> PAGE_CACHE_SHIFT,
-					lockend >> PAGE_CACHE_SHIFT);
-			if (ret)
-				break;
-		}
-
-		cond_resched();
-	}
-
-	return ret;
-}
-
 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 				   struct buffer_head *bh_result, int create)
 {
 	struct extent_map *em;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	struct extent_state *cached_state = NULL;
 	u64 start = iblock << inode->i_blkbits;
-	u64 lockstart, lockend;
 	u64 len = bh_result->b_size;
 	struct btrfs_trans_handle *trans;
-	int unlock_bits = EXTENT_LOCKED;
-	int ret;
-
-	if (create) {
-		ret = btrfs_delalloc_reserve_space(inode, len);
-		if (ret)
-			return ret;
-		unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
-	} else {
-		len = min_t(u64, len, root->sectorsize);
-	}
-
-	lockstart = start;
-	lockend = start + len - 1;
-
-	/*
-	 * If this errors out it's because we couldn't invalidate pagecache for
-	 * this range and we need to fallback to buffered.
-	 */
-	if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
-		return -ENOTBLK;
-
-	if (create) {
-		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
-				     lockend, EXTENT_DELALLOC, NULL,
-				     &cached_state, GFP_NOFS);
-		if (ret)
-			goto unlock_err;
-	}
 
 	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
-	if (IS_ERR(em)) {
-		ret = PTR_ERR(em);
-		goto unlock_err;
-	}
+	if (IS_ERR(em))
+		return PTR_ERR(em);
 
 	/*
 	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
@@ -5899,16 +5804,17 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
 	    em->block_start == EXTENT_MAP_INLINE) {
 		free_extent_map(em);
-		ret = -ENOTBLK;
-		goto unlock_err;
+		return -ENOTBLK;
 	}
 
 	/* Just a good old fashioned hole, return */
 	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
 			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
 		free_extent_map(em);
-		ret = 0;
-		goto unlock_err;
+		/* DIO will do one hole at a time, so just unlock a sector */
+		unlock_extent(&BTRFS_I(inode)->io_tree, start,
+			      start + root->sectorsize - 1);
+		return 0;
 	}
 
 	/*
@@ -5921,9 +5827,8 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 	 *
 	 */
 	if (!create) {
-		len = min(len, em->len - (start - em->start));
-		lockstart = start + len;
-		goto unlock;
+		len = em->len - (start - em->start);
+		goto map;
 	}
 
 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
@@ -5955,7 +5860,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 			btrfs_end_transaction(trans, root);
 			if (ret) {
 				free_extent_map(em);
-				goto unlock_err;
+				return ret;
 			}
 			goto unlock;
 		}
@@ -5968,12 +5873,14 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 		 */
 		len = bh_result->b_size;
 		em = btrfs_new_extent_direct(inode, em, start, len);
-		if (IS_ERR(em)) {
-			ret = PTR_ERR(em);
-			goto unlock_err;
-		}
+		if (IS_ERR(em))
+			return PTR_ERR(em);
 		len = min(len, em->len - (start - em->start));
 unlock:
+	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
+			 EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
+			 0, NULL, GFP_NOFS);
+map:
 	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
 		inode->i_blkbits;
 	bh_result->b_size = len;
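The `map:` path above is the core of any get_block-style callback: an extent map records that file range [em_start, em_start + em_len) lives at byte em_block_start on disk, and the requested file offset is converted into a disk block number in i_blkbits units. The arithmetic in isolation (a sketch with illustrative parameter names):

#include <stdint.h>

static uint64_t offset_to_blocknr(uint64_t start,		/* file offset being mapped */
				  uint64_t em_start,		/* extent's file offset */
				  uint64_t em_block_start,	/* extent's disk offset */
				  unsigned int blkbits)
{
	/* byte position on disk of 'start', expressed in blocks */
	return (em_block_start + (start - em_start)) >> blkbits;
}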
@@ -5991,44 +5898,9 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 		i_size_write(inode, start + len);
 	}
 
-	/*
-	 * In the case of write we need to clear and unlock the entire range,
-	 * in the case of read we need to unlock only the end area that we
-	 * aren't using if there is any leftover space.
-	 */
-	if (lockstart < lockend) {
-		if (create && len < lockend - lockstart) {
-			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
-					 lockstart + len - 1, unlock_bits, 1, 0,
-					 &cached_state, GFP_NOFS);
-			/*
-			 * Beside unlock, we also need to cleanup reserved space
-			 * for the left range by attaching EXTENT_DO_ACCOUNTING.
-			 */
-			clear_extent_bit(&BTRFS_I(inode)->io_tree,
-					 lockstart + len, lockend,
-					 unlock_bits | EXTENT_DO_ACCOUNTING,
-					 1, 0, NULL, GFP_NOFS);
-		} else {
-			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
-					 lockend, unlock_bits, 1, 0,
-					 &cached_state, GFP_NOFS);
-		}
-	} else {
-		free_extent_state(cached_state);
-	}
-
 	free_extent_map(em);
 	return 0;
-
-unlock_err:
-	if (create)
-		unlock_bits |= EXTENT_DO_ACCOUNTING;
-
-	clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-			 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
-	return ret;
 }
 
 struct btrfs_dio_private {
@@ -6036,6 +5908,7 @@ struct btrfs_dio_private {
 	u64 logical_offset;
 	u64 disk_bytenr;
 	u64 bytes;
+	u32 *csums;
 	void *private;
 
 	/* number of bios pending for this dio */
@@ -6055,6 +5928,7 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
 	struct inode *inode = dip->inode;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	u64 start;
+	u32 *private = dip->csums;
 
 	start = dip->logical_offset;
 	do {
@@ -6062,12 +5936,8 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
 			struct page *page = bvec->bv_page;
 			char *kaddr;
 			u32 csum = ~(u32)0;
-			u64 private = ~(u32)0;
 			unsigned long flags;
 
-			if (get_state_private(&BTRFS_I(inode)->io_tree,
-					      start, &private))
-				goto failed;
 			local_irq_save(flags);
 			kaddr = kmap_atomic(page);
 			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
@@ -6077,18 +5947,18 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
 			local_irq_restore(flags);
 
 			flush_dcache_page(bvec->bv_page);
-			if (csum != private) {
-failed:
+			if (csum != *private) {
 				printk(KERN_ERR "btrfs csum failed ino %llu off"
 				      " %llu csum %u private %u\n",
 				      (unsigned long long)btrfs_ino(inode),
 				      (unsigned long long)start,
-				      csum, (unsigned)private);
+				      csum, *private);
 				err = -EIO;
 			}
 		}
 
 		start += bvec->bv_len;
+		private++;
 		bvec++;
 	} while (bvec <= bvec_end);
 
@@ -6096,6 +5966,7 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
 		      dip->logical_offset + dip->bytes - 1);
 	bio->bi_private = dip->private;
 
+	kfree(dip->csums);
 	kfree(dip);
 
 	/* If we had a csum failure make sure to clear the uptodate flag */
@@ -6201,7 +6072,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
 
 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
					 int rw, u64 file_offset, int skip_sum,
-					 int async_submit)
+					 u32 *csums, int async_submit)
 {
 	int write = rw & REQ_WRITE;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -6234,7 +6105,8 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
 		if (ret)
 			goto err;
 	} else if (!skip_sum) {
-		ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset);
+		ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
+						file_offset, csums);
 		if (ret)
 			goto err;
 	}
@@ -6260,8 +6132,10 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 	u64 submit_len = 0;
 	u64 map_length;
 	int nr_pages = 0;
+	u32 *csums = dip->csums;
 	int ret = 0;
 	int async_submit = 0;
+	int write = rw & REQ_WRITE;
 
 	map_length = orig_bio->bi_size;
 	ret = btrfs_map_block(map_tree, READ, start_sector << 9,
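The `csums` cursor introduced above is advanced as the large DIO is split into several bios: each verified read consumes one u32 per page, so the cursor moves by the page count of the bio just submitted. A toy model of that bookkeeping (types and names are illustrative, not btrfs's):

#include <stdint.h>
#include <stddef.h>

struct toy_bio { unsigned int nr_pages; };

static void submit_split(const struct toy_bio *bios, size_t nbios,
			 const uint32_t *csums, int write, int skip_sum,
			 void (*submit)(const struct toy_bio *, const uint32_t *))
{
	for (size_t i = 0; i < nbios; i++) {
		submit(&bios[i], csums);
		/* writes keep their csums in the ordered extent, so only
		 * reads that verify csums advance the shared cursor */
		if (!write && !skip_sum)
			csums += bios[i].nr_pages;
	}
}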
@@ -6297,13 +6171,16 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 			atomic_inc(&dip->pending_bios);
 			ret = __btrfs_submit_dio_bio(bio, inode, rw,
 						     file_offset, skip_sum,
-						     async_submit);
+						     csums, async_submit);
 			if (ret) {
 				bio_put(bio);
 				atomic_dec(&dip->pending_bios);
 				goto out_err;
 			}
 
+			/* Write's use the ordered csums */
+			if (!write && !skip_sum)
+				csums = csums + nr_pages;
 			start_sector += submit_len >> 9;
 			file_offset += submit_len;
 
@@ -6333,7 +6210,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 
 submit:
 	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
-				     async_submit);
+				     csums, async_submit);
 	if (!ret)
 		return 0;
 
@@ -6369,6 +6246,17 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
 		ret = -ENOMEM;
 		goto free_ordered;
 	}
+	dip->csums = NULL;
+
+	/* Write's use the ordered csum stuff, so we don't need dip->csums */
+	if (!write && !skip_sum) {
+		dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
+		if (!dip->csums) {
+			kfree(dip);
+			ret = -ENOMEM;
+			goto free_ordered;
+		}
+	}
 
 	dip->private = bio->bi_private;
 	dip->inode = inode;
@@ -6453,22 +6341,132 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
 out:
 	return retval;
 }
-
 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
 			const struct iovec *iov, loff_t offset,
 			unsigned long nr_segs)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
+	struct btrfs_ordered_extent *ordered;
+	struct extent_state *cached_state = NULL;
+	u64 lockstart, lockend;
+	ssize_t ret;
+	int writing = rw & WRITE;
+	int write_bits = 0;
+	size_t count = iov_length(iov, nr_segs);
 
 	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
-			    offset, nr_segs))
+			    offset, nr_segs)) {
 		return 0;
+	}
+
+	lockstart = offset;
+	lockend = offset + count - 1;
+
+	if (writing) {
+		ret = btrfs_delalloc_reserve_space(inode, count);
+		if (ret)
+			goto out;
+	}
+
+	while (1) {
+		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+				 0, &cached_state);
+		/*
+		 * We're concerned with the entire range that we're going to be
+		 * doing DIO to, so we need to make sure theres no ordered
+		 * extents in this range.
+		 */
+		ordered = btrfs_lookup_ordered_range(inode, lockstart,
+						     lockend - lockstart + 1);
+
+		/*
+		 * We need to make sure there are no buffered pages in this
+		 * range either, we could have raced between the invalidate in
+		 * generic_file_direct_write and locking the extent.  The
+		 * invalidate needs to happen so that reads after a write do not
+		 * get stale data.
+		 */
+		if (!ordered && (!writing ||
+		    !test_range_bit(&BTRFS_I(inode)->io_tree,
+				    lockstart, lockend, EXTENT_UPTODATE, 0,
+				    cached_state)))
+			break;
+
+		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+				     &cached_state, GFP_NOFS);
+
+		if (ordered) {
+			btrfs_start_ordered_extent(inode, ordered, 1);
+			btrfs_put_ordered_extent(ordered);
+		} else {
+			/* Screw you mmap */
+			ret = filemap_write_and_wait_range(file->f_mapping,
+							   lockstart,
+							   lockend);
+			if (ret)
+				goto out;
+
+			/*
+			 * If we found a page that couldn't be invalidated just
+			 * fall back to buffered.
+			 */
+			ret = invalidate_inode_pages2_range(file->f_mapping,
+					lockstart >> PAGE_CACHE_SHIFT,
+					lockend >> PAGE_CACHE_SHIFT);
+			if (ret) {
+				if (ret == -EBUSY)
+					ret = 0;
+				goto out;
+			}
+		}
+
+		cond_resched();
+	}
 
-	return __blockdev_direct_IO(rw, iocb, inode,
+	/*
+	 * we don't use btrfs_set_extent_delalloc because we don't want
+	 * the dirty or uptodate bits
+	 */
+	if (writing) {
+		write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
+		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+				     EXTENT_DELALLOC, NULL, &cached_state,
+				     GFP_NOFS);
+		if (ret) {
+			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+					 lockend, EXTENT_LOCKED | write_bits,
+					 1, 0, &cached_state, GFP_NOFS);
+			goto out;
+		}
+	}
+
+	free_extent_state(cached_state);
+	cached_state = NULL;
+
+	ret = __blockdev_direct_IO(rw, iocb, inode,
 		   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
 		   iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
 		   btrfs_submit_direct, 0);
+
+	if (ret < 0 && ret != -EIOCBQUEUED) {
+		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
+			      offset + iov_length(iov, nr_segs) - 1,
+			      EXTENT_LOCKED | write_bits, 1, 0,
+			      &cached_state, GFP_NOFS);
+	} else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
+		/*
+		 * We're falling back to buffered, unlock the section we didn't
+		 * do IO on.
+		 */
+		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
+			      offset + iov_length(iov, nr_segs) - 1,
+			      EXTENT_LOCKED | write_bits, 1, 0,
+			      &cached_state, GFP_NOFS);
+	}
+out:
+	free_extent_state(cached_state);
+	return ret;
 }
 
 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
diff --git a/trunk/fs/btrfs/ioctl.c b/trunk/fs/btrfs/ioctl.c
index 9df50fa8a078..7bb755677a22 100644
--- a/trunk/fs/btrfs/ioctl.c
+++ b/trunk/fs/btrfs/ioctl.c
@@ -424,7 +424,7 @@ static noinline int create_subvol(struct btrfs_root *root,
 	uuid_le_gen(&new_uuid);
 	memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE);
 	root_item.otime.sec = cpu_to_le64(cur_time.tv_sec);
-	root_item.otime.nsec = cpu_to_le32(cur_time.tv_nsec);
+	root_item.otime.nsec = cpu_to_le64(cur_time.tv_nsec);
 	root_item.ctime = root_item.otime;
 	btrfs_set_root_ctransid(&root_item, trans->transid);
 	btrfs_set_root_otransid(&root_item, trans->transid);
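The cpu_to_le32/cpu_to_le64 churn in this hunk (and in the root-tree.c and transaction.c hunks below) is all instances of one pattern: a timestamp is stored in a fixed-endian on-disk record, and the width of the conversion helper has to agree with the declared width of the field. A userspace sketch of the pattern, with an illustrative on-disk layout (glibc's htole32/htole64 stand in for the kernel's cpu_to_le32/cpu_to_le64):

#include <endian.h>
#include <stdint.h>
#include <time.h>

struct le_timespec {		/* illustrative stand-in for btrfs_timespec */
	uint64_t sec;		/* little-endian on disk */
	uint32_t nsec;		/* little-endian on disk */
} __attribute__((packed));

static void pack_timespec(struct le_timespec *out, const struct timespec *ts)
{
	/* each store uses the conversion that matches its field's width */
	out->sec = htole64((uint64_t)ts->tv_sec);
	out->nsec = htole32((uint32_t)ts->tv_nsec);
}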
diff --git a/trunk/fs/btrfs/locking.c b/trunk/fs/btrfs/locking.c
index 2a1762c66041..a44eff074805 100644
--- a/trunk/fs/btrfs/locking.c
+++ b/trunk/fs/btrfs/locking.c
@@ -67,7 +67,7 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
 	if (eb->lock_nested) {
 		read_lock(&eb->lock);
-		if (eb->lock_nested && current->pid == eb->lock_owner) {
+		if (&eb->lock_nested && current->pid == eb->lock_owner) {
 			read_unlock(&eb->lock);
 			return;
 		}
diff --git a/trunk/fs/btrfs/qgroup.c b/trunk/fs/btrfs/qgroup.c
index 38b42e7bc91d..bc424ae5a81a 100644
--- a/trunk/fs/btrfs/qgroup.c
+++ b/trunk/fs/btrfs/qgroup.c
@@ -1364,17 +1364,13 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
 	spin_lock(&fs_info->qgroup_lock);
 
 	dstgroup = add_qgroup_rb(fs_info, objectid);
-	if (IS_ERR(dstgroup)) {
-		ret = PTR_ERR(dstgroup);
+	if (!dstgroup)
 		goto unlock;
-	}
 
 	if (srcid) {
 		srcgroup = find_qgroup_rb(fs_info, srcid);
-		if (!srcgroup) {
-			ret = -EINVAL;
+		if (!srcgroup)
 			goto unlock;
-		}
 		dstgroup->rfer = srcgroup->rfer - level_size;
 		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
 		srcgroup->excl = level_size;
@@ -1383,10 +1379,8 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
 		qgroup_dirty(fs_info, srcgroup);
 	}
 
-	if (!inherit) {
-		ret = -EINVAL;
+	if (!inherit)
 		goto unlock;
-	}
 
 	i_qgroups = (u64 *)(inherit + 1);
 	for (i = 0; i < inherit->num_qgroups; ++i) {
diff --git a/trunk/fs/btrfs/root-tree.c b/trunk/fs/btrfs/root-tree.c
index 10d8e4d88071..6bb465cca20f 100644
--- a/trunk/fs/btrfs/root-tree.c
+++ b/trunk/fs/btrfs/root-tree.c
@@ -544,8 +544,8 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans,
 	struct timespec ct = CURRENT_TIME;
 
 	spin_lock(&root->root_times_lock);
-	item->ctransid = cpu_to_le64(trans->transid);
+	item->ctransid = trans->transid;
 	item->ctime.sec = cpu_to_le64(ct.tv_sec);
-	item->ctime.nsec = cpu_to_le32(ct.tv_nsec);
+	item->ctime.nsec = cpu_to_le64(ct.tv_nsec);
 	spin_unlock(&root->root_times_lock);
 }
diff --git a/trunk/fs/btrfs/super.c b/trunk/fs/btrfs/super.c
index 83d6f9f9c220..f2eb24c477a3 100644
--- a/trunk/fs/btrfs/super.c
+++ b/trunk/fs/btrfs/super.c
@@ -838,6 +838,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
 	struct btrfs_trans_handle *trans;
 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
 	struct btrfs_root *root = fs_info->tree_root;
+	int ret;
 
 	trace_btrfs_sync_fs(wait);
 
@@ -848,17 +849,11 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
 
 	btrfs_wait_ordered_extents(root, 0, 0);
 
-	spin_lock(&fs_info->trans_lock);
-	if (!fs_info->running_transaction) {
-		spin_unlock(&fs_info->trans_lock);
-		return 0;
-	}
-	spin_unlock(&fs_info->trans_lock);
-
-	trans = btrfs_join_transaction(root);
+	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans))
 		return PTR_ERR(trans);
-	return btrfs_commit_transaction(trans, root);
+	ret = btrfs_commit_transaction(trans, root);
+	return ret;
 }
 
 static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
@@ -1535,8 +1530,6 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
 	while (cur_devices) {
 		head = &cur_devices->devices;
 		list_for_each_entry(dev, head, dev_list) {
-			if (dev->missing)
-				continue;
 			if (!first_dev || dev->devid < first_dev->devid)
 				first_dev = dev;
 		}
diff --git a/trunk/fs/btrfs/transaction.c b/trunk/fs/btrfs/transaction.c
index 27c26004e050..17be3dedacba 100644
--- a/trunk/fs/btrfs/transaction.c
+++ b/trunk/fs/btrfs/transaction.c
@@ -1031,7 +1031,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	btrfs_i_size_write(parent_inode, parent_inode->i_size +
 					 dentry->d_name.len * 2);
-	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
 	ret = btrfs_update_inode(trans, parent_root, parent_inode);
 	if (ret)
 		goto abort_trans_dput;
@@ -1067,7 +1066,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
 			BTRFS_UUID_SIZE);
 	new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
-	new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
+	new_root_item->otime.nsec = cpu_to_le64(cur_time.tv_nsec);
 	btrfs_set_root_otransid(new_root_item, trans->transid);
 	memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
 	memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
diff --git a/trunk/fs/btrfs/volumes.c b/trunk/fs/btrfs/volumes.c
index 88b969aeeb71..e86ae04abe6a 100644
--- a/trunk/fs/btrfs/volumes.c
+++ b/trunk/fs/btrfs/volumes.c
@@ -227,8 +227,9 @@ static noinline void run_scheduled_bios(struct btrfs_device *device)
 		cur = pending;
 		pending = pending->bi_next;
 		cur->bi_next = NULL;
+		atomic_dec(&fs_info->nr_async_bios);
 
-		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
+		if (atomic_read(&fs_info->nr_async_bios) < limit &&
 		    waitqueue_active(&fs_info->async_submit_wait))
 			wake_up(&fs_info->async_submit_wait);
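The run_scheduled_bios hunk just above (like the async_cow_submit hunk earlier) trades one atomic read-modify-write for a decrement followed by a separate read. The difference is visible in a userspace analogue with C11 atomics (names illustrative): the single RMW tests exactly the value this thread produced, while the split version only tests a snapshot that another thread may have changed in between, which is tolerable when the value merely gates a wake-up.

#include <stdatomic.h>

static atomic_int nr_async_bios;

static int below_limit_single_rmw(int limit)
{
	/* atomic_dec_return() style: the tested value is exactly the
	 * result of this thread's decrement */
	return atomic_fetch_sub(&nr_async_bios, 1) - 1 < limit;
}

static int below_limit_split(int limit)
{
	/* atomic_dec() + atomic_read() style: another thread may change
	 * the counter between the two steps, so this is only a snapshot */
	atomic_fetch_sub(&nr_async_bios, 1);
	return atomic_load(&nr_async_bios) < limit;
}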
@@ -568,11 +569,9 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
 		memcpy(new_device, device, sizeof(*new_device));
 
 		/* Safe because we are under uuid_mutex */
-		if (device->name) {
-			name = rcu_string_strdup(device->name->str, GFP_NOFS);
-			BUG_ON(device->name && !name); /* -ENOMEM */
-			rcu_assign_pointer(new_device->name, name);
-		}
+		name = rcu_string_strdup(device->name->str, GFP_NOFS);
+		BUG_ON(device->name && !name); /* -ENOMEM */
+		rcu_assign_pointer(new_device->name, name);
 		new_device->bdev = NULL;
 		new_device->writeable = 0;
 		new_device->in_fs_metadata = 0;
@@ -4606,6 +4605,28 @@ int btrfs_read_sys_array(struct btrfs_root *root)
 	return ret;
 }
 
+struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
+						   u64 logical, int mirror_num)
+{
+	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+	int ret;
+	u64 map_length = 0;
+	struct btrfs_bio *bbio = NULL;
+	struct btrfs_device *device;
+
+	BUG_ON(mirror_num == 0);
+	ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio,
+			      mirror_num);
+	if (ret) {
+		BUG_ON(bbio != NULL);
+		return NULL;
+	}
+	BUG_ON(mirror_num != bbio->mirror_num);
+	device = bbio->stripes[mirror_num - 1].dev;
+	kfree(bbio);
+	return device;
+}
+
 int btrfs_read_chunk_tree(struct btrfs_root *root)
 {
 	struct btrfs_path *path;
diff --git a/trunk/fs/btrfs/volumes.h b/trunk/fs/btrfs/volumes.h
index 53c06af92e8d..5479325987b3 100644
--- a/trunk/fs/btrfs/volumes.h
+++ b/trunk/fs/btrfs/volumes.h
@@ -289,6 +289,8 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
 			 u64 *start, u64 *max_avail);
+struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
+						   u64 logical, int mirror_num);
 void btrfs_dev_stat_print_on_error(struct btrfs_device *device);
 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev,
 				  int index);
 int btrfs_get_dev_stats(struct btrfs_root *root,
diff --git a/trunk/fs/logfs/dev_bdev.c b/trunk/fs/logfs/dev_bdev.c
index e784a217b500..df0de27c2733 100644
--- a/trunk/fs/logfs/dev_bdev.c
+++ b/trunk/fs/logfs/dev_bdev.c
@@ -26,7 +26,6 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
 	struct completion complete;
 
 	bio_init(&bio);
-	bio.bi_max_vecs = 1;
 	bio.bi_io_vec = &bio_vec;
 	bio_vec.bv_page = page;
 	bio_vec.bv_len = PAGE_SIZE;
@@ -96,11 +95,12 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
 	struct address_space *mapping = super->s_mapping_inode->i_mapping;
 	struct bio *bio;
 	struct page *page;
-	unsigned int max_pages;
+	struct request_queue *q = bdev_get_queue(sb->s_bdev);
+	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
 	int i;
 
-	max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));
-
+	if (max_pages > BIO_MAX_PAGES)
+		max_pages = BIO_MAX_PAGES;
 	bio = bio_alloc(GFP_NOFS, max_pages);
 	BUG_ON(!bio);
 
@@ -190,11 +190,12 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
 {
 	struct logfs_super *super = logfs_super(sb);
 	struct bio *bio;
-	unsigned int max_pages;
+	struct request_queue *q = bdev_get_queue(sb->s_bdev);
+	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
 	int i;
 
-	max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));
-
+	if (max_pages > BIO_MAX_PAGES)
+		max_pages = BIO_MAX_PAGES;
 	bio = bio_alloc(GFP_NOFS, max_pages);
 	BUG_ON(!bio);
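The two logfs hunks above size a bio from the queue's hardware sector limit: sectors are 512 bytes, so a page holds 2^(PAGE_SHIFT - 9) of them, and the result is clamped to the bio's structural maximum. The arithmetic in isolation, as a standalone sketch (the constants are illustrative, not the values any particular kernel uses):

#include <stddef.h>

#define PAGE_SHIFT	12	/* 4 KiB pages, assumed for the sketch */
#define BIO_MAX_PAGES	256	/* illustrative structural bio limit */

static unsigned int bio_page_budget(unsigned int max_hw_sectors)
{
	/* convert the 512-byte sector budget into whole pages */
	unsigned int max_pages = max_hw_sectors >> (PAGE_SHIFT - 9);

	if (max_pages > BIO_MAX_PAGES)
		max_pages = BIO_MAX_PAGES;
	return max_pages;
}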
diff --git a/trunk/fs/logfs/inode.c b/trunk/fs/logfs/inode.c
index 6984562738d3..a422f42238b2 100644
--- a/trunk/fs/logfs/inode.c
+++ b/trunk/fs/logfs/inode.c
@@ -156,26 +156,10 @@ static void __logfs_destroy_inode(struct inode *inode)
 	call_rcu(&inode->i_rcu, logfs_i_callback);
 }
 
-static void __logfs_destroy_meta_inode(struct inode *inode)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	BUG_ON(li->li_block);
-	call_rcu(&inode->i_rcu, logfs_i_callback);
-}
-
 static void logfs_destroy_inode(struct inode *inode)
 {
 	struct logfs_inode *li = logfs_inode(inode);
 
-	if (inode->i_ino < LOGFS_RESERVED_INOS) {
-		/*
-		 * The reserved inodes are never destroyed unless we are in
-		 * unmont path.
-		 */
-		__logfs_destroy_meta_inode(inode);
-		return;
-	}
-
 	BUG_ON(list_empty(&li->li_freeing_list));
 	spin_lock(&logfs_inode_lock);
 	li->li_refcount--;
@@ -389,8 +373,8 @@ static void logfs_put_super(struct super_block *sb)
 {
 	struct logfs_super *super = logfs_super(sb);
 	/* kill the meta-inodes */
-	iput(super->s_segfile_inode);
 	iput(super->s_master_inode);
+	iput(super->s_segfile_inode);
 	iput(super->s_mapping_inode);
 }
 
diff --git a/trunk/fs/logfs/journal.c b/trunk/fs/logfs/journal.c
index 2a09b8d73989..1e1c369df22b 100644
--- a/trunk/fs/logfs/journal.c
+++ b/trunk/fs/logfs/journal.c
@@ -565,7 +565,7 @@ static void write_wbuf(struct super_block *sb, struct logfs_area *area,
 	index = ofs >> PAGE_SHIFT;
 	page_ofs = ofs & (PAGE_SIZE - 1);
 
-	page = find_or_create_page(mapping, index, GFP_NOFS);
+	page = find_lock_page(mapping, index);
 	BUG_ON(!page);
 	memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize);
 	unlock_page(page);
diff --git a/trunk/fs/logfs/readwrite.c b/trunk/fs/logfs/readwrite.c
index 5be0abef603d..f1cb512c5019 100644
--- a/trunk/fs/logfs/readwrite.c
+++ b/trunk/fs/logfs/readwrite.c
@@ -2189,6 +2189,7 @@ void logfs_evict_inode(struct inode *inode)
 		return;
 	}
 
+	BUG_ON(inode->i_ino < LOGFS_RESERVED_INOS);
 	page = inode_to_page(inode);
 	BUG_ON(!page); /* FIXME: Use emergency page */
 	logfs_put_write_page(page);
diff --git a/trunk/fs/logfs/segment.c b/trunk/fs/logfs/segment.c
index 038da0991794..e28d090c98d6 100644
--- a/trunk/fs/logfs/segment.c
+++ b/trunk/fs/logfs/segment.c
@@ -886,7 +886,7 @@ static struct logfs_area *alloc_area(struct super_block *sb)
 
 static void map_invalidatepage(struct page *page, unsigned long l)
 {
-	return;
+	BUG();
 }
 
 static int map_releasepage(struct page *page, gfp_t g)
diff --git a/trunk/include/drm/drm_crtc.h b/trunk/include/drm/drm_crtc.h
index bfacf0d5a225..ced362533e3c 100644
--- a/trunk/include/drm/drm_crtc.h
+++ b/trunk/include/drm/drm_crtc.h
@@ -118,8 +118,7 @@ enum drm_mode_status {
 	.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
 	.htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
 	.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
-	.vscan = (vs), .flags = (f), .vrefresh = 0, \
-	.base.type = DRM_MODE_OBJECT_MODE
+	.vscan = (vs), .flags = (f), .vrefresh = 0
 
 #define CRTC_INTERLACE_HALVE_V	0x1 /* halve V values for interlacing */
 
diff --git a/trunk/include/drm/drm_mode.h b/trunk/include/drm/drm_mode.h
index 3d6301b6ec16..5581980b14f6 100644
--- a/trunk/include/drm/drm_mode.h
+++ b/trunk/include/drm/drm_mode.h
@@ -359,9 +359,8 @@ struct drm_mode_mode_cmd {
 	struct drm_mode_modeinfo mode;
 };
 
-#define DRM_MODE_CURSOR_BO	0x01
-#define DRM_MODE_CURSOR_MOVE	0x02
-#define DRM_MODE_CURSOR_FLAGS	0x03
+#define DRM_MODE_CURSOR_BO	(1<<0)
+#define DRM_MODE_CURSOR_MOVE	(1<<1)

 /*
  * depending on the value in flags different members are used.
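Both spellings of the cursor flags above give the same bit values; the DRM_MODE_CURSOR_FLAGS macro being removed was simply the OR of the known bits, which is what lets an ioctl handler reject requests carrying unknown flags (as in the drm_mode_cursor_ioctl hunk earlier in this patch). A sketch of that validation idiom with hypothetical names:

#include <stdint.h>

#define CURSOR_BO	(1u << 0)
#define CURSOR_MOVE	(1u << 1)
#define CURSOR_ALL	(CURSOR_BO | CURSOR_MOVE)	/* mask of known bits */

static int cursor_flags_valid(uint32_t flags)
{
	/* reject empty requests and any request with unknown bits set */
	return flags != 0 && (flags & ~CURSOR_ALL) == 0;
}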
diff --git a/trunk/mm/slab.c b/trunk/mm/slab.c
index 811af03a14ef..f8b0d539b482 100644
--- a/trunk/mm/slab.c
+++ b/trunk/mm/slab.c
@@ -3260,7 +3260,6 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
 
 	/* cache_grow can reenable interrupts, then ac could change. */
 	ac = cpu_cache_get(cachep);
-	node = numa_mem_id();
 
 	/* no objects in sight? abort */
 	if (!x && (ac->avail == 0 || force_refill))
diff --git a/trunk/virt/kvm/kvm_main.c b/trunk/virt/kvm/kvm_main.c
index 246852397e30..d617f69131d7 100644
--- a/trunk/virt/kvm/kvm_main.c
+++ b/trunk/virt/kvm/kvm_main.c
@@ -1976,9 +1976,10 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
 			if (copy_from_user(&csigset, sigmask_arg->sigset,
 					   sizeof csigset))
 				goto out;
-		}
-		sigset_from_compat(&sigset, &csigset);
-		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+			sigset_from_compat(&sigset, &csigset);
+			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+		} else
+			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
 		break;
 	}
 	default:
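The kvm hunk above is a pure control-flow change: the compat sigset is converted and applied only when userspace actually supplied a mask, and a NULL argument is passed otherwise to clear any previous mask. A standalone sketch of that shape, with stand-in types and helpers (not kvm's):

#include <stdint.h>
#include <stddef.h>

struct sigset_native { uint64_t bits; };
struct sigset_compat { uint32_t bits[2]; };

static void sigset_from_compat32(struct sigset_native *dst,
				 const struct sigset_compat *src)
{
	/* widen two 32-bit words into one 64-bit word, mirroring what a
	 * compat sigset conversion does on a 64-bit kernel */
	dst->bits = ((uint64_t)src->bits[1] << 32) | src->bits[0];
}

static int set_sigmask(const struct sigset_compat *user_mask,
		       int (*apply)(const struct sigset_native *))
{
	struct sigset_native native;

	if (!user_mask)
		return apply(NULL);	/* no mask supplied: clear it */
	sigset_from_compat32(&native, user_mask);
	return apply(&native);
}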