diff --git a/[refs] b/[refs] index 08edaa5db179..56f8402f4485 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: cb5520f02c010e3cb974b9ac06f30aafa2eebc38 +refs/heads/master: 4a683a2c5e7cabe91218db28e56dc25e1b134ce3 diff --git a/trunk/arch/arm/mach-omap1/lcd_dma.c b/trunk/arch/arm/mach-omap1/lcd_dma.c index 453809359ba6..c9088d85da04 100644 --- a/trunk/arch/arm/mach-omap1/lcd_dma.c +++ b/trunk/arch/arm/mach-omap1/lcd_dma.c @@ -37,7 +37,7 @@ int omap_lcd_dma_running(void) * On OMAP1510, internal LCD controller will start the transfer * when it gets enabled, so assume DMA running if LCD enabled. */ - if (cpu_is_omap15xx()) + if (cpu_is_omap1510()) if (omap_readw(OMAP_LCDC_CONTROL) & OMAP_LCDC_CTRL_LCD_EN) return 1; @@ -95,7 +95,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer); void omap_set_lcd_dma_b1_rotation(int rotate) { - if (cpu_is_omap15xx()) { + if (cpu_is_omap1510()) { printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n"); BUG(); return; @@ -106,7 +106,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation); void omap_set_lcd_dma_b1_mirror(int mirror) { - if (cpu_is_omap15xx()) { + if (cpu_is_omap1510()) { printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n"); BUG(); } @@ -116,7 +116,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror); void omap_set_lcd_dma_b1_vxres(unsigned long vxres) { - if (cpu_is_omap15xx()) { + if (cpu_is_omap1510()) { printk(KERN_ERR "DMA virtual resulotion is not supported " "in 1510 mode\n"); BUG(); @@ -127,7 +127,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres); void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale) { - if (cpu_is_omap15xx()) { + if (cpu_is_omap1510()) { printk(KERN_ERR "DMA scale is not supported in 1510 mode\n"); BUG(); } @@ -177,7 +177,7 @@ static void set_b1_regs(void) bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); /* 1510 DMA requires the bottom address to be 2 more * than the actual last memory access location. */ - if (cpu_is_omap15xx() && + if (cpu_is_omap1510() && lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32) bottom += 2; ei = PIXSTEP(0, 0, 1, 0); @@ -241,7 +241,7 @@ static void set_b1_regs(void) return; /* Suppress warning about uninitialized vars */ } - if (cpu_is_omap15xx()) { + if (cpu_is_omap1510()) { omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U); omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L); omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U); @@ -343,7 +343,7 @@ void omap_free_lcd_dma(void) BUG(); return; } - if (!cpu_is_omap15xx()) + if (!cpu_is_omap1510()) omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, OMAP1610_DMA_LCD_CCR); lcd_dma.reserved = 0; @@ -360,7 +360,7 @@ void omap_enable_lcd_dma(void) * connected. Otherwise the OMAP internal controller will * start the transfer when it gets enabled. 
*/ - if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl) + if (cpu_is_omap1510() || !lcd_dma.ext_ctrl) return; w = omap_readw(OMAP1610_DMA_LCD_CTRL); @@ -378,14 +378,14 @@ EXPORT_SYMBOL(omap_enable_lcd_dma); void omap_setup_lcd_dma(void) { BUG_ON(lcd_dma.active); - if (!cpu_is_omap15xx()) { + if (!cpu_is_omap1510()) { /* Set some reasonable defaults */ omap_writew(0x5440, OMAP1610_DMA_LCD_CCR); omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP); omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL); } set_b1_regs(); - if (!cpu_is_omap15xx()) { + if (!cpu_is_omap1510()) { u16 w; w = omap_readw(OMAP1610_DMA_LCD_CCR); @@ -407,7 +407,7 @@ void omap_stop_lcd_dma(void) u16 w; lcd_dma.active = 0; - if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl) + if (cpu_is_omap1510() || !lcd_dma.ext_ctrl) return; w = omap_readw(OMAP1610_DMA_LCD_CCR); diff --git a/trunk/arch/arm/mach-omap1/time.c b/trunk/arch/arm/mach-omap1/time.c index 6885d2fac183..f83fc335c613 100644 --- a/trunk/arch/arm/mach-omap1/time.c +++ b/trunk/arch/arm/mach-omap1/time.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include diff --git a/trunk/arch/arm/mach-omap2/board-devkit8000.c b/trunk/arch/arm/mach-omap2/board-devkit8000.c index 9a2a31e011ce..e906e05bb41b 100644 --- a/trunk/arch/arm/mach-omap2/board-devkit8000.c +++ b/trunk/arch/arm/mach-omap2/board-devkit8000.c @@ -115,6 +115,9 @@ static struct omap2_hsmmc_info mmc[] = { static int devkit8000_panel_enable_lcd(struct omap_dss_device *dssdev) { + twl_i2c_write_u8(TWL4030_MODULE_GPIO, 0x80, REG_GPIODATADIR1); + twl_i2c_write_u8(TWL4030_MODULE_LED, 0x0, 0x0); + if (gpio_is_valid(dssdev->reset_gpio)) gpio_set_value_cansleep(dssdev->reset_gpio, 1); return 0; @@ -244,8 +247,6 @@ static struct gpio_led gpio_leds[]; static int devkit8000_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) { - int ret; - omap_mux_init_gpio(29, OMAP_PIN_INPUT); /* gpio + 0 is "mmc0_cd" (input/IRQ) */ mmc[0].gpio_cd = gpio + 0; @@ -254,23 +255,17 @@ static int devkit8000_twl_gpio_setup(struct device *dev, /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; - /* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */ - devkit8000_lcd_device.reset_gpio = gpio + TWL4030_GPIO_MAX + 0; - ret = gpio_request_one(devkit8000_lcd_device.reset_gpio, - GPIOF_DIR_OUT | GPIOF_INIT_LOW, "LCD_PWREN"); - if (ret < 0) { - devkit8000_lcd_device.reset_gpio = -EINVAL; - printk(KERN_ERR "Failed to request GPIO for LCD_PWRN\n"); - } + /* gpio + 1 is "LCD_PWREN" (out, active high) */ + devkit8000_lcd_device.reset_gpio = gpio + 1; + gpio_request(devkit8000_lcd_device.reset_gpio, "LCD_PWREN"); + /* Disable until needed */ + gpio_direction_output(devkit8000_lcd_device.reset_gpio, 0); /* gpio + 7 is "DVI_PD" (out, active low) */ devkit8000_dvi_device.reset_gpio = gpio + 7; - ret = gpio_request_one(devkit8000_dvi_device.reset_gpio, - GPIOF_DIR_OUT | GPIOF_INIT_LOW, "DVI PowerDown"); - if (ret < 0) { - devkit8000_dvi_device.reset_gpio = -EINVAL; - printk(KERN_ERR "Failed to request GPIO for DVI PowerDown\n"); - } + gpio_request(devkit8000_dvi_device.reset_gpio, "DVI PowerDown"); + /* Disable until needed */ + gpio_direction_output(devkit8000_dvi_device.reset_gpio, 0); return 0; } diff --git a/trunk/arch/arm/mach-omap2/board-omap4panda.c b/trunk/arch/arm/mach-omap2/board-omap4panda.c index e944025d5ef8..e001a048dc0c 100644 --- a/trunk/arch/arm/mach-omap2/board-omap4panda.c +++ b/trunk/arch/arm/mach-omap2/board-omap4panda.c @@ -409,6 +409,8 @@ static void __init 
omap4_panda_init(void) platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices)); omap_serial_init(); omap4_twl6030_hsmmc_init(mmc); + /* OMAP4 Panda uses internal transceiver so register nop transceiver */ + usb_nop_xceiv_register(); omap4_ehci_init(); usb_musb_init(&musb_board_data); } diff --git a/trunk/arch/arm/mach-omap2/board-rm680.c b/trunk/arch/arm/mach-omap2/board-rm680.c index 39a71bb8a308..cb77be7ac44f 100644 --- a/trunk/arch/arm/mach-omap2/board-rm680.c +++ b/trunk/arch/arm/mach-omap2/board-rm680.c @@ -40,6 +40,9 @@ static struct regulator_consumer_supply rm680_vemmc_consumers[] = { static struct regulator_init_data rm680_vemmc = { .constraints = { .name = "rm680_vemmc", + .min_uV = 2900000, + .max_uV = 2900000, + .apply_uV = 1, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_STATUS diff --git a/trunk/arch/arm/mach-omap2/mux.c b/trunk/arch/arm/mach-omap2/mux.c index 98148b6c36e9..fae49d12bc76 100644 --- a/trunk/arch/arm/mach-omap2/mux.c +++ b/trunk/arch/arm/mach-omap2/mux.c @@ -1000,7 +1000,6 @@ int __init omap_mux_init(const char *name, u32 flags, if (!partition->base) { pr_err("%s: Could not ioremap mux partition at 0x%08x\n", __func__, partition->phys); - kfree(partition); return -ENODEV; } diff --git a/trunk/arch/arm/mach-omap2/pm34xx.c b/trunk/arch/arm/mach-omap2/pm34xx.c index 2f864e4b085d..a4aa1920a75c 100644 --- a/trunk/arch/arm/mach-omap2/pm34xx.c +++ b/trunk/arch/arm/mach-omap2/pm34xx.c @@ -168,10 +168,9 @@ static void omap3_core_restore_context(void) * once during boot sequence, but this works as we are not using secure * services. */ -static void omap3_save_secure_ram_context(void) +static void omap3_save_secure_ram_context(u32 target_mpu_state) { u32 ret; - int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm); if (omap_type() != OMAP2_DEVICE_TYPE_GP) { /* @@ -182,7 +181,7 @@ static void omap3_save_secure_ram_context(void) pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); ret = _omap_save_secure_sram((u32 *) __pa(omap3_secure_ram_storage)); - pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state); + pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state); /* Following is for error tracking, it should not happen */ if (ret) { printk(KERN_ERR "save_secure_sram() returns %08x\n", @@ -1095,7 +1094,7 @@ static int __init omap3_pm_init(void) local_fiq_disable(); omap_dma_global_context_save(); - omap3_save_secure_ram_context(); + omap3_save_secure_ram_context(PWRDM_POWER_ON); omap_dma_global_context_restore(); local_irq_enable(); diff --git a/trunk/arch/arm/mach-omap2/smartreflex.c b/trunk/arch/arm/mach-omap2/smartreflex.c index c37e823266d3..77ecebf3fae2 100644 --- a/trunk/arch/arm/mach-omap2/smartreflex.c +++ b/trunk/arch/arm/mach-omap2/smartreflex.c @@ -780,7 +780,8 @@ static int omap_sr_autocomp_show(void *data, u64 *val) struct omap_sr *sr_info = (struct omap_sr *) data; if (!sr_info) { - pr_warning("%s: omap_sr struct not found\n", __func__); + pr_warning("%s: omap_sr struct for sr_%s not found\n", + __func__, sr_info->voltdm->name); return -EINVAL; } @@ -794,7 +795,8 @@ static int omap_sr_autocomp_store(void *data, u64 val) struct omap_sr *sr_info = (struct omap_sr *) data; if (!sr_info) { - pr_warning("%s: omap_sr struct not found\n", __func__); + pr_warning("%s: omap_sr struct for sr_%s not found\n", + __func__, sr_info->voltdm->name); return -EINVAL; } @@ -832,8 +834,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) if (!pdata) { dev_err(&pdev->dev, "%s: platform data missing\n", __func__); - ret = 
-EINVAL; - goto err_free_devinfo; + return -EINVAL; } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -965,7 +966,7 @@ static int __devexit omap_sr_remove(struct platform_device *pdev) } sr_info = _sr_lookup(pdata->voltdm); - if (IS_ERR(sr_info)) { + if (!sr_info) { dev_warn(&pdev->dev, "%s: omap_sr struct not found\n", __func__); return -EINVAL; diff --git a/trunk/arch/arm/mach-omap2/voltage.c b/trunk/arch/arm/mach-omap2/voltage.c index 12be525b8df4..ed6079c94c57 100644 --- a/trunk/arch/arm/mach-omap2/voltage.c +++ b/trunk/arch/arm/mach-omap2/voltage.c @@ -471,7 +471,6 @@ static void __init vdd_debugfs_init(struct omap_vdd_info *vdd) strcat(name, vdd->voltdm.name); vdd->debug_dir = debugfs_create_dir(name, voltage_dir); - kfree(name); if (IS_ERR(vdd->debug_dir)) { pr_warning("%s: Unable to create debugfs directory for" " vdd_%s\n", __func__, vdd->voltdm.name); diff --git a/trunk/arch/arm/tools/mach-types b/trunk/arch/arm/tools/mach-types index 2fea897ebeb1..9d6feaabbe7d 100644 --- a/trunk/arch/arm/tools/mach-types +++ b/trunk/arch/arm/tools/mach-types @@ -12,7 +12,7 @@ # # http://www.arm.linux.org.uk/developer/machines/?action=new # -# Last update: Sun Dec 12 23:24:27 2010 +# Last update: Mon Feb 7 08:59:27 2011 # # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number # @@ -2240,7 +2240,7 @@ arm_ultimator2 MACH_ARM_ULTIMATOR2 ARM_ULTIMATOR2 2250 vs_v210 MACH_VS_V210 VS_V210 2252 vs_v212 MACH_VS_V212 VS_V212 2253 hmt MACH_HMT HMT 2254 -suen3 MACH_SUEN3 SUEN3 2255 +km_kirkwood MACH_KM_KIRKWOOD KM_KIRKWOOD 2255 vesper MACH_VESPER VESPER 2256 str9 MACH_STR9 STR9 2257 omap3_wl_ff MACH_OMAP3_WL_FF OMAP3_WL_FF 2258 @@ -2987,7 +2987,7 @@ pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001 ea20 MACH_EA20 EA20 3002 awm2 MACH_AWM2 AWM2 3003 ti8148evm MACH_TI8148EVM TI8148EVM 3004 -tegra_seaboard MACH_TEGRA_SEABOARD TEGRA_SEABOARD 3005 +seaboard MACH_SEABOARD SEABOARD 3005 linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006 tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007 rubys MACH_RUBYS RUBYS 3008 @@ -3190,7 +3190,7 @@ synergy MACH_SYNERGY SYNERGY 3205 ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206 wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207 punica MACH_PUNICA PUNICA 3208 -sbc_nt250 MACH_SBC_NT250 SBC_NT250 3209 +trimslice MACH_TRIMSLICE TRIMSLICE 3209 mx27_wmultra MACH_MX27_WMULTRA MX27_WMULTRA 3210 mackerel MACH_MACKEREL MACKEREL 3211 fa9x27 MACH_FA9X27 FA9X27 3213 @@ -3219,3 +3219,100 @@ pivicc MACH_PIVICC PIVICC 3235 pcm048 MACH_PCM048 PCM048 3236 dds MACH_DDS DDS 3237 chalten_xa1 MACH_CHALTEN_XA1 CHALTEN_XA1 3238 +ts48xx MACH_TS48XX TS48XX 3239 +tonga2_tfttimer MACH_TONGA2_TFTTIMER TONGA2_TFTTIMER 3240 +whistler MACH_WHISTLER WHISTLER 3241 +asl_phoenix MACH_ASL_PHOENIX ASL_PHOENIX 3242 +at91sam9263otlite MACH_AT91SAM9263OTLITE AT91SAM9263OTLITE 3243 +ddplug MACH_DDPLUG DDPLUG 3244 +d2plug MACH_D2PLUG D2PLUG 3245 +kzm9d MACH_KZM9D KZM9D 3246 +verdi_lte MACH_VERDI_LTE VERDI_LTE 3247 +nanozoom MACH_NANOZOOM NANOZOOM 3248 +dm3730_som_lv MACH_DM3730_SOM_LV DM3730_SOM_LV 3249 +dm3730_torpedo MACH_DM3730_TORPEDO DM3730_TORPEDO 3250 +anchovy MACH_ANCHOVY ANCHOVY 3251 +re2rev20 MACH_RE2REV20 RE2REV20 3253 +re2rev21 MACH_RE2REV21 RE2REV21 3254 +cns21xx MACH_CNS21XX CNS21XX 3255 +rider MACH_RIDER RIDER 3257 +nsk330 MACH_NSK330 NSK330 3258 +cns2133evb MACH_CNS2133EVB CNS2133EVB 3259 +z3_816x_mod MACH_Z3_816X_MOD Z3_816X_MOD 3260 +z3_814x_mod MACH_Z3_814X_MOD Z3_814X_MOD 3261 +beect MACH_BEECT BEECT 3262 +dma_thunderbug MACH_DMA_THUNDERBUG DMA_THUNDERBUG 3263 
+omn_at91sam9g20 MACH_OMN_AT91SAM9G20 OMN_AT91SAM9G20 3264 +mx25_e2s_uc MACH_MX25_E2S_UC MX25_E2S_UC 3265 +mione MACH_MIONE MIONE 3266 +top9000_tcu MACH_TOP9000_TCU TOP9000_TCU 3267 +top9000_bsl MACH_TOP9000_BSL TOP9000_BSL 3268 +kingdom MACH_KINGDOM KINGDOM 3269 +armadillo460 MACH_ARMADILLO460 ARMADILLO460 3270 +lq2 MACH_LQ2 LQ2 3271 +sweda_tms2 MACH_SWEDA_TMS2 SWEDA_TMS2 3272 +mx53_loco MACH_MX53_LOCO MX53_LOCO 3273 +acer_a8 MACH_ACER_A8 ACER_A8 3275 +acer_gauguin MACH_ACER_GAUGUIN ACER_GAUGUIN 3276 +guppy MACH_GUPPY GUPPY 3277 +mx61_ard MACH_MX61_ARD MX61_ARD 3278 +tx53 MACH_TX53 TX53 3279 +omapl138_case_a3 MACH_OMAPL138_CASE_A3 OMAPL138_CASE_A3 3280 +uemd MACH_UEMD UEMD 3281 +ccwmx51mut MACH_CCWMX51MUT CCWMX51MUT 3282 +rockhopper MACH_ROCKHOPPER ROCKHOPPER 3283 +nookcolor MACH_NOOKCOLOR NOOKCOLOR 3284 +hkdkc100 MACH_HKDKC100 HKDKC100 3285 +ts42xx MACH_TS42XX TS42XX 3286 +aebl MACH_AEBL AEBL 3287 +wario MACH_WARIO WARIO 3288 +gfs_spm MACH_GFS_SPM GFS_SPM 3289 +cm_t3730 MACH_CM_T3730 CM_T3730 3290 +isc3 MACH_ISC3 ISC3 3291 +rascal MACH_RASCAL RASCAL 3292 +hrefv60 MACH_HREFV60 HREFV60 3293 +tpt_2_0 MACH_TPT_2_0 TPT_2_0 3294 +pyramid_td MACH_PYRAMID_TD PYRAMID_TD 3295 +splendor MACH_SPLENDOR SPLENDOR 3296 +guf_planet MACH_GUF_PLANET GUF_PLANET 3297 +msm8x60_qt MACH_MSM8X60_QT MSM8X60_QT 3298 +htc_hd_mini MACH_HTC_HD_MINI HTC_HD_MINI 3299 +athene MACH_ATHENE ATHENE 3300 +deep_r_ek_1 MACH_DEEP_R_EK_1 DEEP_R_EK_1 3301 +vivow_ct MACH_VIVOW_CT VIVOW_CT 3302 +nery_1000 MACH_NERY_1000 NERY_1000 3303 +rfl109145_ssrv MACH_RFL109145_SSRV RFL109145_SSRV 3304 +nmh MACH_NMH NMH 3305 +wn802t MACH_WN802T WN802T 3306 +dragonet MACH_DRAGONET DRAGONET 3307 +geneva_b MACH_GENEVA_B GENEVA_B 3308 +at91sam9263desk16l MACH_AT91SAM9263DESK16L AT91SAM9263DESK16L 3309 +bcmhana_sv MACH_BCMHANA_SV BCMHANA_SV 3310 +bcmhana_tablet MACH_BCMHANA_TABLET BCMHANA_TABLET 3311 +koi MACH_KOI KOI 3312 +ts4800 MACH_TS4800 TS4800 3313 +tqma9263 MACH_TQMA9263 TQMA9263 3314 +holiday MACH_HOLIDAY HOLIDAY 3315 +dma_6410 MACH_DMA6410 DMA6410 3316 +pcats_overlay MACH_PCATS_OVERLAY PCATS_OVERLAY 3317 +hwgw6410 MACH_HWGW6410 HWGW6410 3318 +shenzhou MACH_SHENZHOU SHENZHOU 3319 +cwme9210 MACH_CWME9210 CWME9210 3320 +cwme9210js MACH_CWME9210JS CWME9210JS 3321 +pgs_v1 MACH_PGS_SITARA PGS_SITARA 3322 +colibri_tegra2 MACH_COLIBRI_TEGRA2 COLIBRI_TEGRA2 3323 +w21 MACH_W21 W21 3324 +polysat1 MACH_POLYSAT1 POLYSAT1 3325 +dataway MACH_DATAWAY DATAWAY 3326 +cobral138 MACH_COBRAL138 COBRAL138 3327 +roverpcs8 MACH_ROVERPCS8 ROVERPCS8 3328 +marvelc MACH_MARVELC MARVELC 3329 +navefihid MACH_NAVEFIHID NAVEFIHID 3330 +dm365_cv100 MACH_DM365_CV100 DM365_CV100 3331 +able MACH_ABLE ABLE 3332 +legacy MACH_LEGACY LEGACY 3333 +icong MACH_ICONG ICONG 3334 +rover_g8 MACH_ROVER_G8 ROVER_G8 3335 +t5388p MACH_T5388P T5388P 3336 +dingo MACH_DINGO DINGO 3337 +goflexhome MACH_GOFLEXHOME GOFLEXHOME 3338 diff --git a/trunk/arch/powerpc/include/asm/mmu-book3e.h b/trunk/arch/powerpc/include/asm/mmu-book3e.h index 17194fcd4040..8eaed81ea642 100644 --- a/trunk/arch/powerpc/include/asm/mmu-book3e.h +++ b/trunk/arch/powerpc/include/asm/mmu-book3e.h @@ -40,8 +40,8 @@ /* MAS registers bit definitions */ -#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000) -#define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000) +#define MAS0_TLBSEL(x) ((x << 28) & 0x30000000) +#define MAS0_ESEL(x) ((x << 16) & 0x0FFF0000) #define MAS0_NV(x) ((x) & 0x00000FFF) #define MAS0_HES 0x00004000 #define MAS0_WQ_ALLWAYS 0x00000000 @@ -50,12 +50,12 @@ #define MAS1_VALID 0x80000000 #define MAS1_IPROT 0x40000000 
-#define MAS1_TID(x) (((x) << 16) & 0x3FFF0000) +#define MAS1_TID(x) ((x << 16) & 0x3FFF0000) #define MAS1_IND 0x00002000 #define MAS1_TS 0x00001000 #define MAS1_TSIZE_MASK 0x00000f80 #define MAS1_TSIZE_SHIFT 7 -#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) +#define MAS1_TSIZE(x) ((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) #define MAS2_EPN 0xFFFFF000 #define MAS2_X0 0x00000040 diff --git a/trunk/arch/powerpc/include/asm/page.h b/trunk/arch/powerpc/include/asm/page.h index da4b20008541..53b64be40eb2 100644 --- a/trunk/arch/powerpc/include/asm/page.h +++ b/trunk/arch/powerpc/include/asm/page.h @@ -101,7 +101,7 @@ extern phys_addr_t kernstart_addr; #ifdef CONFIG_FLATMEM #define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT) -#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr) +#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr)) #endif #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) diff --git a/trunk/arch/powerpc/kernel/cpu_setup_6xx.S b/trunk/arch/powerpc/kernel/cpu_setup_6xx.S index f8cd9fba4d35..55cba4a8a959 100644 --- a/trunk/arch/powerpc/kernel/cpu_setup_6xx.S +++ b/trunk/arch/powerpc/kernel/cpu_setup_6xx.S @@ -18,7 +18,7 @@ #include _GLOBAL(__setup_cpu_603) - mflr r5 + mflr r4 BEGIN_MMU_FTR_SECTION li r10,0 mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */ @@ -27,60 +27,60 @@ BEGIN_FTR_SECTION bl __init_fpu_registers END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE) bl setup_common_caches - mtlr r5 + mtlr r4 blr _GLOBAL(__setup_cpu_604) - mflr r5 + mflr r4 bl setup_common_caches bl setup_604_hid0 - mtlr r5 + mtlr r4 blr _GLOBAL(__setup_cpu_750) - mflr r5 + mflr r4 bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 - mtlr r5 + mtlr r4 blr _GLOBAL(__setup_cpu_750cx) - mflr r5 + mflr r4 bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 bl setup_750cx - mtlr r5 + mtlr r4 blr _GLOBAL(__setup_cpu_750fx) - mflr r5 + mflr r4 bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 bl setup_750fx - mtlr r5 + mtlr r4 blr _GLOBAL(__setup_cpu_7400) - mflr r5 + mflr r4 bl __init_fpu_registers bl setup_7400_workarounds bl setup_common_caches bl setup_750_7400_hid0 - mtlr r5 + mtlr r4 blr _GLOBAL(__setup_cpu_7410) - mflr r5 + mflr r4 bl __init_fpu_registers bl setup_7410_workarounds bl setup_common_caches bl setup_750_7400_hid0 li r3,0 mtspr SPRN_L2CR2,r3 - mtlr r5 + mtlr r4 blr _GLOBAL(__setup_cpu_745x) - mflr r5 + mflr r4 bl setup_common_caches bl setup_745x_specifics - mtlr r5 + mtlr r4 blr /* Enable caches for 603's, 604, 750 & 7400 */ @@ -194,10 +194,10 @@ setup_750cx: cror 4*cr0+eq,4*cr0+eq,4*cr1+eq cror 4*cr0+eq,4*cr0+eq,4*cr2+eq bnelr - lwz r6,CPU_SPEC_FEATURES(r4) + lwz r6,CPU_SPEC_FEATURES(r5) li r7,CPU_FTR_CAN_NAP andc r6,r6,r7 - stw r6,CPU_SPEC_FEATURES(r4) + stw r6,CPU_SPEC_FEATURES(r5) blr /* 750fx specific @@ -225,12 +225,12 @@ BEGIN_FTR_SECTION andis. r11,r11,L3CR_L3E@h beq 1f END_FTR_SECTION_IFSET(CPU_FTR_L3CR) - lwz r6,CPU_SPEC_FEATURES(r4) + lwz r6,CPU_SPEC_FEATURES(r5) andi. 
r0,r6,CPU_FTR_L3_DISABLE_NAP beq 1f li r7,CPU_FTR_CAN_NAP andc r6,r6,r7 - stw r6,CPU_SPEC_FEATURES(r4) + stw r6,CPU_SPEC_FEATURES(r5) 1: mfspr r11,SPRN_HID0 diff --git a/trunk/arch/powerpc/kernel/cputable.c b/trunk/arch/powerpc/kernel/cputable.c index e8e915ce3d8d..8d74a24c5502 100644 --- a/trunk/arch/powerpc/kernel/cputable.c +++ b/trunk/arch/powerpc/kernel/cputable.c @@ -2076,8 +2076,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s) * pointer on ppc64 and booke as we are running at 0 in real mode * on ppc64 and reloc_offset is always 0 on booke. */ - if (t->cpu_setup) { - t->cpu_setup(offset, t); + if (s->cpu_setup) { + s->cpu_setup(offset, s); } #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ } diff --git a/trunk/arch/powerpc/mm/numa.c b/trunk/arch/powerpc/mm/numa.c index fd4812329570..bf5cb91f07de 100644 --- a/trunk/arch/powerpc/mm/numa.c +++ b/trunk/arch/powerpc/mm/numa.c @@ -186,7 +186,7 @@ static void unmap_cpu_from_node(unsigned long cpu) dbg("removing cpu %lu from node %d\n", cpu, node); if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { - cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); + cpumask_set_cpu(cpu, node_to_cpumask_map[node]); } else { printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n", cpu, node); @@ -1289,9 +1289,10 @@ u64 memory_hotplug_max(void) } #endif /* CONFIG_MEMORY_HOTPLUG */ -/* Virtual Processor Home Node (VPHN) support */ +/* Vrtual Processor Home Node (VPHN) support */ #ifdef CONFIG_PPC_SPLPAR -static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS]; +#define VPHN_NR_CHANGE_CTRS (8) +static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS]; static cpumask_t cpu_associativity_changes_mask; static int vphn_enabled; static void set_topology_timer(void); @@ -1302,18 +1303,16 @@ static void set_topology_timer(void); */ static void setup_cpu_associativity_change_counters(void) { - int cpu; - - /* The VPHN feature supports a maximum of 8 reference points */ - BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8); + int cpu = 0; for_each_possible_cpu(cpu) { - int i; + int i = 0; u8 *counts = vphn_cpu_change_counts[cpu]; volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; - for (i = 0; i < distance_ref_points_depth; i++) + for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) { counts[i] = hypervisor_counts[i]; + } } } @@ -1330,7 +1329,7 @@ static void setup_cpu_associativity_change_counters(void) */ static int update_cpu_associativity_changes_mask(void) { - int cpu, nr_cpus = 0; + int cpu = 0, nr_cpus = 0; cpumask_t *changes = &cpu_associativity_changes_mask; cpumask_clear(changes); @@ -1340,8 +1339,8 @@ static int update_cpu_associativity_changes_mask(void) u8 *counts = vphn_cpu_change_counts[cpu]; volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; - for (i = 0; i < distance_ref_points_depth; i++) { - if (hypervisor_counts[i] != counts[i]) { + for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) { + if (hypervisor_counts[i] > counts[i]) { counts[i] = hypervisor_counts[i]; changed = 1; } @@ -1355,11 +1354,8 @@ static int update_cpu_associativity_changes_mask(void) return nr_cpus; } -/* - * 6 64-bit registers unpacked into 12 32-bit associativity values. To form - * the complete property we have to add the length in the first cell. 
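Both versions of the VPHN comment in this hunk describe the same layout: six 64-bit registers returned by the hypervisor carry up to twelve 32-bit associativity values as packed 16-bit fields, and the "+ 1" variant additionally reserves the first cell of the unpacked property for its length. A stand-alone sketch of that unpacking follows; the names are illustrative, and the packed data is taken directly as 16-bit fields to sidestep the endianness questions of reinterpreting u64s.

#include <stdint.h>
#include <stdio.h>

#define VPHN_FIELD_UNUSED 0xffffu   /* trailing filler after the last domain */
#define VPHN_FIELD_MSB    0x8000u   /* set: value fits in the low 15 bits */
#define VPHN_FIELD_MASK   0x7fffu

/* Unpack 16-bit fields into 32-bit associativity values; out[0] gets the
 * element count, mirroring the "+ 1" length cell described above. */
static int vphn_unpack(const uint16_t *field, int nfields, uint32_t *out)
{
    const uint16_t *end = field + nfields;
    int idx = 1;

    while (field < end && *field != VPHN_FIELD_UNUSED) {
        if (*field & VPHN_FIELD_MSB) {
            /* domain number carried in the low 15 bits of one field */
            out[idx++] = *field++ & VPHN_FIELD_MASK;
        } else {
            /* 32-bit value split across this field and the next one */
            uint32_t hi = *field++;
            out[idx++] = (hi << 16) | *field++;
        }
    }
    out[0] = idx - 1;               /* length cell of the property */
    return idx - 1;
}

int main(void)
{
    const uint16_t regs[] = { 0x8002, 0x8005, 0xffff, 0xffff };
    uint32_t prop[8] = { 0 };
    int n = vphn_unpack(regs, 4, prop);

    printf("%d domains: %u %u\n", n, prop[1], prop[2]);
    return 0;
}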
- */ -#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1) +/* 6 64-bit registers unpacked into 12 32-bit associativity values */ +#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32)) /* * Convert the associativity domain numbers returned from the hypervisor @@ -1367,14 +1363,15 @@ static int update_cpu_associativity_changes_mask(void) */ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) { - int i, nr_assoc_doms = 0; + int i = 0; + int nr_assoc_doms = 0; const u16 *field = (const u16*) packed; #define VPHN_FIELD_UNUSED (0xffff) #define VPHN_FIELD_MSB (0x8000) #define VPHN_FIELD_MASK (~VPHN_FIELD_MSB) - for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) { + for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) { if (*field == VPHN_FIELD_UNUSED) { /* All significant fields processed, and remaining * fields contain the reserved value of all 1's. @@ -1382,12 +1379,14 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) */ unpacked[i] = *((u32*)field); field += 2; - } else if (*field & VPHN_FIELD_MSB) { + } + else if (*field & VPHN_FIELD_MSB) { /* Data is in the lower 15 bits of this field */ unpacked[i] = *field & VPHN_FIELD_MASK; field++; nr_assoc_doms++; - } else { + } + else { /* Data is in the lower 15 bits of this field * concatenated with the next 16 bit field */ @@ -1397,9 +1396,6 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) } } - /* The first cell contains the length of the property */ - unpacked[0] = nr_assoc_doms; - return nr_assoc_doms; } @@ -1409,7 +1405,7 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) */ static long hcall_vphn(unsigned long cpu, unsigned int *associativity) { - long rc; + long rc = 0; long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; u64 flags = 1; int hwcpu = get_hard_smp_processor_id(cpu); @@ -1423,7 +1419,7 @@ static long hcall_vphn(unsigned long cpu, unsigned int *associativity) static long vphn_get_associativity(unsigned long cpu, unsigned int *associativity) { - long rc; + long rc = 0; rc = hcall_vphn(cpu, associativity); @@ -1449,9 +1445,9 @@ static long vphn_get_associativity(unsigned long cpu, */ int arch_update_cpu_topology(void) { - int cpu, nid, old_nid; + int cpu = 0, nid = 0, old_nid = 0; unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; - struct sys_device *sysdev; + struct sys_device *sysdev = NULL; for_each_cpu_mask(cpu, cpu_associativity_changes_mask) { vphn_get_associativity(cpu, associativity); @@ -1516,8 +1512,7 @@ int start_topology_update(void) { int rc = 0; - if (firmware_has_feature(FW_FEATURE_VPHN) && - get_lppaca()->shared_proc) { + if (firmware_has_feature(FW_FEATURE_VPHN)) { vphn_enabled = 1; setup_cpu_associativity_change_counters(); init_timer_deferrable(&topology_timer); diff --git a/trunk/arch/powerpc/platforms/pseries/lpar.c b/trunk/arch/powerpc/platforms/pseries/lpar.c index ca5d5898d320..5d3ea9f60dd7 100644 --- a/trunk/arch/powerpc/platforms/pseries/lpar.c +++ b/trunk/arch/powerpc/platforms/pseries/lpar.c @@ -713,13 +713,6 @@ EXPORT_SYMBOL(arch_free_page); /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ extern long hcall_tracepoint_refcount; -/* - * Since the tracing code might execute hcalls we need to guard against - * recursion. One example of this are spinlocks calling H_YIELD on - * shared processor partitions. 
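The lpar.c hunk that follows removes the re-entrancy guard the deleted comment explains: the trace hooks can themselves issue hcalls (spinlocks calling H_YIELD, for instance), so the kernel code kept a per-CPU depth counter, with interrupts disabled around it, and skipped tracing when already inside a hook. A user-space sketch of the same guard pattern, substituting a thread-local counter for the per-CPU variable (names are illustrative, and the interrupt masking is omitted since it has no user-space equivalent):

#include <stdio.h>

static _Thread_local unsigned int trace_depth;  /* stand-in for hcall_trace_depth */

static void do_hcall(long opcode);

/* Skip the instrumented path if it is already active on this thread. */
static void trace_hcall_entry(long opcode)
{
    if (trace_depth)
        return;                     /* already tracing: avoid recursion */

    trace_depth++;
    printf("hcall entry, opcode=%ld\n", opcode);
    do_hcall(0x2a);                 /* tracing itself issues an hcall */
    trace_depth--;
}

static void do_hcall(long opcode)
{
    trace_hcall_entry(opcode);
    /* ... the actual hypervisor call would go here ... */
}

int main(void)
{
    do_hcall(0x64);                 /* traced once; the inner hcall is not */
    return 0;
}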
- */ -static DEFINE_PER_CPU(unsigned int, hcall_trace_depth); - void hcall_tracepoint_regfunc(void) { hcall_tracepoint_refcount++; @@ -732,42 +725,12 @@ void hcall_tracepoint_unregfunc(void) void __trace_hcall_entry(unsigned long opcode, unsigned long *args) { - unsigned long flags; - unsigned int *depth; - - local_irq_save(flags); - - depth = &__get_cpu_var(hcall_trace_depth); - - if (*depth) - goto out; - - (*depth)++; trace_hcall_entry(opcode, args); - (*depth)--; - -out: - local_irq_restore(flags); } void __trace_hcall_exit(long opcode, unsigned long retval, unsigned long *retbuf) { - unsigned long flags; - unsigned int *depth; - - local_irq_save(flags); - - depth = &__get_cpu_var(hcall_trace_depth); - - if (*depth) - goto out; - - (*depth)++; trace_hcall_exit(opcode, retval, retbuf); - (*depth)--; - -out: - local_irq_restore(flags); } #endif diff --git a/trunk/arch/x86/kernel/acpi/sleep.c b/trunk/arch/x86/kernel/acpi/sleep.c index 68d1537b8c81..4d9ebbab2230 100644 --- a/trunk/arch/x86/kernel/acpi/sleep.c +++ b/trunk/arch/x86/kernel/acpi/sleep.c @@ -12,8 +12,10 @@ #include #include #include + +#ifdef CONFIG_X86_32 #include -#include +#endif #include "realmode/wakeup.h" #include "sleep.h" @@ -147,15 +149,6 @@ void __init acpi_reserve_wakeup_memory(void) memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP"); } -int __init acpi_configure_wakeup_memory(void) -{ - if (acpi_realmode) - set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT); - - return 0; -} -arch_initcall(acpi_configure_wakeup_memory); - static int __init acpi_sleep_setup(char *str) { diff --git a/trunk/fs/btrfs/acl.c b/trunk/fs/btrfs/acl.c index 9c949348510b..15b5ca2a2606 100644 --- a/trunk/fs/btrfs/acl.c +++ b/trunk/fs/btrfs/acl.c @@ -37,9 +37,6 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type) char *value = NULL; struct posix_acl *acl; - if (!IS_POSIXACL(inode)) - return NULL; - acl = get_cached_acl(inode, type); if (acl != ACL_NOT_CACHED) return acl; @@ -87,9 +84,6 @@ static int btrfs_xattr_acl_get(struct dentry *dentry, const char *name, struct posix_acl *acl; int ret = 0; - if (!IS_POSIXACL(dentry->d_inode)) - return -EOPNOTSUPP; - acl = btrfs_get_acl(dentry->d_inode, type); if (IS_ERR(acl)) diff --git a/trunk/fs/btrfs/compression.c b/trunk/fs/btrfs/compression.c index 4d2110eafe29..f745287fbf2e 100644 --- a/trunk/fs/btrfs/compression.c +++ b/trunk/fs/btrfs/compression.c @@ -562,7 +562,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, u64 em_len; u64 em_start; struct extent_map *em; - int ret = -ENOMEM; + int ret; u32 *sums; tree = &BTRFS_I(inode)->io_tree; @@ -577,9 +577,6 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, compressed_len = em->block_len; cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); - if (!cb) - goto out; - atomic_set(&cb->pending_bios, 0); cb->errors = 0; cb->inode = inode; @@ -600,18 +597,13 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE; - cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages, + cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); - if (!cb->compressed_pages) - goto fail1; - bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; for (page_index = 0; page_index < nr_pages; page_index++) { cb->compressed_pages[page_index] = alloc_page(GFP_NOFS | __GFP_HIGHMEM); - if (!cb->compressed_pages[page_index]) - goto fail2; } cb->nr_pages = nr_pages; @@ 
-622,8 +614,6 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, cb->len = uncompressed_len; comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); - if (!comp_bio) - goto fail2; comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; atomic_inc(&cb->pending_bios); @@ -691,17 +681,6 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, bio_put(comp_bio); return 0; - -fail2: - for (page_index = 0; page_index < nr_pages; page_index++) - free_page((unsigned long)cb->compressed_pages[page_index]); - - kfree(cb->compressed_pages); -fail1: - kfree(cb); -out: - free_extent_map(em); - return ret; } static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES]; @@ -921,7 +900,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, return ret; } -void btrfs_exit_compress(void) +void __exit btrfs_exit_compress(void) { free_workspaces(); } diff --git a/trunk/fs/btrfs/disk-io.c b/trunk/fs/btrfs/disk-io.c index fdce8799b98d..b531c36455d8 100644 --- a/trunk/fs/btrfs/disk-io.c +++ b/trunk/fs/btrfs/disk-io.c @@ -1550,7 +1550,6 @@ static int transaction_kthread(void *arg) spin_unlock(&root->fs_info->new_trans_lock); trans = btrfs_join_transaction(root, 1); - BUG_ON(IS_ERR(trans)); if (transid == trans->transid) { ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); @@ -2454,14 +2453,10 @@ int btrfs_commit_super(struct btrfs_root *root) up_write(&root->fs_info->cleanup_work_sem); trans = btrfs_join_transaction(root, 1); - if (IS_ERR(trans)) - return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); /* run commit again to drop the original snapshot */ trans = btrfs_join_transaction(root, 1); - if (IS_ERR(trans)) - return PTR_ERR(trans); btrfs_commit_transaction(trans, root); ret = btrfs_write_and_wait_transaction(NULL, root); BUG_ON(ret); @@ -2559,8 +2554,6 @@ int close_ctree(struct btrfs_root *root) kfree(fs_info->chunk_root); kfree(fs_info->dev_root); kfree(fs_info->csum_root); - kfree(fs_info); - return 0; } diff --git a/trunk/fs/btrfs/export.c b/trunk/fs/btrfs/export.c index ff27d7a477b2..9786963b07e5 100644 --- a/trunk/fs/btrfs/export.c +++ b/trunk/fs/btrfs/export.c @@ -171,8 +171,6 @@ static struct dentry *btrfs_get_parent(struct dentry *child) int ret; path = btrfs_alloc_path(); - if (!path) - return ERR_PTR(-ENOMEM); if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) { key.objectid = root->root_key.objectid; diff --git a/trunk/fs/btrfs/extent-tree.c b/trunk/fs/btrfs/extent-tree.c index 4e7e012ad667..b55269340cec 100644 --- a/trunk/fs/btrfs/extent-tree.c +++ b/trunk/fs/btrfs/extent-tree.c @@ -320,6 +320,11 @@ static int caching_kthread(void *data) if (!path) return -ENOMEM; + exclude_super_stripes(extent_root, block_group); + spin_lock(&block_group->space_info->lock); + block_group->space_info->bytes_readonly += block_group->bytes_super; + spin_unlock(&block_group->space_info->lock); + last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); /* @@ -462,10 +467,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, cache->cached = BTRFS_CACHE_NO; } spin_unlock(&cache->lock); - if (ret == 1) { - free_excluded_extents(fs_info->extent_root, cache); + if (ret == 1) return 0; - } } if (load_cache_only) @@ -3341,10 +3344,8 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, u64 reserved; u64 max_reclaim; u64 reclaimed = 0; - long time_left; int pause = 1; int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT; - int loops = 0; block_rsv = 
&root->fs_info->delalloc_block_rsv; space_info = block_rsv->space_info; @@ -3357,7 +3358,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, max_reclaim = min(reserved, to_reclaim); - while (loops < 1024) { + while (1) { /* have the flusher threads jump in and do some IO */ smp_mb(); nr_pages = min_t(unsigned long, nr_pages, @@ -3365,12 +3366,8 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); spin_lock(&space_info->lock); - if (reserved > space_info->bytes_reserved) { - loops = 0; + if (reserved > space_info->bytes_reserved) reclaimed += reserved - space_info->bytes_reserved; - } else { - loops++; - } reserved = space_info->bytes_reserved; spin_unlock(&space_info->lock); @@ -3381,12 +3378,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, return -EAGAIN; __set_current_state(TASK_INTERRUPTIBLE); - time_left = schedule_timeout(pause); - - /* We were interrupted, exit */ - if (time_left) - break; - + schedule_timeout(pause); pause <<= 1; if (pause > HZ / 10) pause = HZ / 10; @@ -3596,20 +3588,8 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, if (num_bytes > 0) { if (dest) { - spin_lock(&dest->lock); - if (!dest->full) { - u64 bytes_to_add; - - bytes_to_add = dest->size - dest->reserved; - bytes_to_add = min(num_bytes, bytes_to_add); - dest->reserved += bytes_to_add; - if (dest->reserved >= dest->size) - dest->full = 1; - num_bytes -= bytes_to_add; - } - spin_unlock(&dest->lock); - } - if (num_bytes) { + block_rsv_add_bytes(dest, num_bytes, 0); + } else { spin_lock(&space_info->lock); space_info->bytes_reserved -= num_bytes; spin_unlock(&space_info->lock); @@ -4032,7 +4012,6 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) num_bytes = ALIGN(num_bytes, root->sectorsize); atomic_dec(&BTRFS_I(inode)->outstanding_extents); - WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0); spin_lock(&BTRFS_I(inode)->accounting_lock); nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents); @@ -5654,7 +5633,6 @@ use_block_rsv(struct btrfs_trans_handle *trans, struct btrfs_root *root, u32 blocksize) { struct btrfs_block_rsv *block_rsv; - struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; int ret; block_rsv = get_block_rsv(trans, root); @@ -5662,39 +5640,14 @@ use_block_rsv(struct btrfs_trans_handle *trans, if (block_rsv->size == 0) { ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize, 0); - /* - * If we couldn't reserve metadata bytes try and use some from - * the global reserve. 
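This use_block_rsv() hunk removes a fallback path: when the transaction's own reserve cannot cover the requested block, the newer code borrowed the bytes from the filesystem-wide global reserve before giving up. A minimal sketch of that two-tier reservation pattern, using made-up pool structures rather than btrfs's real ones:

#include <stdbool.h>
#include <stdio.h>

struct reserve {
    const char   *name;
    unsigned long reserved;   /* bytes currently set aside */
    unsigned long used;       /* bytes already handed out  */
};

/* Try to take 'bytes' from one pool; fail rather than overcommit. */
static bool reserve_use_bytes(struct reserve *rsv, unsigned long bytes)
{
    if (rsv->used + bytes > rsv->reserved)
        return false;
    rsv->used += bytes;
    return true;
}

/* Prefer the local pool, fall back to the shared/global one. Returns the
 * pool that satisfied the request, or NULL on failure. */
static struct reserve *use_reserve(struct reserve *local,
                                   struct reserve *global,
                                   unsigned long bytes)
{
    if (reserve_use_bytes(local, bytes))
        return local;
    if (local != global && reserve_use_bytes(global, bytes))
        return global;
    return NULL;
}

int main(void)
{
    struct reserve local  = { "local",  4096,  4096 };  /* already exhausted */
    struct reserve global = { "global", 65536, 0 };

    struct reserve *src = use_reserve(&local, &global, 4096);
    printf("allocation served from: %s\n", src ? src->name : "nowhere");
    return 0;
}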
- */ - if (ret && block_rsv != global_rsv) { - ret = block_rsv_use_bytes(global_rsv, blocksize); - if (!ret) - return global_rsv; - return ERR_PTR(ret); - } else if (ret) { + if (ret) return ERR_PTR(ret); - } return block_rsv; } ret = block_rsv_use_bytes(block_rsv, blocksize); if (!ret) return block_rsv; - if (ret) { - WARN_ON(1); - ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize, - 0); - if (!ret) { - spin_lock(&block_rsv->lock); - block_rsv->size += blocksize; - spin_unlock(&block_rsv->lock); - return block_rsv; - } else if (ret && block_rsv != global_rsv) { - ret = block_rsv_use_bytes(global_rsv, blocksize); - if (!ret) - return global_rsv; - } - } return ERR_PTR(-ENOSPC); } @@ -6268,8 +6221,6 @@ int btrfs_drop_snapshot(struct btrfs_root *root, BUG_ON(!wc); trans = btrfs_start_transaction(tree_root, 0); - BUG_ON(IS_ERR(trans)); - if (block_rsv) trans->block_rsv = block_rsv; @@ -6367,7 +6318,6 @@ int btrfs_drop_snapshot(struct btrfs_root *root, btrfs_end_transaction_throttle(trans, tree_root); trans = btrfs_start_transaction(tree_root, 0); - BUG_ON(IS_ERR(trans)); if (block_rsv) trans->block_rsv = block_rsv; } @@ -6496,8 +6446,6 @@ static noinline int relocate_inode_pages(struct inode *inode, u64 start, int ret = 0; ra = kzalloc(sizeof(*ra), GFP_NOFS); - if (!ra) - return -ENOMEM; mutex_lock(&inode->i_mutex); first_index = start >> PAGE_CACHE_SHIFT; @@ -7529,7 +7477,7 @@ int btrfs_drop_dead_reloc_roots(struct btrfs_root *root) BUG_ON(reloc_root->commit_root != NULL); while (1) { trans = btrfs_join_transaction(root, 1); - BUG_ON(IS_ERR(trans)); + BUG_ON(!trans); mutex_lock(&root->fs_info->drop_mutex); ret = btrfs_drop_snapshot(trans, reloc_root); @@ -7587,7 +7535,7 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root) if (found) { trans = btrfs_start_transaction(root, 1); - BUG_ON(IS_ERR(trans)); + BUG_ON(!trans); ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); } @@ -7831,7 +7779,7 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root, trans = btrfs_start_transaction(extent_root, 1); - BUG_ON(IS_ERR(trans)); + BUG_ON(!trans); if (extent_key->objectid == 0) { ret = del_extent_zero(trans, extent_root, path, extent_key); @@ -8322,13 +8270,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) if (block_group->cached == BTRFS_CACHE_STARTED) wait_block_group_cache_done(block_group); - /* - * We haven't cached this block group, which means we could - * possibly have excluded extents on this block group. - */ - if (block_group->cached == BTRFS_CACHE_NO) - free_excluded_extents(info->extent_root, block_group); - btrfs_remove_free_space_cache(block_group); btrfs_put_block_group(block_group); @@ -8443,13 +8384,6 @@ int btrfs_read_block_groups(struct btrfs_root *root) cache->flags = btrfs_block_group_flags(&cache->item); cache->sectorsize = root->sectorsize; - /* - * We need to exclude the super stripes now so that the space - * info has super bytes accounted for, otherwise we'll think - * we have more space than we actually do. - */ - exclude_super_stripes(root, cache); - /* * check for two cases, either we are full, and therefore * don't need to bother with the caching work since we won't @@ -8458,10 +8392,12 @@ int btrfs_read_block_groups(struct btrfs_root *root) * time, particularly in the full case. 
*/ if (found_key.offset == btrfs_block_group_used(&cache->item)) { + exclude_super_stripes(root, cache); cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; free_excluded_extents(root, cache); } else if (btrfs_block_group_used(&cache->item) == 0) { + exclude_super_stripes(root, cache); cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; add_new_free_space(cache, root->fs_info, diff --git a/trunk/fs/btrfs/extent_io.c b/trunk/fs/btrfs/extent_io.c index 5e76a474cb7e..2e993cf1766e 100644 --- a/trunk/fs/btrfs/extent_io.c +++ b/trunk/fs/btrfs/extent_io.c @@ -1865,7 +1865,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num, bio_get(bio); if (tree->ops && tree->ops->submit_bio_hook) - ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio, + tree->ops->submit_bio_hook(page->mapping->host, rw, bio, mirror_num, bio_flags, start); else submit_bio(rw, bio); @@ -1920,8 +1920,6 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, nr = bio_get_nr_vecs(bdev); bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); - if (!bio) - return -ENOMEM; bio_add_page(bio, page, page_size, offset); bio->bi_end_io = end_io_func; @@ -2128,7 +2126,7 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page, ret = __extent_read_full_page(tree, page, get_extent, &bio, 0, &bio_flags); if (bio) - ret = submit_one_bio(READ, bio, 0, bio_flags); + submit_one_bio(READ, bio, 0, bio_flags); return ret; } diff --git a/trunk/fs/btrfs/file-item.c b/trunk/fs/btrfs/file-item.c index 4f19a3e1bf32..a562a250ae77 100644 --- a/trunk/fs/btrfs/file-item.c +++ b/trunk/fs/btrfs/file-item.c @@ -536,8 +536,6 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, root = root->fs_info->csum_root; path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; while (1) { key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; @@ -550,10 +548,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, if (path->slots[0] == 0) goto out; path->slots[0]--; - } else if (ret < 0) { - goto out; } - leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); diff --git a/trunk/fs/btrfs/file.c b/trunk/fs/btrfs/file.c index c1d3a818731a..c800d58f3013 100644 --- a/trunk/fs/btrfs/file.c +++ b/trunk/fs/btrfs/file.c @@ -793,12 +793,8 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file, for (i = 0; i < num_pages; i++) { pages[i] = grab_cache_page(inode->i_mapping, index + i); if (!pages[i]) { - int c; - for (c = i - 1; c >= 0; c--) { - unlock_page(pages[c]); - page_cache_release(pages[c]); - } - return -ENOMEM; + err = -ENOMEM; + BUG_ON(1); } wait_on_page_writeback(pages[i]); } @@ -950,10 +946,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / (sizeof(struct page *))); pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); - if (!pages) { - ret = -ENOMEM; - goto out; - } /* generic_write_checks can change our pos */ start_pos = pos; @@ -992,8 +984,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, size_t write_bytes = min(iov_iter_count(&i), nrptrs * (size_t)PAGE_CACHE_SIZE - offset); - size_t num_pages = (write_bytes + offset + - PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; WARN_ON(num_pages > nrptrs); memset(pages, 0, sizeof(struct page *) * nrptrs); @@ -1023,8 +1015,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, copied = btrfs_copy_from_user(pos, num_pages, write_bytes, 
pages, &i); - dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; if (num_pages > dirty_pages) { if (copied > 0) diff --git a/trunk/fs/btrfs/free-space-cache.c b/trunk/fs/btrfs/free-space-cache.c index a0390657451b..60d684266959 100644 --- a/trunk/fs/btrfs/free-space-cache.c +++ b/trunk/fs/btrfs/free-space-cache.c @@ -987,18 +987,11 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, return entry; } -static inline void -__unlink_free_space(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info) -{ - rb_erase(&info->offset_index, &block_group->free_space_offset); - block_group->free_extents--; -} - static void unlink_free_space(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *info) { - __unlink_free_space(block_group, info); + rb_erase(&info->offset_index, &block_group->free_space_offset); + block_group->free_extents--; block_group->free_space -= info->bytes; } @@ -1023,18 +1016,14 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) u64 max_bytes; u64 bitmap_bytes; u64 extent_bytes; - u64 size = block_group->key.offset; /* * The goal is to keep the total amount of memory used per 1gb of space * at or below 32k, so we need to adjust how much memory we allow to be * used by extent based free space tracking */ - if (size < 1024 * 1024 * 1024) - max_bytes = MAX_CACHE_BYTES_PER_GIG; - else - max_bytes = MAX_CACHE_BYTES_PER_GIG * - div64_u64(size, 1024 * 1024 * 1024); + max_bytes = MAX_CACHE_BYTES_PER_GIG * + (div64_u64(block_group->key.offset, 1024 * 1024 * 1024)); /* * we want to account for 1 more bitmap than what we have so we can make @@ -1182,16 +1171,6 @@ static void add_new_bitmap(struct btrfs_block_group_cache *block_group, recalculate_thresholds(block_group); } -static void free_bitmap(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *bitmap_info) -{ - unlink_free_space(block_group, bitmap_info); - kfree(bitmap_info->bitmap); - kfree(bitmap_info); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); -} - static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *bitmap_info, u64 *offset, u64 *bytes) @@ -1216,7 +1195,6 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro */ search_start = *offset; search_bytes = *bytes; - search_bytes = min(search_bytes, end - search_start + 1); ret = search_bitmap(block_group, bitmap_info, &search_start, &search_bytes); BUG_ON(ret < 0 || search_start != *offset); @@ -1233,8 +1211,13 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro if (*bytes) { struct rb_node *next = rb_next(&bitmap_info->offset_index); - if (!bitmap_info->bytes) - free_bitmap(block_group, bitmap_info); + if (!bitmap_info->bytes) { + unlink_free_space(block_group, bitmap_info); + kfree(bitmap_info->bitmap); + kfree(bitmap_info); + block_group->total_bitmaps--; + recalculate_thresholds(block_group); + } /* * no entry after this bitmap, but we still have bytes to @@ -1267,8 +1250,13 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro return -EAGAIN; goto again; - } else if (!bitmap_info->bytes) - free_bitmap(block_group, bitmap_info); + } else if (!bitmap_info->bytes) { + unlink_free_space(block_group, bitmap_info); + kfree(bitmap_info->bitmap); + kfree(bitmap_info); + block_group->total_bitmaps--; + 
recalculate_thresholds(block_group); + } return 0; } @@ -1371,14 +1359,22 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, return ret; } -bool try_merge_free_space(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info, bool update_stat) +int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, + u64 offset, u64 bytes) { - struct btrfs_free_space *left_info; - struct btrfs_free_space *right_info; - bool merged = false; - u64 offset = info->offset; - u64 bytes = info->bytes; + struct btrfs_free_space *right_info = NULL; + struct btrfs_free_space *left_info = NULL; + struct btrfs_free_space *info = NULL; + int ret = 0; + + info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); + if (!info) + return -ENOMEM; + + info->offset = offset; + info->bytes = bytes; + + spin_lock(&block_group->tree_lock); /* * first we want to see if there is free space adjacent to the range we @@ -1392,62 +1388,37 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, else left_info = tree_search_offset(block_group, offset - 1, 0, 0); + /* + * If there was no extent directly to the left or right of this new + * extent then we know we're going to have to allocate a new extent, so + * before we do that see if we need to drop this into a bitmap + */ + if ((!left_info || left_info->bitmap) && + (!right_info || right_info->bitmap)) { + ret = insert_into_bitmap(block_group, info); + + if (ret < 0) { + goto out; + } else if (ret) { + ret = 0; + goto out; + } + } + if (right_info && !right_info->bitmap) { - if (update_stat) - unlink_free_space(block_group, right_info); - else - __unlink_free_space(block_group, right_info); + unlink_free_space(block_group, right_info); info->bytes += right_info->bytes; kfree(right_info); - merged = true; } if (left_info && !left_info->bitmap && left_info->offset + left_info->bytes == offset) { - if (update_stat) - unlink_free_space(block_group, left_info); - else - __unlink_free_space(block_group, left_info); + unlink_free_space(block_group, left_info); info->offset = left_info->offset; info->bytes += left_info->bytes; kfree(left_info); - merged = true; } - return merged; -} - -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, - u64 offset, u64 bytes) -{ - struct btrfs_free_space *info; - int ret = 0; - - info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); - if (!info) - return -ENOMEM; - - info->offset = offset; - info->bytes = bytes; - - spin_lock(&block_group->tree_lock); - - if (try_merge_free_space(block_group, info, true)) - goto link; - - /* - * There was no extent directly to the left or right of this new - * extent then we know we're going to have to allocate a new extent, so - * before we do that see if we need to drop this into a bitmap - */ - ret = insert_into_bitmap(block_group, info); - if (ret < 0) { - goto out; - } else if (ret) { - ret = 0; - goto out; - } -link: ret = link_free_space(block_group, info); if (ret) kfree(info); @@ -1650,7 +1621,6 @@ __btrfs_return_cluster_to_free_space( node = rb_next(&entry->offset_index); rb_erase(&entry->offset_index, &cluster->root); BUG_ON(entry->bitmap); - try_merge_free_space(block_group, entry, false); tree_insert_offset(&block_group->free_space_offset, entry->offset, &entry->offset_index, 0); } @@ -1715,8 +1685,13 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, ret = offset; if (entry->bitmap) { bitmap_clear_bits(block_group, entry, offset, bytes); - if (!entry->bytes) - 
free_bitmap(block_group, entry); + if (!entry->bytes) { + unlink_free_space(block_group, entry); + kfree(entry->bitmap); + kfree(entry); + block_group->total_bitmaps--; + recalculate_thresholds(block_group); + } } else { unlink_free_space(block_group, entry); entry->offset += bytes; @@ -1814,8 +1789,6 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, ret = search_start; bitmap_clear_bits(block_group, entry, ret, bytes); - if (entry->bytes == 0) - free_bitmap(block_group, entry); out: spin_unlock(&cluster->lock); spin_unlock(&block_group->tree_lock); @@ -1869,26 +1842,15 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, entry->offset += bytes; entry->bytes -= bytes; - if (entry->bytes == 0) + if (entry->bytes == 0) { rb_erase(&entry->offset_index, &cluster->root); + kfree(entry); + } break; } out: spin_unlock(&cluster->lock); - if (!ret) - return 0; - - spin_lock(&block_group->tree_lock); - - block_group->free_space -= bytes; - if (entry->bytes == 0) { - block_group->free_extents--; - kfree(entry); - } - - spin_unlock(&block_group->tree_lock); - return ret; } diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c index bcc461a9695f..160b55b3e132 100644 --- a/trunk/fs/btrfs/inode.c +++ b/trunk/fs/btrfs/inode.c @@ -416,7 +416,7 @@ static noinline int compress_file_range(struct inode *inode, } if (start == 0) { trans = btrfs_join_transaction(root, 1); - BUG_ON(IS_ERR(trans)); + BUG_ON(!trans); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -612,7 +612,6 @@ static noinline int submit_compressed_extents(struct inode *inode, GFP_NOFS); trans = btrfs_join_transaction(root, 1); - BUG_ON(IS_ERR(trans)); ret = btrfs_reserve_extent(trans, root, async_extent->compressed_size, async_extent->compressed_size, @@ -772,7 +771,7 @@ static noinline int cow_file_range(struct inode *inode, BUG_ON(root == root->fs_info->tree_root); trans = btrfs_join_transaction(root, 1); - BUG_ON(IS_ERR(trans)); + BUG_ON(!trans); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -1050,7 +1049,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, } else { trans = btrfs_join_transaction(root, 1); } - BUG_ON(IS_ERR(trans)); + BUG_ON(!trans); cow_start = (u64)-1; cur_offset = start; @@ -1558,7 +1557,6 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work) out_page: unlock_page(page); page_cache_release(page); - kfree(fixup); } /* @@ -1705,7 +1703,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) trans = btrfs_join_transaction_nolock(root, 1); else trans = btrfs_join_transaction(root, 1); - BUG_ON(IS_ERR(trans)); + BUG_ON(!trans); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_update_inode(trans, root, inode); @@ -1722,7 +1720,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) trans = btrfs_join_transaction_nolock(root, 1); else trans = btrfs_join_transaction(root, 1); - BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -2357,7 +2354,6 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) */ if (is_bad_inode(inode)) { trans = btrfs_start_transaction(root, 0); - BUG_ON(IS_ERR(trans)); btrfs_orphan_del(trans, inode); btrfs_end_transaction(trans, root); iput(inode); @@ -2385,7 +2381,6 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) if 
(root->orphan_block_rsv || root->orphan_item_inserted) { trans = btrfs_join_transaction(root, 1); - BUG_ON(IS_ERR(trans)); btrfs_end_transaction(trans, root); } @@ -2646,7 +2641,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; - goto out; + goto err; } path->leave_spinning = 1; @@ -2719,10 +2714,9 @@ static int check_path_shared(struct btrfs_root *root, struct extent_buffer *eb; int level; u64 refs = 1; + int uninitialized_var(ret); for (level = 0; level < BTRFS_MAX_LEVEL; level++) { - int ret; - if (!path->nodes[level]) break; eb = path->nodes[level]; @@ -2733,7 +2727,7 @@ static int check_path_shared(struct btrfs_root *root, if (refs > 1) return 1; } - return 0; + return ret; /* XXX callers? */ } /* @@ -4140,7 +4134,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) } srcu_read_unlock(&root->fs_info->subvol_srcu, index); - if (!IS_ERR(inode) && root != sub_root) { + if (root != sub_root) { down_read(&root->fs_info->cleanup_work_sem); if (!(inode->i_sb->s_flags & MS_RDONLY)) btrfs_orphan_cleanup(sub_root); @@ -4353,8 +4347,6 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) trans = btrfs_join_transaction_nolock(root, 1); else trans = btrfs_join_transaction(root, 1); - if (IS_ERR(trans)) - return PTR_ERR(trans); btrfs_set_trans_block_group(trans, inode); if (nolock) ret = btrfs_end_transaction_nolock(trans, root); @@ -4380,7 +4372,6 @@ void btrfs_dirty_inode(struct inode *inode) return; trans = btrfs_join_transaction(root, 1); - BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); ret = btrfs_update_inode(trans, root, inode); @@ -5185,8 +5176,6 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, em = NULL; btrfs_release_path(root, path); trans = btrfs_join_transaction(root, 1); - if (IS_ERR(trans)) - return ERR_CAST(trans); goto again; } map = kmap(page); @@ -5291,8 +5280,8 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, btrfs_drop_extent_cache(inode, start, start + len - 1, 0); trans = btrfs_join_transaction(root, 0); - if (IS_ERR(trans)) - return ERR_CAST(trans); + if (!trans) + return ERR_PTR(-ENOMEM); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -5516,7 +5505,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, * while we look for nocow cross refs */ trans = btrfs_join_transaction(root, 0); - if (IS_ERR(trans)) + if (!trans) goto must_cow; if (can_nocow_odirect(trans, inode, start, len) == 1) { @@ -5651,7 +5640,7 @@ static void btrfs_endio_direct_write(struct bio *bio, int err) BUG_ON(!ordered); trans = btrfs_join_transaction(root, 1); - if (IS_ERR(trans)) { + if (!trans) { err = -ENOMEM; goto out; } diff --git a/trunk/fs/btrfs/ioctl.c b/trunk/fs/btrfs/ioctl.c index 02d224e8c83f..a506a22b522a 100644 --- a/trunk/fs/btrfs/ioctl.c +++ b/trunk/fs/btrfs/ioctl.c @@ -203,7 +203,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) trans = btrfs_join_transaction(root, 1); - BUG_ON(IS_ERR(trans)); + BUG_ON(!trans); ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); @@ -907,10 +907,6 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, if (new_size > old_size) { trans = btrfs_start_transaction(root, 0); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - goto out_unlock; - } ret = btrfs_grow_device(trans, device, new_size); btrfs_commit_transaction(trans, root); } else { @@ -1902,10 +1898,7 @@ static noinline long 
 			memcpy(&new_key, &key, sizeof(new_key));
 			new_key.objectid = inode->i_ino;
-			if (off <= key.offset)
-				new_key.offset = key.offset + destoff - off;
-			else
-				new_key.offset = destoff;
+			new_key.offset = key.offset + destoff - off;
 			trans = btrfs_start_transaction(root, 1);
 			if (IS_ERR(trans)) {
@@ -2089,7 +2082,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
 	ret = -ENOMEM;
 	trans = btrfs_start_ioctl_transaction(root, 0);
-	if (IS_ERR(trans))
+	if (!trans)
 		goto out_drop;
 	file->private_data = trans;
@@ -2145,9 +2138,9 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
 	path->leave_spinning = 1;
 	trans = btrfs_start_transaction(root, 1);
-	if (IS_ERR(trans)) {
+	if (!trans) {
 		btrfs_free_path(path);
-		return PTR_ERR(trans);
+		return -ENOMEM;
 	}
 	dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
@@ -2341,8 +2334,6 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp
 	u64 transid;
 	trans = btrfs_start_transaction(root, 0);
-	if (IS_ERR(trans))
-		return PTR_ERR(trans);
 	transid = trans->transid;
 	btrfs_commit_transaction_async(trans, root, 0);
diff --git a/trunk/fs/btrfs/ordered-data.c b/trunk/fs/btrfs/ordered-data.c
index 083a55477375..2b61e1ddcd99 100644
--- a/trunk/fs/btrfs/ordered-data.c
+++ b/trunk/fs/btrfs/ordered-data.c
@@ -141,7 +141,7 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
 					  u64 file_offset)
 {
 	struct rb_root *root = &tree->tree;
-	struct rb_node *prev = NULL;
+	struct rb_node *prev;
 	struct rb_node *ret;
 	struct btrfs_ordered_extent *entry;
diff --git a/trunk/fs/btrfs/print-tree.c b/trunk/fs/btrfs/print-tree.c
index fb2605d998e9..0d126be22b63 100644
--- a/trunk/fs/btrfs/print-tree.c
+++ b/trunk/fs/btrfs/print-tree.c
@@ -260,7 +260,6 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
 #else
 			BUG();
 #endif
-			break;
 		case BTRFS_BLOCK_GROUP_ITEM_KEY:
 			bi = btrfs_item_ptr(l, i,
 					    struct btrfs_block_group_item);
diff --git a/trunk/fs/btrfs/relocation.c b/trunk/fs/btrfs/relocation.c
index 1f5556acb530..045c9c2b2d7e 100644
--- a/trunk/fs/btrfs/relocation.c
+++ b/trunk/fs/btrfs/relocation.c
@@ -2028,7 +2028,6 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
 	while (1) {
 		trans = btrfs_start_transaction(root, 0);
-		BUG_ON(IS_ERR(trans));
 		trans->block_rsv = rc->block_rsv;
 		ret = btrfs_block_rsv_check(trans, root, rc->block_rsv,
@@ -2148,12 +2147,6 @@ int prepare_to_merge(struct reloc_control *rc, int err)
 	}
 	trans = btrfs_join_transaction(rc->extent_root, 1);
-	if (IS_ERR(trans)) {
-		if (!err)
-			btrfs_block_rsv_release(rc->extent_root,
-						rc->block_rsv, num_bytes);
-		return PTR_ERR(trans);
-	}
 	if (!err) {
 		if (num_bytes != rc->merging_rsv_size) {
@@ -3229,7 +3222,6 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
 	trans = btrfs_join_transaction(root, 0);
 	if (IS_ERR(trans)) {
 		btrfs_free_path(path);
-		ret = PTR_ERR(trans);
 		goto out;
 	}
@@ -3636,7 +3628,6 @@ int prepare_to_relocate(struct reloc_control *rc)
 	set_reloc_control(rc);
 	trans = btrfs_join_transaction(rc->extent_root, 1);
-	BUG_ON(IS_ERR(trans));
 	btrfs_commit_transaction(trans, rc->extent_root);
 	return 0;
 }
@@ -3666,7 +3657,6 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 	while (1) {
 		trans = btrfs_start_transaction(rc->extent_root, 0);
-		BUG_ON(IS_ERR(trans));
 		if (update_backref_cache(trans, &rc->backref_cache)) {
 			btrfs_end_transaction(trans, rc->extent_root);
@@ -3814,10 +3804,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 	/* get rid of pinned extents */
 	trans = btrfs_join_transaction(rc->extent_root, 1);
-	if (IS_ERR(trans))
-		err = PTR_ERR(trans);
-	else
-		btrfs_commit_transaction(trans, rc->extent_root);
+	btrfs_commit_transaction(trans, rc->extent_root);
 out_free:
 	btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
 	btrfs_free_path(path);
@@ -4035,7 +4022,6 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
 	int ret;
 	trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
-	BUG_ON(IS_ERR(trans));
 	memset(&root->root_item.drop_progress, 0,
 	       sizeof(root->root_item.drop_progress));
@@ -4139,11 +4125,6 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 	set_reloc_control(rc);
 	trans = btrfs_join_transaction(rc->extent_root, 1);
-	if (IS_ERR(trans)) {
-		unset_reloc_control(rc);
-		err = PTR_ERR(trans);
-		goto out_free;
-	}
 	rc->merge_reloc_tree = 1;
@@ -4173,13 +4154,9 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 	unset_reloc_control(rc);
 	trans = btrfs_join_transaction(rc->extent_root, 1);
-	if (IS_ERR(trans))
-		err = PTR_ERR(trans);
-	else
-		btrfs_commit_transaction(trans, rc->extent_root);
-out_free:
-	kfree(rc);
+	btrfs_commit_transaction(trans, rc->extent_root);
 out:
+	kfree(rc);
 	while (!list_empty(&reloc_roots)) {
 		reloc_root = list_entry(reloc_roots.next,
 					struct btrfs_root, root_list);
diff --git a/trunk/fs/btrfs/super.c b/trunk/fs/btrfs/super.c
index a004008f7d28..b2130c46fdb5 100644
--- a/trunk/fs/btrfs/super.c
+++ b/trunk/fs/btrfs/super.c
@@ -383,7 +383,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
 		struct btrfs_fs_devices **fs_devices)
 {
 	substring_t args[MAX_OPT_ARGS];
-	char *opts, *orig, *p;
+	char *opts, *p;
 	int error = 0;
 	int intarg;
@@ -397,7 +397,6 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
 	opts = kstrdup(options, GFP_KERNEL);
 	if (!opts)
 		return -ENOMEM;
-	orig = opts;
 	while ((p = strsep(&opts, ",")) != NULL) {
 		int token;
@@ -433,7 +432,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
 	}
 out_free_opts:
-	kfree(orig);
+	kfree(opts);
 out:
 	/*
 	 * If no subvolume name is specified we use the default one.  Allocate
@@ -624,8 +623,6 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
 	btrfs_wait_ordered_extents(root, 0, 0);
 	trans = btrfs_start_transaction(root, 0);
-	if (IS_ERR(trans))
-		return PTR_ERR(trans);
 	ret = btrfs_commit_transaction(trans, root);
 	return ret;
 }
@@ -764,8 +761,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 		}
 		btrfs_close_devices(fs_devices);
-		kfree(fs_info);
-		kfree(tree_root);
 	} else {
 		char b[BDEVNAME_SIZE];
diff --git a/trunk/fs/btrfs/transaction.c b/trunk/fs/btrfs/transaction.c
index 3d73c8d93bbb..bae5c7b8bbe2 100644
--- a/trunk/fs/btrfs/transaction.c
+++ b/trunk/fs/btrfs/transaction.c
@@ -1161,11 +1161,6 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
 	INIT_DELAYED_WORK(&ac->work, do_async_commit);
 	ac->root = root;
 	ac->newtrans = btrfs_join_transaction(root, 0);
-	if (IS_ERR(ac->newtrans)) {
-		int err = PTR_ERR(ac->newtrans);
-		kfree(ac);
-		return err;
-	}
 	/* take transaction reference */
 	mutex_lock(&root->fs_info->trans_mutex);
diff --git a/trunk/fs/btrfs/tree-log.c b/trunk/fs/btrfs/tree-log.c
index a4bbb854dfd2..054744ac5719 100644
--- a/trunk/fs/btrfs/tree-log.c
+++ b/trunk/fs/btrfs/tree-log.c
@@ -338,12 +338,6 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
 	}
 	dst_copy = kmalloc(item_size, GFP_NOFS);
 	src_copy = kmalloc(item_size, GFP_NOFS);
-	if (!dst_copy || !src_copy) {
-		btrfs_release_path(root, path);
-		kfree(dst_copy);
-		kfree(src_copy);
-		return -ENOMEM;
-	}
 	read_extent_buffer(eb, src_copy, src_ptr, item_size);
@@ -671,9 +665,6 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
 	btrfs_dir_item_key_to_cpu(leaf, di, &location);
 	name_len = btrfs_dir_name_len(leaf, di);
 	name = kmalloc(name_len, GFP_NOFS);
-	if (!name)
-		return -ENOMEM;
-
 	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
 	btrfs_release_path(root, path);
@@ -753,9 +744,6 @@ static noinline int backref_in_log(struct btrfs_root *log,
 	int match = 0;
 	path = btrfs_alloc_path();
-	if (!path)
-		return -ENOMEM;
-
 	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
 	if (ret != 0)
 		goto out;
@@ -979,8 +967,6 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
 	key.offset = (u64)-1;
 	path = btrfs_alloc_path();
-	if (!path)
-		return -ENOMEM;
 	while (1) {
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -1192,9 +1178,6 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
 	name_len = btrfs_dir_name_len(eb, di);
 	name = kmalloc(name_len, GFP_NOFS);
-	if (!name)
-		return -ENOMEM;
-
 	log_type = btrfs_dir_type(eb, di);
 	read_extent_buffer(eb, name, (unsigned long)(di + 1), name_len);
@@ -1709,8 +1692,6 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
 		root_owner = btrfs_header_owner(parent);
 		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
-		if (!next)
-			return -ENOMEM;
 		if (*level == 1) {
 			wc->process_func(root, next, wc, ptr_gen);
@@ -2051,7 +2032,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 		wait_log_commit(trans, log_root_tree,
 				log_root_tree->log_transid);
 		mutex_unlock(&log_root_tree->log_mutex);
-		ret = 0;
 		goto out;
 	}
 	atomic_set(&log_root_tree->log_commit[index2], 1);
@@ -2116,7 +2096,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	smp_mb();
 	if (waitqueue_active(&root->log_commit_wait[index1]))
 		wake_up(&root->log_commit_wait[index1]);
-	return ret;
+	return 0;
 }
 static void free_log_tree(struct btrfs_trans_handle *trans,
@@ -2214,9 +2194,6 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
 	log = root->log_root;
 	path = btrfs_alloc_path();
-	if (!path)
-		return -ENOMEM;
-
 	di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
 				   name, name_len, -1);
 	if (IS_ERR(di)) {
@@ -2617,9 +2594,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
 			   nr * sizeof(u32), GFP_NOFS);
-	if (!ins_data)
-		return -ENOMEM;
-
 	ins_sizes = (u32 *)ins_data;
 	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
@@ -2751,13 +2725,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 	log = root->log_root;
 	path = btrfs_alloc_path();
-	if (!path)
-		return -ENOMEM;
 	dst_path = btrfs_alloc_path();
-	if (!dst_path) {
-		btrfs_free_path(path);
-		return -ENOMEM;
-	}
 	min_key.objectid = inode->i_ino;
 	min_key.type = BTRFS_INODE_ITEM_KEY;
@@ -3112,7 +3080,6 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
 	BUG_ON(!path);
 	trans = btrfs_start_transaction(fs_info->tree_root, 0);
-	BUG_ON(IS_ERR(trans));
 	wc.trans = trans;
 	wc.pin = 1;
diff --git a/trunk/fs/btrfs/volumes.c b/trunk/fs/btrfs/volumes.c
index 2636a051e4b2..d158530233b7 100644
--- a/trunk/fs/btrfs/volumes.c
+++ b/trunk/fs/btrfs/volumes.c
@@ -1213,10 +1213,6 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
 		return -ENOMEM;
 	trans = btrfs_start_transaction(root, 0);
-	if (IS_ERR(trans)) {
-		btrfs_free_path(path);
-		return PTR_ERR(trans);
-	}
 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
 	key.type = BTRFS_DEV_ITEM_KEY;
 	key.offset = device->devid;
@@ -1610,12 +1606,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 	}
 	trans = btrfs_start_transaction(root, 0);
-	if (IS_ERR(trans)) {
-		kfree(device);
-		ret = PTR_ERR(trans);
-		goto error;
-	}
-
 	lock_chunks(root);
 	device->writeable = 1;
@@ -1883,7 +1873,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
 		return ret;
 	trans = btrfs_start_transaction(root, 0);
-	BUG_ON(IS_ERR(trans));
+	BUG_ON(!trans);
 	lock_chunks(root);
@@ -2057,7 +2047,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
 		BUG_ON(ret);
 		trans = btrfs_start_transaction(dev_root, 0);
-		BUG_ON(IS_ERR(trans));
+		BUG_ON(!trans);
 		ret = btrfs_grow_device(trans, device, old_size);
 		BUG_ON(ret);
@@ -2223,11 +2213,6 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 	/* Shrinking succeeded, else we would be at "done". */
 	trans = btrfs_start_transaction(root, 0);
-	if (IS_ERR(trans)) {
-		ret = PTR_ERR(trans);
-		goto done;
-	}
-
 	lock_chunks(root);
 	device->disk_total_bytes = new_size;
diff --git a/trunk/fs/cifs/cifsacl.c b/trunk/fs/cifs/cifsacl.c
index beeebf194234..1e7636b145a8 100644
--- a/trunk/fs/cifs/cifsacl.c
+++ b/trunk/fs/cifs/cifsacl.c
@@ -372,10 +372,6 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
 		ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
 				GFP_KERNEL);
-		if (!ppace) {
-			cERROR(1, "DACL memory allocation error");
-			return;
-		}
 		for (i = 0; i < num_aces; ++i) {
 			ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
diff --git a/trunk/fs/cifs/cifssmb.c b/trunk/fs/cifs/cifssmb.c
index 904aa47e3515..46c66ed01af4 100644
--- a/trunk/fs/cifs/cifssmb.c
+++ b/trunk/fs/cifs/cifssmb.c
@@ -136,6 +136,9 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
 		}
 	}
+	if (ses->status == CifsExiting)
+		return -EIO;
+
 	/*
 	 * Give demultiplex thread up to 10 seconds to reconnect, should be
 	 * greater than cifs socket timeout which is 7 seconds
@@ -153,7 +156,7 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
 		 * retrying until process is killed or server comes
 		 * back on-line
 		 */
-		if (!tcon->retry) {
+		if (!tcon->retry || ses->status == CifsExiting) {
 			cFYI(1, "gave up waiting on reconnect in smb_init");
 			return -EHOSTDOWN;
 		}
diff --git a/trunk/fs/cifs/connect.c b/trunk/fs/cifs/connect.c
index 257b6d895e20..47d8ff623683 100644
--- a/trunk/fs/cifs/connect.c
+++ b/trunk/fs/cifs/connect.c
@@ -337,12 +337,8 @@ cifs_echo_request(struct work_struct *work)
 	struct TCP_Server_Info *server = container_of(work,
 					struct TCP_Server_Info, echo.work);
-	/*
-	 * We cannot send an echo until the NEGOTIATE_PROTOCOL request is done.
-	 * Also, no need to ping if we got a response recently
-	 */
-	if (server->tcpStatus != CifsGood ||
-	    time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
+	/* no need to ping if we got a response recently */
+	if (time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
 		goto requeue_echo;
 	rc = CIFSSMBEcho(server);
@@ -582,12 +578,12 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
 		else if (reconnect == 1)
 			continue;
-		total_read += 4; /* account for rfc1002 hdr */
+		length += 4; /* account for rfc1002 hdr */
-		dump_smb(smb_buffer, total_read);
-		if (checkSMB(smb_buffer, smb_buffer->Mid, total_read)) {
-			cifs_dump_mem("Bad SMB: ", smb_buffer,
-				      total_read < 48 ? total_read : 48);
+
+		dump_smb(smb_buffer, length);
+		if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) {
+			cifs_dump_mem("Bad SMB: ", smb_buffer, 48);
 			continue;
 		}
@@ -637,11 +633,11 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
 			mid_entry->largeBuf = isLargeBuf;
 multi_t2_fnd:
 			mid_entry->midState = MID_RESPONSE_RECEIVED;
+			list_del_init(&mid_entry->qhead);
+			mid_entry->callback(mid_entry);
 #ifdef CONFIG_CIFS_STATS2
 			mid_entry->when_received = jiffies;
 #endif
-			list_del_init(&mid_entry->qhead);
-			mid_entry->callback(mid_entry);
 			break;
 		}
 		mid_entry = NULL;
diff --git a/trunk/fs/cifs/file.c b/trunk/fs/cifs/file.c
index e964b1cd5dd0..74c0a282d012 100644
--- a/trunk/fs/cifs/file.c
+++ b/trunk/fs/cifs/file.c
@@ -1662,10 +1662,10 @@ static ssize_t
 cifs_iovec_write(struct file *file, const struct iovec *iov,
 		 unsigned long nr_segs, loff_t *poffset)
 {
-	unsigned int written;
-	unsigned long num_pages, npages, i;
-	size_t copied, len, cur_len;
-	ssize_t total_written = 0;
+	size_t total_written = 0;
+	unsigned int written = 0;
+	unsigned long num_pages, npages;
+	size_t copied, len, cur_len, i;
 	struct kvec *to_send;
 	struct page **pages;
 	struct iov_iter it;
@@ -1821,8 +1821,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
 {
 	int rc;
 	int xid;
-	ssize_t total_read;
-	unsigned int bytes_read = 0;
+	unsigned int total_read, bytes_read = 0;
 	size_t len, cur_len;
 	int iov_offset = 0;
 	struct cifs_sb_info *cifs_sb;
diff --git a/trunk/fs/cifs/transport.c b/trunk/fs/cifs/transport.c
index fbc5aace54b1..b8c5e2eb43d0 100644
--- a/trunk/fs/cifs/transport.c
+++ b/trunk/fs/cifs/transport.c
@@ -359,10 +359,6 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
 	if (rc)
 		return rc;
-	/* enable signing if server requires it */
-	if (server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
-		in_buf->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
-
 	mutex_lock(&server->srv_mutex);
 	mid = AllocMidQEntry(in_buf, server);
 	if (mid == NULL) {
diff --git a/trunk/kernel/cred.c b/trunk/kernel/cred.c
index 3a9d6dd53a6c..6a1aa004e376 100644
--- a/trunk/kernel/cred.c
+++ b/trunk/kernel/cred.c
@@ -252,13 +252,13 @@ struct cred *cred_alloc_blank(void)
 #endif
 	atomic_set(&new->usage, 1);
-#ifdef CONFIG_DEBUG_CREDENTIALS
-	new->magic = CRED_MAGIC;
-#endif
 	if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
 		goto error;
+#ifdef CONFIG_DEBUG_CREDENTIALS
+	new->magic = CRED_MAGIC;
+#endif
 	return new;
 error:
@@ -657,8 +657,6 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
 	validate_creds(old);
 	*new = *old;
-	atomic_set(&new->usage, 1);
-	set_cred_subscribers(new, 0);
 	get_uid(new->user);
 	get_group_info(new->group_info);
@@ -676,6 +674,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
 	if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
 		goto error;
+	atomic_set(&new->usage, 1);
+	set_cred_subscribers(new, 0);
 	put_cred(old);
 	validate_creds(new);
 	return new;
@@ -748,11 +748,7 @@ bool creds_are_invalid(const struct cred *cred)
 	if (cred->magic != CRED_MAGIC)
 		return true;
 #ifdef CONFIG_SECURITY_SELINUX
-	/*
-	 * cred->security == NULL if security_cred_alloc_blank() or
-	 * security_prepare_creds() returned an error.
-	 */
-	if (selinux_is_enabled() && cred->security) {
+	if (selinux_is_enabled()) {
 		if ((unsigned long) cred->security < PAGE_SIZE)
 			return true;
 		if ((*(u32 *)cred->security & 0xffffff00) ==
diff --git a/trunk/security/selinux/hooks.c b/trunk/security/selinux/hooks.c
index c8d699270687..e276eb468536 100644
--- a/trunk/security/selinux/hooks.c
+++ b/trunk/security/selinux/hooks.c
@@ -3198,11 +3198,7 @@ static void selinux_cred_free(struct cred *cred)
 {
 	struct task_security_struct *tsec = cred->security;
-	/*
-	 * cred->security == NULL if security_cred_alloc_blank() or
-	 * security_prepare_creds() returned an error.
-	 */
-	BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE);
+	BUG_ON((unsigned long) cred->security < PAGE_SIZE);
 	cred->security = (void *) 0x7UL;
 	kfree(tsec);
 }