diff --git a/[refs] b/[refs] index a215bfb50164..a6cd57e28cb7 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 9ec23949a2d1cb2574c5ec927ccab6dc5685e5f9 +refs/heads/master: c336078bf65c4d38caa9a4b8b7b7261c778e622c diff --git a/trunk/Makefile b/trunk/Makefile index ea51081812f3..a43733df3978 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 2 SUBLEVEL = 0 -EXTRAVERSION = -rc7 +EXTRAVERSION = -rc6 NAME = Saber-toothed Squirrel # *DOCUMENTATION* diff --git a/trunk/arch/arm/mach-s3c64xx/Kconfig b/trunk/arch/arm/mach-s3c64xx/Kconfig index 381586c7b1b2..5552e048c2be 100644 --- a/trunk/arch/arm/mach-s3c64xx/Kconfig +++ b/trunk/arch/arm/mach-s3c64xx/Kconfig @@ -8,7 +8,6 @@ config PLAT_S3C64XX bool depends on ARCH_S3C64XX select SAMSUNG_WAKEMASK - select PM_GENERIC_DOMAINS default y help Base platform code for any Samsung S3C64XX device diff --git a/trunk/arch/arm/mach-s3c64xx/mach-crag6410.c b/trunk/arch/arm/mach-s3c64xx/mach-crag6410.c index 707b9b22f9fd..d04b65448510 100644 --- a/trunk/arch/arm/mach-s3c64xx/mach-crag6410.c +++ b/trunk/arch/arm/mach-s3c64xx/mach-crag6410.c @@ -704,7 +704,7 @@ static void __init crag6410_machine_init(void) regulator_has_full_constraints(); - s3c64xx_pm_init(); + s3c_pm_init(); } MACHINE_START(WLF_CRAGG_6410, "Wolfson Cragganmore 6410") diff --git a/trunk/arch/arm/mach-s3c64xx/pm.c b/trunk/arch/arm/mach-s3c64xx/pm.c index 7d3e81b9dd06..b375cd5c47cb 100644 --- a/trunk/arch/arm/mach-s3c64xx/pm.c +++ b/trunk/arch/arm/mach-s3c64xx/pm.c @@ -17,12 +17,10 @@ #include #include #include -#include #include #include -#include #include #include @@ -33,148 +31,6 @@ #include #include -struct s3c64xx_pm_domain { - char *const name; - u32 ena; - u32 pwr_stat; - struct generic_pm_domain pd; -}; - -static int s3c64xx_pd_off(struct generic_pm_domain *domain) -{ - struct s3c64xx_pm_domain *pd; - u32 val; - - pd = container_of(domain, struct s3c64xx_pm_domain, pd); - - val = __raw_readl(S3C64XX_NORMAL_CFG); - val &= ~(pd->ena); - __raw_writel(val, S3C64XX_NORMAL_CFG); - - return 0; -} - -static int s3c64xx_pd_on(struct generic_pm_domain *domain) -{ - struct s3c64xx_pm_domain *pd; - u32 val; - long retry = 1000000L; - - pd = container_of(domain, struct s3c64xx_pm_domain, pd); - - val = __raw_readl(S3C64XX_NORMAL_CFG); - val |= pd->ena; - __raw_writel(val, S3C64XX_NORMAL_CFG); - - /* Not all domains provide power status readback */ - if (pd->pwr_stat) { - do { - cpu_relax(); - if (__raw_readl(S3C64XX_BLK_PWR_STAT) & pd->pwr_stat) - break; - } while (retry--); - - if (!retry) { - pr_err("Failed to start domain %s\n", pd->name); - return -EBUSY; - } - } - - return 0; -} - -static struct s3c64xx_pm_domain s3c64xx_pm_irom = { - .name = "IROM", - .ena = S3C64XX_NORMALCFG_IROM_ON, - .pd = { - .power_off = s3c64xx_pd_off, - .power_on = s3c64xx_pd_on, - }, -}; - -static struct s3c64xx_pm_domain s3c64xx_pm_etm = { - .name = "ETM", - .ena = S3C64XX_NORMALCFG_DOMAIN_ETM_ON, - .pwr_stat = S3C64XX_BLKPWRSTAT_ETM, - .pd = { - .power_off = s3c64xx_pd_off, - .power_on = s3c64xx_pd_on, - }, -}; - -static struct s3c64xx_pm_domain s3c64xx_pm_s = { - .name = "S", - .ena = S3C64XX_NORMALCFG_DOMAIN_S_ON, - .pwr_stat = S3C64XX_BLKPWRSTAT_S, - .pd = { - .power_off = s3c64xx_pd_off, - .power_on = s3c64xx_pd_on, - }, -}; - -static struct s3c64xx_pm_domain s3c64xx_pm_f = { - .name = "F", - .ena = S3C64XX_NORMALCFG_DOMAIN_F_ON, - .pwr_stat = S3C64XX_BLKPWRSTAT_F, - .pd = { - .power_off = s3c64xx_pd_off, - .power_on = s3c64xx_pd_on, - }, -}; - -static 
struct s3c64xx_pm_domain s3c64xx_pm_p = { - .name = "P", - .ena = S3C64XX_NORMALCFG_DOMAIN_P_ON, - .pwr_stat = S3C64XX_BLKPWRSTAT_P, - .pd = { - .power_off = s3c64xx_pd_off, - .power_on = s3c64xx_pd_on, - }, -}; - -static struct s3c64xx_pm_domain s3c64xx_pm_i = { - .name = "I", - .ena = S3C64XX_NORMALCFG_DOMAIN_I_ON, - .pwr_stat = S3C64XX_BLKPWRSTAT_I, - .pd = { - .power_off = s3c64xx_pd_off, - .power_on = s3c64xx_pd_on, - }, -}; - -static struct s3c64xx_pm_domain s3c64xx_pm_g = { - .name = "G", - .ena = S3C64XX_NORMALCFG_DOMAIN_G_ON, - .pd = { - .power_off = s3c64xx_pd_off, - .power_on = s3c64xx_pd_on, - }, -}; - -static struct s3c64xx_pm_domain s3c64xx_pm_v = { - .name = "V", - .ena = S3C64XX_NORMALCFG_DOMAIN_V_ON, - .pwr_stat = S3C64XX_BLKPWRSTAT_V, - .pd = { - .power_off = s3c64xx_pd_off, - .power_on = s3c64xx_pd_on, - }, -}; - -static struct s3c64xx_pm_domain *s3c64xx_always_on_pm_domains[] = { - &s3c64xx_pm_irom, -}; - -static struct s3c64xx_pm_domain *s3c64xx_pm_domains[] = { - &s3c64xx_pm_etm, - &s3c64xx_pm_g, - &s3c64xx_pm_v, - &s3c64xx_pm_i, - &s3c64xx_pm_p, - &s3c64xx_pm_s, - &s3c64xx_pm_f, -}; - #ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK void s3c_pm_debug_smdkled(u32 set, u32 clear) { @@ -233,8 +89,6 @@ static struct sleep_save misc_save[] = { SAVE_ITEM(S3C64XX_SDMA_SEL), SAVE_ITEM(S3C64XX_MODEM_MIFPCON), - - SAVE_ITEM(S3C64XX_NORMAL_CFG), }; void s3c_pm_configure_extint(void) @@ -325,26 +179,7 @@ static void s3c64xx_pm_prepare(void) __raw_writel(__raw_readl(S3C64XX_WAKEUP_STAT), S3C64XX_WAKEUP_STAT); } -int __init s3c64xx_pm_init(void) -{ - int i; - - s3c_pm_init(); - - for (i = 0; i < ARRAY_SIZE(s3c64xx_always_on_pm_domains); i++) - pm_genpd_init(&s3c64xx_always_on_pm_domains[i]->pd, - &pm_domain_always_on_gov, false); - - for (i = 0; i < ARRAY_SIZE(s3c64xx_pm_domains); i++) - pm_genpd_init(&s3c64xx_pm_domains[i]->pd, NULL, false); - - if (dev_get_platdata(&s3c_device_fb.dev)) - pm_genpd_add_device(&s3c64xx_pm_f.pd, &s3c_device_fb.dev); - - return 0; -} - -static __init int s3c64xx_pm_initcall(void) +static int s3c64xx_pm_init(void) { pm_cpu_prep = s3c64xx_pm_prepare; pm_cpu_sleep = s3c64xx_cpu_suspend; @@ -363,12 +198,5 @@ static __init int s3c64xx_pm_initcall(void) return 0; } -arch_initcall(s3c64xx_pm_initcall); - -static __init int s3c64xx_pm_late_initcall(void) -{ - pm_genpd_poweroff_unused(); - return 0; -} -late_initcall(s3c64xx_pm_late_initcall); +arch_initcall(s3c64xx_pm_init); diff --git a/trunk/arch/arm/mach-shmobile/include/mach/common.h b/trunk/arch/arm/mach-shmobile/include/mach/common.h index 4807623fb71c..834bd6cd508f 100644 --- a/trunk/arch/arm/mach-shmobile/include/mach/common.h +++ b/trunk/arch/arm/mach-shmobile/include/mach/common.h @@ -35,8 +35,8 @@ extern void sh7372_add_standard_devices(void); extern void sh7372_clock_init(void); extern void sh7372_pinmux_init(void); extern void sh7372_pm_init(void); -extern void sh7372_resume_core_standby_sysc(void); -extern int sh7372_do_idle_sysc(unsigned long sleep_mode); +extern void sh7372_resume_core_standby_a3sm(void); +extern int sh7372_do_idle_a3sm(unsigned long unused); extern struct clk sh7372_extal1_clk; extern struct clk sh7372_extal2_clk; diff --git a/trunk/arch/arm/mach-shmobile/include/mach/sh7372.h b/trunk/arch/arm/mach-shmobile/include/mach/sh7372.h index 8254ab86f6cd..84532f9629b2 100644 --- a/trunk/arch/arm/mach-shmobile/include/mach/sh7372.h +++ b/trunk/arch/arm/mach-shmobile/include/mach/sh7372.h @@ -480,10 +480,11 @@ struct platform_device; struct sh7372_pm_domain { struct generic_pm_domain genpd; 
struct dev_power_governor *gov; - int (*suspend)(void); + void (*suspend)(void); void (*resume)(void); unsigned int bit_shift; bool no_debug; + bool stay_on; }; static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d) @@ -498,7 +499,6 @@ extern struct sh7372_pm_domain sh7372_d4; extern struct sh7372_pm_domain sh7372_a4r; extern struct sh7372_pm_domain sh7372_a3rv; extern struct sh7372_pm_domain sh7372_a3ri; -extern struct sh7372_pm_domain sh7372_a4s; extern struct sh7372_pm_domain sh7372_a3sp; extern struct sh7372_pm_domain sh7372_a3sg; @@ -515,7 +515,5 @@ extern void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd, extern void sh7372_intcs_suspend(void); extern void sh7372_intcs_resume(void); -extern void sh7372_intca_suspend(void); -extern void sh7372_intca_resume(void); #endif /* __ASM_SH7372_H__ */ diff --git a/trunk/arch/arm/mach-shmobile/intc-sh7372.c b/trunk/arch/arm/mach-shmobile/intc-sh7372.c index 89afcaba99a1..2d8856df80e2 100644 --- a/trunk/arch/arm/mach-shmobile/intc-sh7372.c +++ b/trunk/arch/arm/mach-shmobile/intc-sh7372.c @@ -535,7 +535,6 @@ static struct resource intcs_resources[] __initdata = { static struct intc_desc intcs_desc __initdata = { .name = "sh7372-intcs", .force_enable = ENABLED_INTCS, - .skip_syscore_suspend = true, .resource = intcs_resources, .num_resources = ARRAY_SIZE(intcs_resources), .hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers, @@ -612,52 +611,3 @@ void sh7372_intcs_resume(void) for (k = 0x80; k <= 0x9c; k += 4) __raw_writeb(ffd5[k], intcs_ffd5 + k); } - -static unsigned short e694[0x200]; -static unsigned short e695[0x200]; - -void sh7372_intca_suspend(void) -{ - int k; - - for (k = 0x00; k <= 0x38; k += 4) - e694[k] = __raw_readw(0xe6940000 + k); - - for (k = 0x80; k <= 0xb4; k += 4) - e694[k] = __raw_readb(0xe6940000 + k); - - for (k = 0x180; k <= 0x1b4; k += 4) - e694[k] = __raw_readb(0xe6940000 + k); - - for (k = 0x00; k <= 0x50; k += 4) - e695[k] = __raw_readw(0xe6950000 + k); - - for (k = 0x80; k <= 0xa8; k += 4) - e695[k] = __raw_readb(0xe6950000 + k); - - for (k = 0x180; k <= 0x1a8; k += 4) - e695[k] = __raw_readb(0xe6950000 + k); -} - -void sh7372_intca_resume(void) -{ - int k; - - for (k = 0x00; k <= 0x38; k += 4) - __raw_writew(e694[k], 0xe6940000 + k); - - for (k = 0x80; k <= 0xb4; k += 4) - __raw_writeb(e694[k], 0xe6940000 + k); - - for (k = 0x180; k <= 0x1b4; k += 4) - __raw_writeb(e694[k], 0xe6940000 + k); - - for (k = 0x00; k <= 0x50; k += 4) - __raw_writew(e695[k], 0xe6950000 + k); - - for (k = 0x80; k <= 0xa8; k += 4) - __raw_writeb(e695[k], 0xe6950000 + k); - - for (k = 0x180; k <= 0x1a8; k += 4) - __raw_writeb(e695[k], 0xe6950000 + k); -} diff --git a/trunk/arch/arm/mach-shmobile/pm-sh7372.c b/trunk/arch/arm/mach-shmobile/pm-sh7372.c index 77b8fc12fc2f..34bbcbfb1706 100644 --- a/trunk/arch/arm/mach-shmobile/pm-sh7372.c +++ b/trunk/arch/arm/mach-shmobile/pm-sh7372.c @@ -82,12 +82,11 @@ static int pd_power_down(struct generic_pm_domain *genpd) struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd); unsigned int mask = 1 << sh7372_pd->bit_shift; - if (sh7372_pd->suspend) { - int ret = sh7372_pd->suspend(); + if (sh7372_pd->suspend) + sh7372_pd->suspend(); - if (ret) - return ret; - } + if (sh7372_pd->stay_on) + return 0; if (__raw_readl(PSTR) & mask) { unsigned int retry_count; @@ -102,8 +101,8 @@ static int pd_power_down(struct generic_pm_domain *genpd) } if (!sh7372_pd->no_debug) - pr_debug("%s: Power off, 0x%08x -> PSTR = 0x%08x\n", - genpd->name, mask, __raw_readl(PSTR)); 
+ pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n", + mask, __raw_readl(PSTR)); return 0; } @@ -114,6 +113,9 @@ static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume) unsigned int retry_count; int ret = 0; + if (sh7372_pd->stay_on) + goto out; + if (__raw_readl(PSTR) & mask) goto out; @@ -131,8 +133,8 @@ static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume) ret = -EIO; if (!sh7372_pd->no_debug) - pr_debug("%s: Power on, 0x%08x -> PSTR = 0x%08x\n", - sh7372_pd->genpd.name, mask, __raw_readl(PSTR)); + pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n", + mask, __raw_readl(PSTR)); out: if (ret == 0 && sh7372_pd->resume && do_resume) @@ -146,60 +148,35 @@ static int pd_power_up(struct generic_pm_domain *genpd) return __pd_power_up(to_sh7372_pd(genpd), true); } -static int sh7372_a4r_suspend(void) +static void sh7372_a4r_suspend(void) { sh7372_intcs_suspend(); __raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */ - return 0; } static bool pd_active_wakeup(struct device *dev) { - bool (*active_wakeup)(struct device *dev); - - active_wakeup = dev_gpd_data(dev)->ops.active_wakeup; - return active_wakeup ? active_wakeup(dev) : true; + return true; } -static int sh7372_stop_dev(struct device *dev) +static bool sh7372_power_down_forbidden(struct dev_pm_domain *domain) { - int (*stop)(struct device *dev); - - stop = dev_gpd_data(dev)->ops.stop; - if (stop) { - int ret = stop(dev); - if (ret) - return ret; - } - return pm_clk_suspend(dev); + return false; } -static int sh7372_start_dev(struct device *dev) -{ - int (*start)(struct device *dev); - int ret; - - ret = pm_clk_resume(dev); - if (ret) - return ret; - - start = dev_gpd_data(dev)->ops.start; - if (start) - ret = start(dev); - - return ret; -} +struct dev_power_governor sh7372_always_on_gov = { + .power_down_ok = sh7372_power_down_forbidden, +}; void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd) { struct generic_pm_domain *genpd = &sh7372_pd->genpd; - struct dev_power_governor *gov = sh7372_pd->gov; - pm_genpd_init(genpd, gov ? : &simple_qos_governor, false); - genpd->dev_ops.stop = sh7372_stop_dev; - genpd->dev_ops.start = sh7372_start_dev; - genpd->dev_ops.active_wakeup = pd_active_wakeup; + pm_genpd_init(genpd, sh7372_pd->gov, false); + genpd->stop_device = pm_clk_suspend; + genpd->start_device = pm_clk_resume; genpd->dev_irq_safe = true; + genpd->active_wakeup = pd_active_wakeup; genpd->power_off = pd_power_down; genpd->power_on = pd_power_up; __pd_power_up(sh7372_pd, false); @@ -222,73 +199,48 @@ void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd, } struct sh7372_pm_domain sh7372_a4lc = { - .genpd.name = "A4LC", .bit_shift = 1, }; struct sh7372_pm_domain sh7372_a4mp = { - .genpd.name = "A4MP", .bit_shift = 2, }; struct sh7372_pm_domain sh7372_d4 = { - .genpd.name = "D4", .bit_shift = 3, }; struct sh7372_pm_domain sh7372_a4r = { - .genpd.name = "A4R", .bit_shift = 5, + .gov = &sh7372_always_on_gov, .suspend = sh7372_a4r_suspend, .resume = sh7372_intcs_resume, + .stay_on = true, }; struct sh7372_pm_domain sh7372_a3rv = { - .genpd.name = "A3RV", .bit_shift = 6, }; struct sh7372_pm_domain sh7372_a3ri = { - .genpd.name = "A3RI", .bit_shift = 8, }; -static int sh7372_a4s_suspend(void) -{ - /* - * The A4S domain contains the CPU core and therefore it should - * only be turned off if the CPU is in use. 
- */ - return -EBUSY; -} - -struct sh7372_pm_domain sh7372_a4s = { - .genpd.name = "A4S", - .bit_shift = 10, - .gov = &pm_domain_always_on_gov, +struct sh7372_pm_domain sh7372_a3sp = { + .bit_shift = 11, + .gov = &sh7372_always_on_gov, .no_debug = true, - .suspend = sh7372_a4s_suspend, }; -static int sh7372_a3sp_suspend(void) +static void sh7372_a3sp_init(void) { - /* - * Serial consoles make use of SCIF hardware located in A3SP, + /* serial consoles make use of SCIF hardware located in A3SP, * keep such power domain on if "no_console_suspend" is set. */ - return console_suspend_enabled ? -EBUSY : 0; + sh7372_a3sp.stay_on = !console_suspend_enabled; } -struct sh7372_pm_domain sh7372_a3sp = { - .genpd.name = "A3SP", - .bit_shift = 11, - .gov = &pm_domain_always_on_gov, - .no_debug = true, - .suspend = sh7372_a3sp_suspend, -}; - struct sh7372_pm_domain sh7372_a3sg = { - .genpd.name = "A3SG", .bit_shift = 13, }; @@ -305,16 +257,11 @@ static int sh7372_do_idle_core_standby(unsigned long unused) return 0; } -static void sh7372_set_reset_vector(unsigned long address) +static void sh7372_enter_core_standby(void) { /* set reset vector, translate 4k */ - __raw_writel(address, SBAR); + __raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR); __raw_writel(0, APARMBAREA); -} - -static void sh7372_enter_core_standby(void) -{ - sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc)); /* enter sleep mode with SYSTBCR to 0x10 */ __raw_writel(0x10, SYSTBCR); @@ -327,22 +274,27 @@ static void sh7372_enter_core_standby(void) #endif #ifdef CONFIG_SUSPEND -static void sh7372_enter_sysc(int pllc0_on, unsigned long sleep_mode) +static void sh7372_enter_a3sm_common(int pllc0_on) { + /* set reset vector, translate 4k */ + __raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR); + __raw_writel(0, APARMBAREA); + if (pllc0_on) __raw_writel(0, PLLC01STPCR); else __raw_writel(1 << 28, PLLC01STPCR); + __raw_writel(0, PDNSEL); /* power-down A3SM only, not A4S */ __raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */ - cpu_suspend(sleep_mode, sh7372_do_idle_sysc); + cpu_suspend(0, sh7372_do_idle_a3sm); __raw_readl(WUPSFAC); /* read wakeup int. 
factor after wakeup */ /* disable reset vector translation */ __raw_writel(0, SBAR); } -static int sh7372_sysc_valid(unsigned long *mskp, unsigned long *msk2p) +static int sh7372_a3sm_valid(unsigned long *mskp, unsigned long *msk2p) { unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4; unsigned long msk, msk2; @@ -430,7 +382,7 @@ static void sh7372_icr_to_irqcr(unsigned long icr, u16 *irqcr1p, u16 *irqcr2p) *irqcr2p = irqcr2; } -static void sh7372_setup_sysc(unsigned long msk, unsigned long msk2) +static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2) { u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high; unsigned long tmp; @@ -463,22 +415,6 @@ static void sh7372_setup_sysc(unsigned long msk, unsigned long msk2) __raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3); __raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4); } - -static void sh7372_enter_a3sm_common(int pllc0_on) -{ - sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc)); - sh7372_enter_sysc(pllc0_on, 1 << 12); -} - -static void sh7372_enter_a4s_common(int pllc0_on) -{ - sh7372_intca_suspend(); - memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100); - sh7372_set_reset_vector(SMFRAM); - sh7372_enter_sysc(pllc0_on, 1 << 10); - sh7372_intca_resume(); -} - #endif #ifdef CONFIG_CPU_IDLE @@ -512,20 +448,14 @@ static int sh7372_enter_suspend(suspend_state_t suspend_state) unsigned long msk, msk2; /* check active clocks to determine potential wakeup sources */ - if (sh7372_sysc_valid(&msk, &msk2)) { + if (sh7372_a3sm_valid(&msk, &msk2)) { + /* convert INTC mask and sense to SYSC mask and sense */ - sh7372_setup_sysc(msk, msk2); - - if (!console_suspend_enabled && - sh7372_a4s.genpd.status == GPD_STATE_POWER_OFF) { - /* enter A4S sleep with PLLC0 off */ - pr_debug("entering A4S\n"); - sh7372_enter_a4s_common(0); - } else { - /* enter A3SM sleep with PLLC0 off */ - pr_debug("entering A3SM\n"); - sh7372_enter_a3sm_common(0); - } + sh7372_setup_a3sm(msk, msk2); + + /* enter A3SM sleep with PLLC0 off */ + pr_debug("entering A3SM\n"); + sh7372_enter_a3sm_common(0); } else { /* default to Core Standby that supports all wakeup sources */ pr_debug("entering Core Standby\n"); @@ -534,37 +464,9 @@ static int sh7372_enter_suspend(suspend_state_t suspend_state) return 0; } -/** - * sh7372_pm_notifier_fn - SH7372 PM notifier routine. - * @notifier: Unused. - * @pm_event: Event being handled. - * @unused: Unused. - */ -static int sh7372_pm_notifier_fn(struct notifier_block *notifier, - unsigned long pm_event, void *unused) -{ - switch (pm_event) { - case PM_SUSPEND_PREPARE: - /* - * This is necessary, because the A4R domain has to be "on" - * when suspend_device_irqs() and resume_device_irqs() are - * executed during system suspend and resume, respectively, so - * that those functions don't crash while accessing the INTCS. 
- */ - pm_genpd_poweron(&sh7372_a4r.genpd); - break; - case PM_POST_SUSPEND: - pm_genpd_poweroff_unused(); - break; - } - - return NOTIFY_DONE; -} - static void sh7372_suspend_init(void) { shmobile_suspend_ops.enter = sh7372_enter_suspend; - pm_notifier(sh7372_pm_notifier_fn, 0); } #else static void sh7372_suspend_init(void) {} @@ -580,6 +482,8 @@ void __init sh7372_pm_init(void) /* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */ __raw_writel(0, PDNSEL); + sh7372_a3sp_init(); + sh7372_suspend_init(); sh7372_cpuidle_init(); } diff --git a/trunk/arch/arm/mach-shmobile/setup-sh7372.c b/trunk/arch/arm/mach-shmobile/setup-sh7372.c index c197f9d29d04..2380389e6ac5 100644 --- a/trunk/arch/arm/mach-shmobile/setup-sh7372.c +++ b/trunk/arch/arm/mach-shmobile/setup-sh7372.c @@ -994,16 +994,12 @@ void __init sh7372_add_standard_devices(void) sh7372_init_pm_domain(&sh7372_a4r); sh7372_init_pm_domain(&sh7372_a3rv); sh7372_init_pm_domain(&sh7372_a3ri); - sh7372_init_pm_domain(&sh7372_a4s); - sh7372_init_pm_domain(&sh7372_a3sp); sh7372_init_pm_domain(&sh7372_a3sg); + sh7372_init_pm_domain(&sh7372_a3sp); sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv); sh7372_pm_add_subdomain(&sh7372_a4r, &sh7372_a4lc); - sh7372_pm_add_subdomain(&sh7372_a4s, &sh7372_a3sg); - sh7372_pm_add_subdomain(&sh7372_a4s, &sh7372_a3sp); - platform_add_devices(sh7372_early_devices, ARRAY_SIZE(sh7372_early_devices)); diff --git a/trunk/arch/arm/mach-shmobile/sleep-sh7372.S b/trunk/arch/arm/mach-shmobile/sleep-sh7372.S index 1d564674451d..f3ab3c5810ea 100644 --- a/trunk/arch/arm/mach-shmobile/sleep-sh7372.S +++ b/trunk/arch/arm/mach-shmobile/sleep-sh7372.S @@ -37,18 +37,13 @@ #if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE) .align 12 .text - .global sh7372_resume_core_standby_sysc -sh7372_resume_core_standby_sysc: + .global sh7372_resume_core_standby_a3sm +sh7372_resume_core_standby_a3sm: ldr pc, 1f 1: .long cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET -#define SPDCR 0xe6180008 - - /* A3SM & A4S power down */ - .global sh7372_do_idle_sysc -sh7372_do_idle_sysc: - mov r8, r0 /* sleep mode passed in r0 */ - + .global sh7372_do_idle_a3sm +sh7372_do_idle_a3sm: /* * Clear the SCTLR.C bit to prevent further data cache * allocation. 
Clearing SCTLR.C would make all the data accesses @@ -85,9 +80,13 @@ sh7372_do_idle_sysc: dsb dmb - /* SYSC power down */ +#define SPDCR 0xe6180008 +#define A3SM (1 << 12) + + /* A3SM power down */ ldr r0, =SPDCR - str r8, [r0] + ldr r1, =A3SM + str r1, [r0] 1: b 1b diff --git a/trunk/arch/arm/plat-samsung/include/plat/pm.h b/trunk/arch/arm/plat-samsung/include/plat/pm.h index a6bdee204f2a..dcf68709f9cf 100644 --- a/trunk/arch/arm/plat-samsung/include/plat/pm.h +++ b/trunk/arch/arm/plat-samsung/include/plat/pm.h @@ -22,7 +22,6 @@ struct sys_device; #ifdef CONFIG_PM extern __init int s3c_pm_init(void); -extern __init int s3c64xx_pm_init(void); #else @@ -30,11 +29,6 @@ static inline int s3c_pm_init(void) { return 0; } - -static inline int s3c64xx_pm_init(void) -{ - return 0; -} #endif /* configuration for the IRQ mask over sleep */ diff --git a/trunk/arch/sparc/kernel/pci_sun4v.c b/trunk/arch/sparc/kernel/pci_sun4v.c index af5755d20fbe..b272cda35a01 100644 --- a/trunk/arch/sparc/kernel/pci_sun4v.c +++ b/trunk/arch/sparc/kernel/pci_sun4v.c @@ -849,10 +849,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm, if (!irq) return -ENOMEM; - if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) - return -EINVAL; if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE)) return -EINVAL; + if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) + return -EINVAL; return irq; } diff --git a/trunk/arch/x86/net/bpf_jit_comp.c b/trunk/arch/x86/net/bpf_jit_comp.c index 7b65f752c5f8..bfab3fa10edc 100644 --- a/trunk/arch/x86/net/bpf_jit_comp.c +++ b/trunk/arch/x86/net/bpf_jit_comp.c @@ -568,8 +568,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; break; } if (filter[i].jt != 0) { - if (filter[i].jf && f_offset) - t_offset += is_near(f_offset) ? 2 : 5; + if (filter[i].jf) + t_offset += is_near(f_offset) ? 
2 : 6; EMIT_COND_JMP(t_op, t_offset); if (filter[i].jf) EMIT_JMP(f_offset); diff --git a/trunk/drivers/ata/Kconfig b/trunk/drivers/ata/Kconfig index cf047c406d92..6bdedd7cca2c 100644 --- a/trunk/drivers/ata/Kconfig +++ b/trunk/drivers/ata/Kconfig @@ -820,7 +820,7 @@ config PATA_PLATFORM config PATA_OF_PLATFORM tristate "OpenFirmware platform device PATA support" - depends on PATA_PLATFORM && OF && OF_IRQ + depends on PATA_PLATFORM && OF help This option enables support for generic directly connected ATA devices commonly found on embedded systems with OpenFirmware diff --git a/trunk/drivers/base/power/Makefile b/trunk/drivers/base/power/Makefile index 2e58ebb1f6c0..81676dd17900 100644 --- a/trunk/drivers/base/power/Makefile +++ b/trunk/drivers/base/power/Makefile @@ -3,7 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o obj-$(CONFIG_PM_RUNTIME) += runtime.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o obj-$(CONFIG_PM_OPP) += opp.o -obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o +obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o obj-$(CONFIG_HAVE_CLK) += clock_ops.o ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG diff --git a/trunk/drivers/base/power/domain.c b/trunk/drivers/base/power/domain.c index 92e6a9048065..6790cf7eba5a 100644 --- a/trunk/drivers/base/power/domain.c +++ b/trunk/drivers/base/power/domain.c @@ -15,44 +15,13 @@ #include #include #include -#include - -#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ -({ \ - type (*__routine)(struct device *__d); \ - type __ret = (type)0; \ - \ - __routine = genpd->dev_ops.callback; \ - if (__routine) { \ - __ret = __routine(dev); \ - } else { \ - __routine = dev_gpd_data(dev)->ops.callback; \ - if (__routine) \ - __ret = __routine(dev); \ - } \ - __ret; \ -}) - -#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \ -({ \ - ktime_t __start = ktime_get(); \ - type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \ - s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \ - struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev); \ - if (__elapsed > __gpd_data->td.field) { \ - __gpd_data->td.field = __elapsed; \ - dev_warn(dev, name " latency exceeded, new value %lld ns\n", \ - __elapsed); \ - } \ - __retval; \ -}) static LIST_HEAD(gpd_list); static DEFINE_MUTEX(gpd_list_lock); #ifdef CONFIG_PM -struct generic_pm_domain *dev_to_genpd(struct device *dev) +static struct generic_pm_domain *dev_to_genpd(struct device *dev) { if (IS_ERR_OR_NULL(dev->pm_domain)) return ERR_PTR(-EINVAL); @@ -60,31 +29,6 @@ struct generic_pm_domain *dev_to_genpd(struct device *dev) return pd_to_genpd(dev->pm_domain); } -static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev, - stop_latency_ns, "stop"); -} - -static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, - start_latency_ns, "start"); -} - -static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, - save_state_latency_ns, "state save"); -} - -static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, - restore_state_latency_ns, - "state restore"); -} - static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) { bool ret = false; @@ -201,21 +145,9 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd) } if 
(genpd->power_on) { - ktime_t time_start = ktime_get(); - s64 elapsed_ns; - ret = genpd->power_on(genpd); if (ret) goto err; - - elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); - if (elapsed_ns > genpd->power_on_latency_ns) { - genpd->power_on_latency_ns = elapsed_ns; - if (genpd->name) - pr_warning("%s: Power-on latency exceeded, " - "new value %lld ns\n", genpd->name, - elapsed_ns); - } } genpd_set_active(genpd); @@ -258,6 +190,7 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd, { struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); struct device *dev = pdd->dev; + struct device_driver *drv = dev->driver; int ret = 0; if (gpd_data->need_restore) @@ -265,9 +198,15 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd, mutex_unlock(&genpd->lock); - genpd_start_dev(genpd, dev); - ret = genpd_save_dev(genpd, dev); - genpd_stop_dev(genpd, dev); + if (drv && drv->pm && drv->pm->runtime_suspend) { + if (genpd->start_device) + genpd->start_device(dev); + + ret = drv->pm->runtime_suspend(dev); + + if (genpd->stop_device) + genpd->stop_device(dev); + } mutex_lock(&genpd->lock); @@ -288,15 +227,22 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd, { struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); struct device *dev = pdd->dev; + struct device_driver *drv = dev->driver; if (!gpd_data->need_restore) return; mutex_unlock(&genpd->lock); - genpd_start_dev(genpd, dev); - genpd_restore_dev(genpd, dev); - genpd_stop_dev(genpd, dev); + if (drv && drv->pm && drv->pm->runtime_resume) { + if (genpd->start_device) + genpd->start_device(dev); + + drv->pm->runtime_resume(dev); + + if (genpd->stop_device) + genpd->stop_device(dev); + } mutex_lock(&genpd->lock); @@ -408,16 +354,11 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) } if (genpd->power_off) { - ktime_t time_start; - s64 elapsed_ns; - if (atomic_read(&genpd->sd_count) > 0) { ret = -EBUSY; goto out; } - time_start = ktime_get(); - /* * If sd_count > 0 at this point, one of the subdomains hasn't * managed to call pm_genpd_poweron() for the master yet after @@ -431,29 +372,9 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) genpd_set_active(genpd); goto out; } - - elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); - if (elapsed_ns > genpd->power_off_latency_ns) { - genpd->power_off_latency_ns = elapsed_ns; - if (genpd->name) - pr_warning("%s: Power-off latency exceeded, " - "new value %lld ns\n", genpd->name, - elapsed_ns); - } } genpd->status = GPD_STATE_POWER_OFF; - genpd->power_off_time = ktime_get(); - - /* Update PM QoS information for devices in the domain. */ - list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { - struct gpd_timing_data *td = &to_gpd_data(pdd)->td; - - pm_runtime_update_max_time_suspended(pdd->dev, - td->start_latency_ns + - td->restore_state_latency_ns + - genpd->power_on_latency_ns); - } list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_dec(link->master); @@ -492,8 +413,6 @@ static void genpd_power_off_work_fn(struct work_struct *work) static int pm_genpd_runtime_suspend(struct device *dev) { struct generic_pm_domain *genpd; - bool (*stop_ok)(struct device *__dev); - int ret; dev_dbg(dev, "%s()\n", __func__); @@ -503,16 +422,11 @@ static int pm_genpd_runtime_suspend(struct device *dev) might_sleep_if(!genpd->dev_irq_safe); - stop_ok = genpd->gov ? 
genpd->gov->stop_ok : NULL; - if (stop_ok && !stop_ok(dev)) - return -EBUSY; - - ret = genpd_stop_dev(genpd, dev); - if (ret) - return ret; - - pm_runtime_update_max_time_suspended(dev, - dev_gpd_data(dev)->td.start_latency_ns); + if (genpd->stop_device) { + int ret = genpd->stop_device(dev); + if (ret) + return ret; + } /* * If power.irq_safe is set, this routine will be run with interrupts @@ -588,7 +502,8 @@ static int pm_genpd_runtime_resume(struct device *dev) mutex_unlock(&genpd->lock); out: - genpd_start_dev(genpd, dev); + if (genpd->start_device) + genpd->start_device(dev); return 0; } @@ -619,52 +534,6 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {} #ifdef CONFIG_PM_SLEEP -static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, - struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev); -} - -static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, suspend, dev); -} - -static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev); -} - -static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev); -} - -static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, resume, dev); -} - -static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, freeze, dev); -} - -static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev); -} - -static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev); -} - -static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, thaw, dev); -} - /** * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. * @genpd: PM domain to power off, if possible. @@ -721,7 +590,7 @@ static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd) if (!device_can_wakeup(dev)) return false; - active_wakeup = genpd_dev_active_wakeup(genpd, dev); + active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev); return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; } @@ -777,7 +646,7 @@ static int pm_genpd_prepare(struct device *dev) /* * The PM domain must be in the GPD_STATE_ACTIVE state at this point, * so pm_genpd_poweron() will return immediately, but if the device - * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need + * is suspended (e.g. it's been stopped by .stop_device()), we need * to make it operational. */ pm_runtime_resume(dev); @@ -816,7 +685,7 @@ static int pm_genpd_suspend(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev); + return genpd->suspend_power_off ? 
0 : pm_generic_suspend(dev); } /** @@ -841,14 +710,16 @@ static int pm_genpd_suspend_noirq(struct device *dev) if (genpd->suspend_power_off) return 0; - ret = genpd_suspend_late(genpd, dev); + ret = pm_generic_suspend_noirq(dev); if (ret) return ret; - if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)) + if (dev->power.wakeup_path + && genpd->active_wakeup && genpd->active_wakeup(dev)) return 0; - genpd_stop_dev(genpd, dev); + if (genpd->stop_device) + genpd->stop_device(dev); /* * Since all of the "noirq" callbacks are executed sequentially, it is @@ -890,9 +761,10 @@ static int pm_genpd_resume_noirq(struct device *dev) */ pm_genpd_poweron(genpd); genpd->suspended_count--; - genpd_start_dev(genpd, dev); + if (genpd->start_device) + genpd->start_device(dev); - return genpd_resume_early(genpd, dev); + return pm_generic_resume_noirq(dev); } /** @@ -913,7 +785,7 @@ static int pm_genpd_resume(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev); + return genpd->suspend_power_off ? 0 : pm_generic_resume(dev); } /** @@ -934,7 +806,7 @@ static int pm_genpd_freeze(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev); + return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev); } /** @@ -960,11 +832,12 @@ static int pm_genpd_freeze_noirq(struct device *dev) if (genpd->suspend_power_off) return 0; - ret = genpd_freeze_late(genpd, dev); + ret = pm_generic_freeze_noirq(dev); if (ret) return ret; - genpd_stop_dev(genpd, dev); + if (genpd->stop_device) + genpd->stop_device(dev); return 0; } @@ -991,9 +864,10 @@ static int pm_genpd_thaw_noirq(struct device *dev) if (genpd->suspend_power_off) return 0; - genpd_start_dev(genpd, dev); + if (genpd->start_device) + genpd->start_device(dev); - return genpd_thaw_early(genpd, dev); + return pm_generic_thaw_noirq(dev); } /** @@ -1014,7 +888,72 @@ static int pm_genpd_thaw(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev); + return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev); +} + +/** + * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain. + * @dev: Device to suspend. + * + * Power off a device under the assumption that its pm_domain field points to + * the domain member of an object of type struct generic_pm_domain representing + * a PM domain consisting of I/O devices. + */ +static int pm_genpd_dev_poweroff(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev); +} + +/** + * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain. + * @dev: Device to suspend. + * + * Carry out a late powering off of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a PM domain consisting of I/O devices. 
+ */ +static int pm_genpd_dev_poweroff_noirq(struct device *dev) +{ + struct generic_pm_domain *genpd; + int ret; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + if (genpd->suspend_power_off) + return 0; + + ret = pm_generic_poweroff_noirq(dev); + if (ret) + return ret; + + if (dev->power.wakeup_path + && genpd->active_wakeup && genpd->active_wakeup(dev)) + return 0; + + if (genpd->stop_device) + genpd->stop_device(dev); + + /* + * Since all of the "noirq" callbacks are executed sequentially, it is + * guaranteed that this function will never run twice in parallel for + * the same PM domain, so it is not necessary to use locking here. + */ + genpd->suspended_count++; + pm_genpd_sync_poweroff(genpd); + + return 0; } /** @@ -1054,9 +993,31 @@ static int pm_genpd_restore_noirq(struct device *dev) pm_genpd_poweron(genpd); genpd->suspended_count--; - genpd_start_dev(genpd, dev); + if (genpd->start_device) + genpd->start_device(dev); + + return pm_generic_restore_noirq(dev); +} + +/** + * pm_genpd_restore - Restore a device belonging to an I/O power domain. + * @dev: Device to resume. + * + * Restore a device under the assumption that its pm_domain field points to the + * domain member of an object of type struct generic_pm_domain representing + * a power domain consisting of I/O devices. + */ +static int pm_genpd_restore(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; - return genpd_resume_early(genpd, dev); + return genpd->suspend_power_off ? 0 : pm_generic_restore(dev); } /** @@ -1106,19 +1067,20 @@ static void pm_genpd_complete(struct device *dev) #define pm_genpd_freeze_noirq NULL #define pm_genpd_thaw_noirq NULL #define pm_genpd_thaw NULL +#define pm_genpd_dev_poweroff_noirq NULL +#define pm_genpd_dev_poweroff NULL #define pm_genpd_restore_noirq NULL +#define pm_genpd_restore NULL #define pm_genpd_complete NULL #endif /* CONFIG_PM_SLEEP */ /** - * __pm_genpd_add_device - Add a device to an I/O PM domain. + * pm_genpd_add_device - Add a device to an I/O PM domain. * @genpd: PM domain to add the device to. * @dev: Device to be added. - * @td: Set of PM QoS timing parameters to attach to the device. */ -int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, - struct gpd_timing_data *td) +int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) { struct generic_pm_domain_data *gpd_data; struct pm_domain_data *pdd; @@ -1161,8 +1123,6 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, gpd_data->base.dev = dev; gpd_data->need_restore = false; list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); - if (td) - gpd_data->td = *td; out: genpd_release_lock(genpd); @@ -1319,204 +1279,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, return ret; } -/** - * pm_genpd_add_callbacks - Add PM domain callbacks to a given device. - * @dev: Device to add the callbacks to. - * @ops: Set of callbacks to add. - * @td: Timing data to add to the device along with the callbacks (optional). 
- */ -int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops, - struct gpd_timing_data *td) -{ - struct pm_domain_data *pdd; - int ret = 0; - - if (!(dev && dev->power.subsys_data && ops)) - return -EINVAL; - - pm_runtime_disable(dev); - device_pm_lock(); - - pdd = dev->power.subsys_data->domain_data; - if (pdd) { - struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); - - gpd_data->ops = *ops; - if (td) - gpd_data->td = *td; - } else { - ret = -EINVAL; - } - - device_pm_unlock(); - pm_runtime_enable(dev); - - return ret; -} -EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks); - -/** - * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device. - * @dev: Device to remove the callbacks from. - * @clear_td: If set, clear the device's timing data too. - */ -int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) -{ - struct pm_domain_data *pdd; - int ret = 0; - - if (!(dev && dev->power.subsys_data)) - return -EINVAL; - - pm_runtime_disable(dev); - device_pm_lock(); - - pdd = dev->power.subsys_data->domain_data; - if (pdd) { - struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); - - gpd_data->ops = (struct gpd_dev_ops){ 0 }; - if (clear_td) - gpd_data->td = (struct gpd_timing_data){ 0 }; - } else { - ret = -EINVAL; - } - - device_pm_unlock(); - pm_runtime_enable(dev); - - return ret; -} -EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); - -/* Default device callbacks for generic PM domains. */ - -/** - * pm_genpd_default_save_state - Default "save device state" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_save_state(struct device *dev) -{ - int (*cb)(struct device *__dev); - struct device_driver *drv = dev->driver; - - cb = dev_gpd_data(dev)->ops.save_state; - if (cb) - return cb(dev); - - if (drv && drv->pm && drv->pm->runtime_suspend) - return drv->pm->runtime_suspend(dev); - - return 0; -} - -/** - * pm_genpd_default_restore_state - Default PM domians "restore device state". - * @dev: Device to handle. - */ -static int pm_genpd_default_restore_state(struct device *dev) -{ - int (*cb)(struct device *__dev); - struct device_driver *drv = dev->driver; - - cb = dev_gpd_data(dev)->ops.restore_state; - if (cb) - return cb(dev); - - if (drv && drv->pm && drv->pm->runtime_resume) - return drv->pm->runtime_resume(dev); - - return 0; -} - -/** - * pm_genpd_default_suspend - Default "device suspend" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_suspend(struct device *dev) -{ - int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend; - - return cb ? cb(dev) : pm_generic_suspend(dev); -} - -/** - * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_suspend_late(struct device *dev) -{ - int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late; - - return cb ? cb(dev) : pm_generic_suspend_noirq(dev); -} - -/** - * pm_genpd_default_resume_early - Default "early device resume" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_resume_early(struct device *dev) -{ - int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early; - - return cb ? cb(dev) : pm_generic_resume_noirq(dev); -} - -/** - * pm_genpd_default_resume - Default "device resume" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_resume(struct device *dev) -{ - int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume; - - return cb ? 
cb(dev) : pm_generic_resume(dev); -} - -/** - * pm_genpd_default_freeze - Default "device freeze" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_freeze(struct device *dev) -{ - int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze; - - return cb ? cb(dev) : pm_generic_freeze(dev); -} - -/** - * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_freeze_late(struct device *dev) -{ - int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late; - - return cb ? cb(dev) : pm_generic_freeze_noirq(dev); -} - -/** - * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_thaw_early(struct device *dev) -{ - int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early; - - return cb ? cb(dev) : pm_generic_thaw_noirq(dev); -} - -/** - * pm_genpd_default_thaw - Default "device thaw" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_thaw(struct device *dev) -{ - int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw; - - return cb ? cb(dev) : pm_generic_thaw(dev); -} - /** * pm_genpd_init - Initialize a generic I/O PM domain object. * @genpd: PM domain object to initialize. @@ -1543,7 +1305,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->resume_count = 0; genpd->device_count = 0; genpd->suspended_count = 0; - genpd->max_off_time_ns = -1; genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; @@ -1556,21 +1317,11 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; genpd->domain.ops.thaw = pm_genpd_thaw; - genpd->domain.ops.poweroff = pm_genpd_suspend; - genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq; + genpd->domain.ops.poweroff = pm_genpd_dev_poweroff; + genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq; genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; - genpd->domain.ops.restore = pm_genpd_resume; + genpd->domain.ops.restore = pm_genpd_restore; genpd->domain.ops.complete = pm_genpd_complete; - genpd->dev_ops.save_state = pm_genpd_default_save_state; - genpd->dev_ops.restore_state = pm_genpd_default_restore_state; - genpd->dev_ops.suspend = pm_genpd_default_suspend; - genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late; - genpd->dev_ops.resume_early = pm_genpd_default_resume_early; - genpd->dev_ops.resume = pm_genpd_default_resume; - genpd->dev_ops.freeze = pm_genpd_default_freeze; - genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late; - genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early; - genpd->dev_ops.thaw = pm_genpd_default_thaw; mutex_lock(&gpd_list_lock); list_add(&genpd->gpd_list_node, &gpd_list); mutex_unlock(&gpd_list_lock); diff --git a/trunk/drivers/base/power/domain_governor.c b/trunk/drivers/base/power/domain_governor.c deleted file mode 100644 index 51527ee92d10..000000000000 --- a/trunk/drivers/base/power/domain_governor.c +++ /dev/null @@ -1,156 +0,0 @@ -/* - * drivers/base/power/domain_governor.c - Governors for device PM domains. - * - * Copyright (C) 2011 Rafael J. Wysocki , Renesas Electronics Corp. - * - * This file is released under the GPLv2. 
- */ - -#include -#include -#include -#include -#include - -/** - * default_stop_ok - Default PM domain governor routine for stopping devices. - * @dev: Device to check. - */ -bool default_stop_ok(struct device *dev) -{ - struct gpd_timing_data *td = &dev_gpd_data(dev)->td; - - dev_dbg(dev, "%s()\n", __func__); - - if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0) - return true; - - return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns - && td->break_even_ns < dev->power.max_time_suspended_ns; -} - -/** - * default_power_down_ok - Default generic PM domain power off governor routine. - * @pd: PM domain to check. - * - * This routine must be executed under the PM domain's lock. - */ -static bool default_power_down_ok(struct dev_pm_domain *pd) -{ - struct generic_pm_domain *genpd = pd_to_genpd(pd); - struct gpd_link *link; - struct pm_domain_data *pdd; - s64 min_dev_off_time_ns; - s64 off_on_time_ns; - ktime_t time_now = ktime_get(); - - off_on_time_ns = genpd->power_off_latency_ns + - genpd->power_on_latency_ns; - /* - * It doesn't make sense to remove power from the domain if saving - * the state of all devices in it and the power off/power on operations - * take too much time. - * - * All devices in this domain have been stopped already at this point. - */ - list_for_each_entry(pdd, &genpd->dev_list, list_node) { - if (pdd->dev->driver) - off_on_time_ns += - to_gpd_data(pdd)->td.save_state_latency_ns; - } - - /* - * Check if subdomains can be off for enough time. - * - * All subdomains have been powered off already at this point. - */ - list_for_each_entry(link, &genpd->master_links, master_node) { - struct generic_pm_domain *sd = link->slave; - s64 sd_max_off_ns = sd->max_off_time_ns; - - if (sd_max_off_ns < 0) - continue; - - sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now, - sd->power_off_time)); - /* - * Check if the subdomain is allowed to be off long enough for - * the current domain to turn off and on (that's how much time - * it will have to wait worst case). - */ - if (sd_max_off_ns <= off_on_time_ns) - return false; - } - - /* - * Check if the devices in the domain can be off enough time. - */ - min_dev_off_time_ns = -1; - list_for_each_entry(pdd, &genpd->dev_list, list_node) { - struct gpd_timing_data *td; - struct device *dev = pdd->dev; - s64 dev_off_time_ns; - - if (!dev->driver || dev->power.max_time_suspended_ns < 0) - continue; - - td = &to_gpd_data(pdd)->td; - dev_off_time_ns = dev->power.max_time_suspended_ns - - (td->start_latency_ns + td->restore_state_latency_ns + - ktime_to_ns(ktime_sub(time_now, - dev->power.suspend_time))); - if (dev_off_time_ns <= off_on_time_ns) - return false; - - if (min_dev_off_time_ns > dev_off_time_ns - || min_dev_off_time_ns < 0) - min_dev_off_time_ns = dev_off_time_ns; - } - - if (min_dev_off_time_ns < 0) { - /* - * There are no latency constraints, so the domain can spend - * arbitrary time in the "off" state. - */ - genpd->max_off_time_ns = -1; - return true; - } - - /* - * The difference between the computed minimum delta and the time needed - * to turn the domain on is the maximum theoretical time this domain can - * spend in the "off" state. - */ - min_dev_off_time_ns -= genpd->power_on_latency_ns; - - /* - * If the difference between the computed minimum delta and the time - * needed to turn the domain off and back on on is smaller than the - * domain's power break even time, removing power from the domain is not - * worth it. 
- */ - if (genpd->break_even_ns > - min_dev_off_time_ns - genpd->power_off_latency_ns) - return false; - - genpd->max_off_time_ns = min_dev_off_time_ns; - return true; -} - -struct dev_power_governor simple_qos_governor = { - .stop_ok = default_stop_ok, - .power_down_ok = default_power_down_ok, -}; - -static bool always_on_power_down_ok(struct dev_pm_domain *domain) -{ - return false; -} - -/** - * pm_genpd_gov_always_on - A governor implementing an always-on policy - */ -struct dev_power_governor pm_domain_always_on_gov = { - .power_down_ok = always_on_power_down_ok, - .stop_ok = default_stop_ok, -}; diff --git a/trunk/drivers/base/power/qos.c b/trunk/drivers/base/power/qos.c index c5d358837461..86de6c50fc41 100644 --- a/trunk/drivers/base/power/qos.c +++ b/trunk/drivers/base/power/qos.c @@ -47,29 +47,21 @@ static DEFINE_MUTEX(dev_pm_qos_mtx); static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); /** - * __dev_pm_qos_read_value - Get PM QoS constraint for a given device. - * @dev: Device to get the PM QoS constraint value for. - * - * This routine must be called with dev->power.lock held. - */ -s32 __dev_pm_qos_read_value(struct device *dev) -{ - struct pm_qos_constraints *c = dev->power.constraints; - - return c ? pm_qos_read_value(c) : 0; -} - -/** - * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked). + * dev_pm_qos_read_value - Get PM QoS constraint for a given device. * @dev: Device to get the PM QoS constraint value for. */ s32 dev_pm_qos_read_value(struct device *dev) { + struct pm_qos_constraints *c; unsigned long flags; - s32 ret; + s32 ret = 0; spin_lock_irqsave(&dev->power.lock, flags); - ret = __dev_pm_qos_read_value(dev); + + c = dev->power.constraints; + if (c) + ret = pm_qos_read_value(c); + spin_unlock_irqrestore(&dev->power.lock, flags); return ret; @@ -420,28 +412,3 @@ int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier) return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier); } EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); - -/** - * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor. - * @dev: Device whose ancestor to add the request for. - * @req: Pointer to the preallocated handle. - * @value: Constraint latency value. - */ -int dev_pm_qos_add_ancestor_request(struct device *dev, - struct dev_pm_qos_request *req, s32 value) -{ - struct device *ancestor = dev->parent; - int error = -ENODEV; - - while (ancestor && !ancestor->power.ignore_children) - ancestor = ancestor->parent; - - if (ancestor) - error = dev_pm_qos_add_request(ancestor, req, value); - - if (error) - req->dev = NULL; - - return error; -} -EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); diff --git a/trunk/drivers/base/power/runtime.c b/trunk/drivers/base/power/runtime.c index 541f821d4ea6..c56efd756531 100644 --- a/trunk/drivers/base/power/runtime.c +++ b/trunk/drivers/base/power/runtime.c @@ -282,47 +282,6 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev) return retval != -EACCES ? retval : -EIO; } -struct rpm_qos_data { - ktime_t time_now; - s64 constraint_ns; -}; - -/** - * rpm_update_qos_constraint - Update a given PM QoS constraint data. - * @dev: Device whose timing data to use. - * @data: PM QoS constraint data to update. - * - * Use the suspend timing data of @dev to update PM QoS constraint data pointed - * to by @data. 
- */ -static int rpm_update_qos_constraint(struct device *dev, void *data) -{ - struct rpm_qos_data *qos = data; - unsigned long flags; - s64 delta_ns; - int ret = 0; - - spin_lock_irqsave(&dev->power.lock, flags); - - if (dev->power.max_time_suspended_ns < 0) - goto out; - - delta_ns = dev->power.max_time_suspended_ns - - ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time)); - if (delta_ns <= 0) { - ret = -EBUSY; - goto out; - } - - if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0) - qos->constraint_ns = delta_ns; - - out: - spin_unlock_irqrestore(&dev->power.lock, flags); - - return ret; -} - /** * rpm_suspend - Carry out runtime suspend of given device. * @dev: Device to suspend. @@ -349,7 +308,6 @@ static int rpm_suspend(struct device *dev, int rpmflags) { int (*callback)(struct device *); struct device *parent = NULL; - struct rpm_qos_data qos; int retval; trace_rpm_suspend(dev, rpmflags); @@ -445,38 +403,8 @@ static int rpm_suspend(struct device *dev, int rpmflags) goto out; } - qos.constraint_ns = __dev_pm_qos_read_value(dev); - if (qos.constraint_ns < 0) { - /* Negative constraint means "never suspend". */ - retval = -EPERM; - goto out; - } - qos.constraint_ns *= NSEC_PER_USEC; - qos.time_now = ktime_get(); - __update_runtime_status(dev, RPM_SUSPENDING); - if (!dev->power.ignore_children) { - if (dev->power.irq_safe) - spin_unlock(&dev->power.lock); - else - spin_unlock_irq(&dev->power.lock); - - retval = device_for_each_child(dev, &qos, - rpm_update_qos_constraint); - - if (dev->power.irq_safe) - spin_lock(&dev->power.lock); - else - spin_lock_irq(&dev->power.lock); - - if (retval) - goto fail; - } - - dev->power.suspend_time = qos.time_now; - dev->power.max_time_suspended_ns = qos.constraint_ns ? : -1; - if (dev->pm_domain) callback = dev->pm_domain->ops.runtime_suspend; else if (dev->type && dev->type->pm) @@ -492,9 +420,27 @@ static int rpm_suspend(struct device *dev, int rpmflags) callback = dev->driver->pm->runtime_suspend; retval = rpm_callback(callback, dev); - if (retval) - goto fail; + if (retval) { + __update_runtime_status(dev, RPM_ACTIVE); + dev->power.deferred_resume = false; + if (retval == -EAGAIN || retval == -EBUSY) { + dev->power.runtime_error = 0; + /* + * If the callback routine failed an autosuspend, and + * if the last_busy time has been updated so that there + * is a new autosuspend expiration time, automatically + * reschedule another autosuspend. + */ + if ((rpmflags & RPM_AUTO) && + pm_runtime_autosuspend_expiration(dev) != 0) + goto repeat; + } else { + pm_runtime_cancel_pending(dev); + } + wake_up_all(&dev->power.wait_queue); + goto out; + } no_callback: __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_deactivate_timer(dev); @@ -526,29 +472,6 @@ static int rpm_suspend(struct device *dev, int rpmflags) trace_rpm_return_int(dev, _THIS_IP_, retval); return retval; - - fail: - __update_runtime_status(dev, RPM_ACTIVE); - dev->power.suspend_time = ktime_set(0, 0); - dev->power.max_time_suspended_ns = -1; - dev->power.deferred_resume = false; - if (retval == -EAGAIN || retval == -EBUSY) { - dev->power.runtime_error = 0; - - /* - * If the callback routine failed an autosuspend, and - * if the last_busy time has been updated so that there - * is a new autosuspend expiration time, automatically - * reschedule another autosuspend. 
- */ - if ((rpmflags & RPM_AUTO) && - pm_runtime_autosuspend_expiration(dev) != 0) - goto repeat; - } else { - pm_runtime_cancel_pending(dev); - } - wake_up_all(&dev->power.wait_queue); - goto out; } /** @@ -703,9 +626,6 @@ static int rpm_resume(struct device *dev, int rpmflags) if (dev->power.no_callbacks) goto no_callback; /* Assume success. */ - dev->power.suspend_time = ktime_set(0, 0); - dev->power.max_time_suspended_ns = -1; - __update_runtime_status(dev, RPM_RESUMING); if (dev->pm_domain) @@ -1368,9 +1288,6 @@ void pm_runtime_init(struct device *dev) setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn, (unsigned long)dev); - dev->power.suspend_time = ktime_set(0, 0); - dev->power.max_time_suspended_ns = -1; - init_waitqueue_head(&dev->power.wait_queue); } @@ -1388,28 +1305,3 @@ void pm_runtime_remove(struct device *dev) if (dev->power.irq_safe && dev->parent) pm_runtime_put_sync(dev->parent); } - -/** - * pm_runtime_update_max_time_suspended - Update device's suspend time data. - * @dev: Device to handle. - * @delta_ns: Value to subtract from the device's max_time_suspended_ns field. - * - * Update the device's power.max_time_suspended_ns field by subtracting - * @delta_ns from it. The resulting value of power.max_time_suspended_ns is - * never negative. - */ -void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns) -{ - unsigned long flags; - - spin_lock_irqsave(&dev->power.lock, flags); - - if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) { - if (dev->power.max_time_suspended_ns > delta_ns) - dev->power.max_time_suspended_ns -= delta_ns; - else - dev->power.max_time_suspended_ns = 0; - } - - spin_unlock_irqrestore(&dev->power.lock, flags); -} diff --git a/trunk/drivers/devfreq/Kconfig b/trunk/drivers/devfreq/Kconfig index 464fa2147dfb..8f0491037080 100644 --- a/trunk/drivers/devfreq/Kconfig +++ b/trunk/drivers/devfreq/Kconfig @@ -65,17 +65,4 @@ config DEVFREQ_GOV_USERSPACE comment "DEVFREQ Drivers" -config ARM_EXYNOS4_BUS_DEVFREQ - bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver" - depends on CPU_EXYNOS4210 || CPU_EXYNOS4212 || CPU_EXYNOS4412 - select ARCH_HAS_OPP - select DEVFREQ_GOV_SIMPLE_ONDEMAND - help - This adds the DEVFREQ driver for Exynos4210 memory bus (vdd_int) - and Exynos4212/4412 memory interface and bus (vdd_mif + vdd_int). - It reads PPMU counters of memory controllers and adjusts - the operating frequencies and voltages with OPP support. - To operate with optimal voltages, ASV support is required - (CONFIG_EXYNOS_ASV). - endif # PM_DEVFREQ diff --git a/trunk/drivers/devfreq/Makefile b/trunk/drivers/devfreq/Makefile index 8c464234f7e7..4564a89e970a 100644 --- a/trunk/drivers/devfreq/Makefile +++ b/trunk/drivers/devfreq/Makefile @@ -3,6 +3,3 @@ obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o - -# DEVFREQ Drivers -obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos4_bus.o diff --git a/trunk/drivers/devfreq/devfreq.c b/trunk/drivers/devfreq/devfreq.c index c189b82f5ece..59d24e9cb8c5 100644 --- a/trunk/drivers/devfreq/devfreq.c +++ b/trunk/drivers/devfreq/devfreq.c @@ -347,7 +347,7 @@ struct devfreq *devfreq_add_device(struct device *dev, if (!IS_ERR(devfreq)) { dev_err(dev, "%s: Unable to create devfreq for the device. 
It already has one.\n", __func__); err = -EINVAL; - goto err_out; + goto out; } } @@ -356,7 +356,7 @@ struct devfreq *devfreq_add_device(struct device *dev, dev_err(dev, "%s: Unable to create devfreq for the device\n", __func__); err = -ENOMEM; - goto err_out; + goto out; } mutex_init(&devfreq->lock); @@ -399,16 +399,17 @@ struct devfreq *devfreq_add_device(struct device *dev, devfreq->next_polling); } mutex_unlock(&devfreq_list_lock); -out: - return devfreq; - + goto out; err_init: device_unregister(&devfreq->dev); err_dev: mutex_unlock(&devfreq->lock); kfree(devfreq); -err_out: - return ERR_PTR(err); +out: + if (err) + return ERR_PTR(err); + else + return devfreq; } /** diff --git a/trunk/drivers/devfreq/exynos4_bus.c b/trunk/drivers/devfreq/exynos4_bus.c deleted file mode 100644 index 6460577d6701..000000000000 --- a/trunk/drivers/devfreq/exynos4_bus.c +++ /dev/null @@ -1,1135 +0,0 @@ -/* drivers/devfreq/exynos4210_memorybus.c - * - * Copyright (c) 2011 Samsung Electronics Co., Ltd. - * http://www.samsung.com/ - * MyungJoo Ham - * - * EXYNOS4 - Memory/Bus clock frequency scaling support in DEVFREQ framework - * This version supports EXYNOS4210 only. This changes bus frequencies - * and vddint voltages. Exynos4412/4212 should be able to be supported - * with minor modifications. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Exynos4 ASV has been in the mailing list, but not upstreamed, yet. */ -#ifdef CONFIG_EXYNOS_ASV -extern unsigned int exynos_result_of_asv; -#endif - -#include - -#include - -#define MAX_SAFEVOLT 1200000 /* 1.2V */ - -enum exynos4_busf_type { - TYPE_BUSF_EXYNOS4210, - TYPE_BUSF_EXYNOS4x12, -}; - -/* Assume that the bus is saturated if the utilization is 40% */ -#define BUS_SATURATION_RATIO 40 - -enum ppmu_counter { - PPMU_PMNCNT0 = 0, - PPMU_PMCCNT1, - PPMU_PMNCNT2, - PPMU_PMNCNT3, - PPMU_PMNCNT_MAX, -}; -struct exynos4_ppmu { - void __iomem *hw_base; - unsigned int ccnt; - unsigned int event; - unsigned int count[PPMU_PMNCNT_MAX]; - bool ccnt_overflow; - bool count_overflow[PPMU_PMNCNT_MAX]; -}; - -enum busclk_level_idx { - LV_0 = 0, - LV_1, - LV_2, - LV_3, - LV_4, - _LV_END -}; -#define EX4210_LV_MAX LV_2 -#define EX4x12_LV_MAX LV_4 -#define EX4210_LV_NUM (LV_2 + 1) -#define EX4x12_LV_NUM (LV_4 + 1) - -struct busfreq_data { - enum exynos4_busf_type type; - struct device *dev; - struct devfreq *devfreq; - bool disabled; - struct regulator *vdd_int; - struct regulator *vdd_mif; /* Exynos4412/4212 only */ - struct opp *curr_opp; - struct exynos4_ppmu dmc[2]; - - struct notifier_block pm_notifier; - struct mutex lock; - - /* Dividers calculated at boot/probe-time */ - unsigned int dmc_divtable[_LV_END]; /* DMC0 */ - unsigned int top_divtable[_LV_END]; -}; - -struct bus_opp_table { - unsigned int idx; - unsigned long clk; - unsigned long volt; -}; - -/* 4210 controls clock of mif and voltage of int */ -static struct bus_opp_table exynos4210_busclk_table[] = { - {LV_0, 400000, 1150000}, - {LV_1, 267000, 1050000}, - {LV_2, 133000, 1025000}, - {0, 0, 0}, -}; - -/* - * MIF is the main control knob clock for exynox4x12 MIF/INT - * clock and voltage of both mif/int are controlled. 
- */ -static struct bus_opp_table exynos4x12_mifclk_table[] = { - {LV_0, 400000, 1100000}, - {LV_1, 267000, 1000000}, - {LV_2, 160000, 950000}, - {LV_3, 133000, 950000}, - {LV_4, 100000, 950000}, - {0, 0, 0}, -}; - -/* - * INT is not the control knob of 4x12. LV_x is not meant to represent - * the current performance. (MIF does) - */ -static struct bus_opp_table exynos4x12_intclk_table[] = { - {LV_0, 200000, 1000000}, - {LV_1, 160000, 950000}, - {LV_2, 133000, 925000}, - {LV_3, 100000, 900000}, - {0, 0, 0}, -}; - -/* TODO: asv volt definitions are "__initdata"? */ -/* Some chips have different operating voltages */ -static unsigned int exynos4210_asv_volt[][EX4210_LV_NUM] = { - {1150000, 1050000, 1050000}, - {1125000, 1025000, 1025000}, - {1100000, 1000000, 1000000}, - {1075000, 975000, 975000}, - {1050000, 950000, 950000}, -}; - -static unsigned int exynos4x12_mif_step_50[][EX4x12_LV_NUM] = { - /* 400 267 160 133 100 */ - {1050000, 950000, 900000, 900000, 900000}, /* ASV0 */ - {1050000, 950000, 900000, 900000, 900000}, /* ASV1 */ - {1050000, 950000, 900000, 900000, 900000}, /* ASV2 */ - {1050000, 900000, 900000, 900000, 900000}, /* ASV3 */ - {1050000, 900000, 900000, 900000, 850000}, /* ASV4 */ - {1050000, 900000, 900000, 850000, 850000}, /* ASV5 */ - {1050000, 900000, 850000, 850000, 850000}, /* ASV6 */ - {1050000, 900000, 850000, 850000, 850000}, /* ASV7 */ - {1050000, 900000, 850000, 850000, 850000}, /* ASV8 */ -}; - -static unsigned int exynos4x12_int_volt[][EX4x12_LV_NUM] = { - /* 200 160 133 100 */ - {1000000, 950000, 925000, 900000}, /* ASV0 */ - {975000, 925000, 925000, 900000}, /* ASV1 */ - {950000, 925000, 900000, 875000}, /* ASV2 */ - {950000, 900000, 900000, 875000}, /* ASV3 */ - {925000, 875000, 875000, 875000}, /* ASV4 */ - {900000, 850000, 850000, 850000}, /* ASV5 */ - {900000, 850000, 850000, 850000}, /* ASV6 */ - {900000, 850000, 850000, 850000}, /* ASV7 */ - {900000, 850000, 850000, 850000}, /* ASV8 */ -}; - -/*** Clock Divider Data for Exynos4210 ***/ -static unsigned int exynos4210_clkdiv_dmc0[][8] = { - /* - * Clock divider value for following - * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD - * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS } - */ - - /* DMC L0: 400MHz */ - { 3, 1, 1, 1, 1, 1, 3, 1 }, - /* DMC L1: 266.7MHz */ - { 4, 1, 1, 2, 1, 1, 3, 1 }, - /* DMC L2: 133MHz */ - { 5, 1, 1, 5, 1, 1, 3, 1 }, -}; -static unsigned int exynos4210_clkdiv_top[][5] = { - /* - * Clock divider value for following - * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND } - */ - /* ACLK200 L0: 200MHz */ - { 3, 7, 4, 5, 1 }, - /* ACLK200 L1: 160MHz */ - { 4, 7, 5, 6, 1 }, - /* ACLK200 L2: 133MHz */ - { 5, 7, 7, 7, 1 }, -}; -static unsigned int exynos4210_clkdiv_lr_bus[][2] = { - /* - * Clock divider value for following - * { DIVGDL/R, DIVGPL/R } - */ - /* ACLK_GDL/R L1: 200MHz */ - { 3, 1 }, - /* ACLK_GDL/R L2: 160MHz */ - { 4, 1 }, - /* ACLK_GDL/R L3: 133MHz */ - { 5, 1 }, -}; - -/*** Clock Divider Data for Exynos4212/4412 ***/ -static unsigned int exynos4x12_clkdiv_dmc0[][6] = { - /* - * Clock divider value for following - * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD - * DIVDMCP} - */ - - /* DMC L0: 400MHz */ - {3, 1, 1, 1, 1, 1}, - /* DMC L1: 266.7MHz */ - {4, 1, 1, 2, 1, 1}, - /* DMC L2: 160MHz */ - {5, 1, 1, 4, 1, 1}, - /* DMC L3: 133MHz */ - {5, 1, 1, 5, 1, 1}, - /* DMC L4: 100MHz */ - {7, 1, 1, 7, 1, 1}, -}; -static unsigned int exynos4x12_clkdiv_dmc1[][6] = { - /* - * Clock divider value for following - * { G2DACP, DIVC2C, DIVC2C_ACLK } - */ - - /* DMC L0: 400MHz */ 
- {3, 1, 1}, - /* DMC L1: 266.7MHz */ - {4, 2, 1}, - /* DMC L2: 160MHz */ - {5, 4, 1}, - /* DMC L3: 133MHz */ - {5, 5, 1}, - /* DMC L4: 100MHz */ - {7, 7, 1}, -}; -static unsigned int exynos4x12_clkdiv_top[][5] = { - /* - * Clock divider value for following - * { DIVACLK266_GPS, DIVACLK100, DIVACLK160, - DIVACLK133, DIVONENAND } - */ - - /* ACLK_GDL/R L0: 200MHz */ - {2, 7, 4, 5, 1}, - /* ACLK_GDL/R L1: 200MHz */ - {2, 7, 4, 5, 1}, - /* ACLK_GDL/R L2: 160MHz */ - {4, 7, 5, 7, 1}, - /* ACLK_GDL/R L3: 133MHz */ - {4, 7, 5, 7, 1}, - /* ACLK_GDL/R L4: 100MHz */ - {7, 7, 7, 7, 1}, -}; -static unsigned int exynos4x12_clkdiv_lr_bus[][2] = { - /* - * Clock divider value for following - * { DIVGDL/R, DIVGPL/R } - */ - - /* ACLK_GDL/R L0: 200MHz */ - {3, 1}, - /* ACLK_GDL/R L1: 200MHz */ - {3, 1}, - /* ACLK_GDL/R L2: 160MHz */ - {4, 1}, - /* ACLK_GDL/R L3: 133MHz */ - {5, 1}, - /* ACLK_GDL/R L4: 100MHz */ - {7, 1}, -}; -static unsigned int exynos4x12_clkdiv_sclkip[][3] = { - /* - * Clock divider value for following - * { DIVMFC, DIVJPEG, DIVFIMC0~3} - */ - - /* SCLK_MFC: 200MHz */ - {3, 3, 4}, - /* SCLK_MFC: 200MHz */ - {3, 3, 4}, - /* SCLK_MFC: 160MHz */ - {4, 4, 5}, - /* SCLK_MFC: 133MHz */ - {5, 5, 5}, - /* SCLK_MFC: 100MHz */ - {7, 7, 7}, -}; - - -static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp) -{ - unsigned int index; - unsigned int tmp; - - for (index = LV_0; index < EX4210_LV_NUM; index++) - if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk) - break; - - if (index == EX4210_LV_NUM) - return -EINVAL; - - /* Change Divider - DMC0 */ - tmp = data->dmc_divtable[index]; - - __raw_writel(tmp, S5P_CLKDIV_DMC0); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0); - } while (tmp & 0x11111111); - - /* Change Divider - TOP */ - tmp = data->top_divtable[index]; - - __raw_writel(tmp, S5P_CLKDIV_TOP); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_TOP); - } while (tmp & 0x11111); - - /* Change Divider - LEFTBUS */ - tmp = __raw_readl(S5P_CLKDIV_LEFTBUS); - - tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK); - - tmp |= ((exynos4210_clkdiv_lr_bus[index][0] << - S5P_CLKDIV_BUS_GDLR_SHIFT) | - (exynos4210_clkdiv_lr_bus[index][1] << - S5P_CLKDIV_BUS_GPLR_SHIFT)); - - __raw_writel(tmp, S5P_CLKDIV_LEFTBUS); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS); - } while (tmp & 0x11); - - /* Change Divider - RIGHTBUS */ - tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS); - - tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK); - - tmp |= ((exynos4210_clkdiv_lr_bus[index][0] << - S5P_CLKDIV_BUS_GDLR_SHIFT) | - (exynos4210_clkdiv_lr_bus[index][1] << - S5P_CLKDIV_BUS_GPLR_SHIFT)); - - __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS); - } while (tmp & 0x11); - - return 0; -} - -static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp) -{ - unsigned int index; - unsigned int tmp; - - for (index = LV_0; index < EX4x12_LV_NUM; index++) - if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk) - break; - - if (index == EX4x12_LV_NUM) - return -EINVAL; - - /* Change Divider - DMC0 */ - tmp = data->dmc_divtable[index]; - - __raw_writel(tmp, S5P_CLKDIV_DMC0); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0); - } while (tmp & 0x11111111); - - /* Change Divider - DMC1 */ - tmp = __raw_readl(S5P_CLKDIV_DMC1); - - tmp &= ~(S5P_CLKDIV_DMC1_G2D_ACP_MASK | - S5P_CLKDIV_DMC1_C2C_MASK | - S5P_CLKDIV_DMC1_C2CACLK_MASK); - - tmp |= ((exynos4x12_clkdiv_dmc1[index][0] << - S5P_CLKDIV_DMC1_G2D_ACP_SHIFT) | - 
(exynos4x12_clkdiv_dmc1[index][1] << - S5P_CLKDIV_DMC1_C2C_SHIFT) | - (exynos4x12_clkdiv_dmc1[index][2] << - S5P_CLKDIV_DMC1_C2CACLK_SHIFT)); - - __raw_writel(tmp, S5P_CLKDIV_DMC1); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_DMC1); - } while (tmp & 0x111111); - - /* Change Divider - TOP */ - tmp = __raw_readl(S5P_CLKDIV_TOP); - - tmp &= ~(S5P_CLKDIV_TOP_ACLK266_GPS_MASK | - S5P_CLKDIV_TOP_ACLK100_MASK | - S5P_CLKDIV_TOP_ACLK160_MASK | - S5P_CLKDIV_TOP_ACLK133_MASK | - S5P_CLKDIV_TOP_ONENAND_MASK); - - tmp |= ((exynos4x12_clkdiv_top[index][0] << - S5P_CLKDIV_TOP_ACLK266_GPS_SHIFT) | - (exynos4x12_clkdiv_top[index][1] << - S5P_CLKDIV_TOP_ACLK100_SHIFT) | - (exynos4x12_clkdiv_top[index][2] << - S5P_CLKDIV_TOP_ACLK160_SHIFT) | - (exynos4x12_clkdiv_top[index][3] << - S5P_CLKDIV_TOP_ACLK133_SHIFT) | - (exynos4x12_clkdiv_top[index][4] << - S5P_CLKDIV_TOP_ONENAND_SHIFT)); - - __raw_writel(tmp, S5P_CLKDIV_TOP); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_TOP); - } while (tmp & 0x11111); - - /* Change Divider - LEFTBUS */ - tmp = __raw_readl(S5P_CLKDIV_LEFTBUS); - - tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK); - - tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] << - S5P_CLKDIV_BUS_GDLR_SHIFT) | - (exynos4x12_clkdiv_lr_bus[index][1] << - S5P_CLKDIV_BUS_GPLR_SHIFT)); - - __raw_writel(tmp, S5P_CLKDIV_LEFTBUS); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS); - } while (tmp & 0x11); - - /* Change Divider - RIGHTBUS */ - tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS); - - tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK); - - tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] << - S5P_CLKDIV_BUS_GDLR_SHIFT) | - (exynos4x12_clkdiv_lr_bus[index][1] << - S5P_CLKDIV_BUS_GPLR_SHIFT)); - - __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS); - } while (tmp & 0x11); - - /* Change Divider - MFC */ - tmp = __raw_readl(S5P_CLKDIV_MFC); - - tmp &= ~(S5P_CLKDIV_MFC_MASK); - - tmp |= ((exynos4x12_clkdiv_sclkip[index][0] << - S5P_CLKDIV_MFC_SHIFT)); - - __raw_writel(tmp, S5P_CLKDIV_MFC); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_MFC); - } while (tmp & 0x1); - - /* Change Divider - JPEG */ - tmp = __raw_readl(S5P_CLKDIV_CAM1); - - tmp &= ~(S5P_CLKDIV_CAM1_JPEG_MASK); - - tmp |= ((exynos4x12_clkdiv_sclkip[index][1] << - S5P_CLKDIV_CAM1_JPEG_SHIFT)); - - __raw_writel(tmp, S5P_CLKDIV_CAM1); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1); - } while (tmp & 0x1); - - /* Change Divider - FIMC0~3 */ - tmp = __raw_readl(S5P_CLKDIV_CAM); - - tmp &= ~(S5P_CLKDIV_CAM_FIMC0_MASK | S5P_CLKDIV_CAM_FIMC1_MASK | - S5P_CLKDIV_CAM_FIMC2_MASK | S5P_CLKDIV_CAM_FIMC3_MASK); - - tmp |= ((exynos4x12_clkdiv_sclkip[index][2] << - S5P_CLKDIV_CAM_FIMC0_SHIFT) | - (exynos4x12_clkdiv_sclkip[index][2] << - S5P_CLKDIV_CAM_FIMC1_SHIFT) | - (exynos4x12_clkdiv_sclkip[index][2] << - S5P_CLKDIV_CAM_FIMC2_SHIFT) | - (exynos4x12_clkdiv_sclkip[index][2] << - S5P_CLKDIV_CAM_FIMC3_SHIFT)); - - __raw_writel(tmp, S5P_CLKDIV_CAM); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1); - } while (tmp & 0x1111); - - return 0; -} - - -static void busfreq_mon_reset(struct busfreq_data *data) -{ - unsigned int i; - - for (i = 0; i < 2; i++) { - void __iomem *ppmu_base = data->dmc[i].hw_base; - - /* Reset PPMU */ - __raw_writel(0x8000000f, ppmu_base + 0xf010); - __raw_writel(0x8000000f, ppmu_base + 0xf050); - __raw_writel(0x6, ppmu_base + 0xf000); - __raw_writel(0x0, ppmu_base + 0xf100); - - /* Set PPMU Event */ - data->dmc[i].event = 0x6; - __raw_writel(((data->dmc[i].event << 12) | 0x1), - ppmu_base + 
0xfc); - - /* Start PPMU */ - __raw_writel(0x1, ppmu_base + 0xf000); - } -} - -static void exynos4_read_ppmu(struct busfreq_data *data) -{ - int i, j; - - for (i = 0; i < 2; i++) { - void __iomem *ppmu_base = data->dmc[i].hw_base; - u32 overflow; - - /* Stop PPMU */ - __raw_writel(0x0, ppmu_base + 0xf000); - - /* Update local data from PPMU */ - overflow = __raw_readl(ppmu_base + 0xf050); - - data->dmc[i].ccnt = __raw_readl(ppmu_base + 0xf100); - data->dmc[i].ccnt_overflow = overflow & (1 << 31); - - for (j = 0; j < PPMU_PMNCNT_MAX; j++) { - data->dmc[i].count[j] = __raw_readl( - ppmu_base + (0xf110 + (0x10 * j))); - data->dmc[i].count_overflow[j] = overflow & (1 << j); - } - } - - busfreq_mon_reset(data); -} - -static int exynos4x12_get_intspec(unsigned long mifclk) -{ - int i = 0; - - while (exynos4x12_intclk_table[i].clk) { - if (exynos4x12_intclk_table[i].clk <= mifclk) - return i; - i++; - } - - return -EINVAL; -} - -static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp, - struct opp *oldopp) -{ - int err = 0, tmp; - unsigned long volt = opp_get_voltage(opp); - - switch (data->type) { - case TYPE_BUSF_EXYNOS4210: - /* OPP represents DMC clock + INT voltage */ - err = regulator_set_voltage(data->vdd_int, volt, - MAX_SAFEVOLT); - break; - case TYPE_BUSF_EXYNOS4x12: - /* OPP represents MIF clock + MIF voltage */ - err = regulator_set_voltage(data->vdd_mif, volt, - MAX_SAFEVOLT); - if (err) - break; - - tmp = exynos4x12_get_intspec(opp_get_freq(opp)); - if (tmp < 0) { - err = tmp; - regulator_set_voltage(data->vdd_mif, - opp_get_voltage(oldopp), - MAX_SAFEVOLT); - break; - } - err = regulator_set_voltage(data->vdd_int, - exynos4x12_intclk_table[tmp].volt, - MAX_SAFEVOLT); - /* Try to recover */ - if (err) - regulator_set_voltage(data->vdd_mif, - opp_get_voltage(oldopp), - MAX_SAFEVOLT); - break; - default: - err = -EINVAL; - } - - return err; -} - -static int exynos4_bus_target(struct device *dev, unsigned long *_freq) -{ - int err = 0; - struct platform_device *pdev = container_of(dev, struct platform_device, - dev); - struct busfreq_data *data = platform_get_drvdata(pdev); - struct opp *opp = devfreq_recommended_opp(dev, _freq); - unsigned long old_freq = opp_get_freq(data->curr_opp); - unsigned long freq = opp_get_freq(opp); - - if (old_freq == freq) - return 0; - - dev_dbg(dev, "targetting %lukHz %luuV\n", freq, opp_get_voltage(opp)); - - mutex_lock(&data->lock); - - if (data->disabled) - goto out; - - if (old_freq < freq) - err = exynos4_bus_setvolt(data, opp, data->curr_opp); - if (err) - goto out; - - if (old_freq != freq) { - switch (data->type) { - case TYPE_BUSF_EXYNOS4210: - err = exynos4210_set_busclk(data, opp); - break; - case TYPE_BUSF_EXYNOS4x12: - err = exynos4x12_set_busclk(data, opp); - break; - default: - err = -EINVAL; - } - } - if (err) - goto out; - - if (old_freq > freq) - err = exynos4_bus_setvolt(data, opp, data->curr_opp); - if (err) - goto out; - - data->curr_opp = opp; -out: - mutex_unlock(&data->lock); - return err; -} - -static int exynos4_get_busier_dmc(struct busfreq_data *data) -{ - u64 p0 = data->dmc[0].count[0]; - u64 p1 = data->dmc[1].count[0]; - - p0 *= data->dmc[1].ccnt; - p1 *= data->dmc[0].ccnt; - - if (data->dmc[1].ccnt == 0) - return 0; - - if (p0 > p1) - return 0; - return 1; -} - -static int exynos4_bus_get_dev_status(struct device *dev, - struct devfreq_dev_status *stat) -{ - struct platform_device *pdev = container_of(dev, struct platform_device, - dev); - struct busfreq_data *data = platform_get_drvdata(pdev); - int 
busier_dmc; - int cycles_x2 = 2; /* 2 x cycles */ - void __iomem *addr; - u32 timing; - u32 memctrl; - - exynos4_read_ppmu(data); - busier_dmc = exynos4_get_busier_dmc(data); - stat->current_frequency = opp_get_freq(data->curr_opp); - - if (busier_dmc) - addr = S5P_VA_DMC1; - else - addr = S5P_VA_DMC0; - - memctrl = __raw_readl(addr + 0x04); /* one of DDR2/3/LPDDR2 */ - timing = __raw_readl(addr + 0x38); /* CL or WL/RL values */ - - switch ((memctrl >> 8) & 0xf) { - case 0x4: /* DDR2 */ - cycles_x2 = ((timing >> 16) & 0xf) * 2; - break; - case 0x5: /* LPDDR2 */ - case 0x6: /* DDR3 */ - cycles_x2 = ((timing >> 8) & 0xf) + ((timing >> 0) & 0xf); - break; - default: - pr_err("%s: Unknown Memory Type(%d).\n", __func__, - (memctrl >> 8) & 0xf); - return -EINVAL; - } - - /* Number of cycles spent on memory access */ - stat->busy_time = data->dmc[busier_dmc].count[0] / 2 * (cycles_x2 + 2); - stat->busy_time *= 100 / BUS_SATURATION_RATIO; - stat->total_time = data->dmc[busier_dmc].ccnt; - - /* If the counters have overflown, retry */ - if (data->dmc[busier_dmc].ccnt_overflow || - data->dmc[busier_dmc].count_overflow[0]) - return -EAGAIN; - - return 0; -} - -static void exynos4_bus_exit(struct device *dev) -{ - struct platform_device *pdev = container_of(dev, struct platform_device, - dev); - struct busfreq_data *data = platform_get_drvdata(pdev); - - devfreq_unregister_opp_notifier(dev, data->devfreq); -} - -static struct devfreq_dev_profile exynos4_devfreq_profile = { - .initial_freq = 400000, - .polling_ms = 50, - .target = exynos4_bus_target, - .get_dev_status = exynos4_bus_get_dev_status, - .exit = exynos4_bus_exit, -}; - -static int exynos4210_init_tables(struct busfreq_data *data) -{ - u32 tmp; - int mgrp; - int i, err = 0; - - tmp = __raw_readl(S5P_CLKDIV_DMC0); - for (i = LV_0; i < EX4210_LV_NUM; i++) { - tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK | - S5P_CLKDIV_DMC0_ACPPCLK_MASK | - S5P_CLKDIV_DMC0_DPHY_MASK | - S5P_CLKDIV_DMC0_DMC_MASK | - S5P_CLKDIV_DMC0_DMCD_MASK | - S5P_CLKDIV_DMC0_DMCP_MASK | - S5P_CLKDIV_DMC0_COPY2_MASK | - S5P_CLKDIV_DMC0_CORETI_MASK); - - tmp |= ((exynos4210_clkdiv_dmc0[i][0] << - S5P_CLKDIV_DMC0_ACP_SHIFT) | - (exynos4210_clkdiv_dmc0[i][1] << - S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) | - (exynos4210_clkdiv_dmc0[i][2] << - S5P_CLKDIV_DMC0_DPHY_SHIFT) | - (exynos4210_clkdiv_dmc0[i][3] << - S5P_CLKDIV_DMC0_DMC_SHIFT) | - (exynos4210_clkdiv_dmc0[i][4] << - S5P_CLKDIV_DMC0_DMCD_SHIFT) | - (exynos4210_clkdiv_dmc0[i][5] << - S5P_CLKDIV_DMC0_DMCP_SHIFT) | - (exynos4210_clkdiv_dmc0[i][6] << - S5P_CLKDIV_DMC0_COPY2_SHIFT) | - (exynos4210_clkdiv_dmc0[i][7] << - S5P_CLKDIV_DMC0_CORETI_SHIFT)); - - data->dmc_divtable[i] = tmp; - } - - tmp = __raw_readl(S5P_CLKDIV_TOP); - for (i = LV_0; i < EX4210_LV_NUM; i++) { - tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK | - S5P_CLKDIV_TOP_ACLK100_MASK | - S5P_CLKDIV_TOP_ACLK160_MASK | - S5P_CLKDIV_TOP_ACLK133_MASK | - S5P_CLKDIV_TOP_ONENAND_MASK); - - tmp |= ((exynos4210_clkdiv_top[i][0] << - S5P_CLKDIV_TOP_ACLK200_SHIFT) | - (exynos4210_clkdiv_top[i][1] << - S5P_CLKDIV_TOP_ACLK100_SHIFT) | - (exynos4210_clkdiv_top[i][2] << - S5P_CLKDIV_TOP_ACLK160_SHIFT) | - (exynos4210_clkdiv_top[i][3] << - S5P_CLKDIV_TOP_ACLK133_SHIFT) | - (exynos4210_clkdiv_top[i][4] << - S5P_CLKDIV_TOP_ONENAND_SHIFT)); - - data->top_divtable[i] = tmp; - } - -#ifdef CONFIG_EXYNOS_ASV - tmp = exynos4_result_of_asv; -#else - tmp = 0; /* Max voltages for the reliability of the unknown */ -#endif - - pr_debug("ASV Group of Exynos4 is %d\n", tmp); - /* Use merged grouping for voltage */ - switch 
(tmp) { - case 0: - mgrp = 0; - break; - case 1: - case 2: - mgrp = 1; - break; - case 3: - case 4: - mgrp = 2; - break; - case 5: - case 6: - mgrp = 3; - break; - case 7: - mgrp = 4; - break; - default: - pr_warn("Unknown ASV Group. Use max voltage.\n"); - mgrp = 0; - } - - for (i = LV_0; i < EX4210_LV_NUM; i++) - exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i]; - - for (i = LV_0; i < EX4210_LV_NUM; i++) { - err = opp_add(data->dev, exynos4210_busclk_table[i].clk, - exynos4210_busclk_table[i].volt); - if (err) { - dev_err(data->dev, "Cannot add opp entries.\n"); - return err; - } - } - - - return 0; -} - -static int exynos4x12_init_tables(struct busfreq_data *data) -{ - unsigned int i; - unsigned int tmp; - int ret; - - /* Enable pause function for DREX2 DVFS */ - tmp = __raw_readl(S5P_DMC_PAUSE_CTRL); - tmp |= DMC_PAUSE_ENABLE; - __raw_writel(tmp, S5P_DMC_PAUSE_CTRL); - - tmp = __raw_readl(S5P_CLKDIV_DMC0); - - for (i = 0; i < EX4x12_LV_NUM; i++) { - tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK | - S5P_CLKDIV_DMC0_ACPPCLK_MASK | - S5P_CLKDIV_DMC0_DPHY_MASK | - S5P_CLKDIV_DMC0_DMC_MASK | - S5P_CLKDIV_DMC0_DMCD_MASK | - S5P_CLKDIV_DMC0_DMCP_MASK); - - tmp |= ((exynos4x12_clkdiv_dmc0[i][0] << - S5P_CLKDIV_DMC0_ACP_SHIFT) | - (exynos4x12_clkdiv_dmc0[i][1] << - S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) | - (exynos4x12_clkdiv_dmc0[i][2] << - S5P_CLKDIV_DMC0_DPHY_SHIFT) | - (exynos4x12_clkdiv_dmc0[i][3] << - S5P_CLKDIV_DMC0_DMC_SHIFT) | - (exynos4x12_clkdiv_dmc0[i][4] << - S5P_CLKDIV_DMC0_DMCD_SHIFT) | - (exynos4x12_clkdiv_dmc0[i][5] << - S5P_CLKDIV_DMC0_DMCP_SHIFT)); - - data->dmc_divtable[i] = tmp; - } - -#ifdef CONFIG_EXYNOS_ASV - tmp = exynos4_result_of_asv; -#else - tmp = 0; /* Max voltages for the reliability of the unknown */ -#endif - - if (tmp > 8) - tmp = 0; - pr_debug("ASV Group of Exynos4x12 is %d\n", tmp); - - for (i = 0; i < EX4x12_LV_NUM; i++) { - exynos4x12_mifclk_table[i].volt = - exynos4x12_mif_step_50[tmp][i]; - exynos4x12_intclk_table[i].volt = - exynos4x12_int_volt[tmp][i]; - } - - for (i = 0; i < EX4x12_LV_NUM; i++) { - ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk, - exynos4x12_mifclk_table[i].volt); - if (ret) { - dev_err(data->dev, "Fail to add opp entries.\n"); - return ret; - } - } - - return 0; -} - -static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct busfreq_data *data = container_of(this, struct busfreq_data, - pm_notifier); - struct opp *opp; - unsigned long maxfreq = ULONG_MAX; - int err = 0; - - switch (event) { - case PM_SUSPEND_PREPARE: - /* Set Fastest and Deactivate DVFS */ - mutex_lock(&data->lock); - - data->disabled = true; - - opp = opp_find_freq_floor(data->dev, &maxfreq); - - err = exynos4_bus_setvolt(data, opp, data->curr_opp); - if (err) - goto unlock; - - switch (data->type) { - case TYPE_BUSF_EXYNOS4210: - err = exynos4210_set_busclk(data, opp); - break; - case TYPE_BUSF_EXYNOS4x12: - err = exynos4x12_set_busclk(data, opp); - break; - default: - err = -EINVAL; - } - if (err) - goto unlock; - - data->curr_opp = opp; -unlock: - mutex_unlock(&data->lock); - if (err) - return err; - return NOTIFY_OK; - case PM_POST_RESTORE: - case PM_POST_SUSPEND: - /* Reactivate */ - mutex_lock(&data->lock); - data->disabled = false; - mutex_unlock(&data->lock); - return NOTIFY_OK; - } - - return NOTIFY_DONE; -} - -static __devinit int exynos4_busfreq_probe(struct platform_device *pdev) -{ - struct busfreq_data *data; - struct opp *opp; - struct device *dev = &pdev->dev; - int err = 0; - - data = 
kzalloc(sizeof(struct busfreq_data), GFP_KERNEL); - if (data == NULL) { - dev_err(dev, "Cannot allocate memory.\n"); - return -ENOMEM; - } - - data->type = pdev->id_entry->driver_data; - data->dmc[0].hw_base = S5P_VA_DMC0; - data->dmc[1].hw_base = S5P_VA_DMC1; - data->pm_notifier.notifier_call = exynos4_busfreq_pm_notifier_event; - data->dev = dev; - mutex_init(&data->lock); - - switch (data->type) { - case TYPE_BUSF_EXYNOS4210: - err = exynos4210_init_tables(data); - break; - case TYPE_BUSF_EXYNOS4x12: - err = exynos4x12_init_tables(data); - break; - default: - dev_err(dev, "Cannot determine the device id %d\n", data->type); - err = -EINVAL; - } - if (err) - goto err_regulator; - - data->vdd_int = regulator_get(dev, "vdd_int"); - if (IS_ERR(data->vdd_int)) { - dev_err(dev, "Cannot get the regulator \"vdd_int\"\n"); - err = PTR_ERR(data->vdd_int); - goto err_regulator; - } - if (data->type == TYPE_BUSF_EXYNOS4x12) { - data->vdd_mif = regulator_get(dev, "vdd_mif"); - if (IS_ERR(data->vdd_mif)) { - dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n"); - err = PTR_ERR(data->vdd_mif); - regulator_put(data->vdd_int); - goto err_regulator; - - } - } - - opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq); - if (IS_ERR(opp)) { - dev_err(dev, "Invalid initial frequency %lu kHz.\n", - exynos4_devfreq_profile.initial_freq); - err = PTR_ERR(opp); - goto err_opp_add; - } - data->curr_opp = opp; - - platform_set_drvdata(pdev, data); - - busfreq_mon_reset(data); - - data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile, - &devfreq_simple_ondemand, NULL); - if (IS_ERR(data->devfreq)) { - err = PTR_ERR(data->devfreq); - goto err_opp_add; - } - - devfreq_register_opp_notifier(dev, data->devfreq); - - err = register_pm_notifier(&data->pm_notifier); - if (err) { - dev_err(dev, "Failed to setup pm notifier\n"); - goto err_devfreq_add; - } - - return 0; -err_devfreq_add: - devfreq_remove_device(data->devfreq); -err_opp_add: - if (data->vdd_mif) - regulator_put(data->vdd_mif); - regulator_put(data->vdd_int); -err_regulator: - kfree(data); - return err; -} - -static __devexit int exynos4_busfreq_remove(struct platform_device *pdev) -{ - struct busfreq_data *data = platform_get_drvdata(pdev); - - unregister_pm_notifier(&data->pm_notifier); - devfreq_remove_device(data->devfreq); - regulator_put(data->vdd_int); - if (data->vdd_mif) - regulator_put(data->vdd_mif); - kfree(data); - - return 0; -} - -static int exynos4_busfreq_resume(struct device *dev) -{ - struct platform_device *pdev = container_of(dev, struct platform_device, - dev); - struct busfreq_data *data = platform_get_drvdata(pdev); - - busfreq_mon_reset(data); - return 0; -} - -static const struct dev_pm_ops exynos4_busfreq_pm = { - .resume = exynos4_busfreq_resume, -}; - -static const struct platform_device_id exynos4_busfreq_id[] = { - { "exynos4210-busfreq", TYPE_BUSF_EXYNOS4210 }, - { "exynos4412-busfreq", TYPE_BUSF_EXYNOS4x12 }, - { "exynos4212-busfreq", TYPE_BUSF_EXYNOS4x12 }, - { }, -}; - -static struct platform_driver exynos4_busfreq_driver = { - .probe = exynos4_busfreq_probe, - .remove = __devexit_p(exynos4_busfreq_remove), - .id_table = exynos4_busfreq_id, - .driver = { - .name = "exynos4-busfreq", - .owner = THIS_MODULE, - .pm = &exynos4_busfreq_pm, - }, -}; - -static int __init exynos4_busfreq_init(void) -{ - return platform_driver_register(&exynos4_busfreq_driver); -} -late_initcall(exynos4_busfreq_init); - -static void __exit exynos4_busfreq_exit(void) -{ - platform_driver_unregister(&exynos4_busfreq_driver); 
-} -module_exit(exynos4_busfreq_exit); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework"); -MODULE_AUTHOR("MyungJoo Ham "); -MODULE_ALIAS("exynos4-busfreq"); diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c index 92c9628c572d..5e00d1670aa9 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen.c @@ -3276,18 +3276,6 @@ int evergreen_init(struct radeon_device *rdev) rdev->accel_working = false; } } - - /* Don't start up if the MC ucode is missing on BTC parts. - * The default clocks and voltages before the MC ucode - * is loaded are not suffient for advanced operations. - */ - if (ASIC_IS_DCE5(rdev)) { - if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) { - DRM_ERROR("radeon: MC ucode required for NI+.\n"); - return -EINVAL; - } - } - return 0; } diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index f94b33ae2215..8aa1dbb45c67 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1093,6 +1093,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, struct vmw_surface *surface = NULL; struct vmw_dma_buffer *bo = NULL; struct ttm_base_object *user_obj; + u64 required_size; int ret; /** @@ -1101,9 +1102,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, * requested framebuffer. */ - if (!vmw_kms_validate_mode_vram(dev_priv, - mode_cmd->pitch, - mode_cmd->height)) { + required_size = mode_cmd->pitch * mode_cmd->height; + if (unlikely(required_size > (u64) dev_priv->vram_size)) { DRM_ERROR("VRAM size is too small for requested mode.\n"); return ERR_PTR(-ENOMEM); } diff --git a/trunk/drivers/input/touchscreen/st1232.c b/trunk/drivers/input/touchscreen/st1232.c index 8825fe37d433..4ab371358b33 100644 --- a/trunk/drivers/input/touchscreen/st1232.c +++ b/trunk/drivers/input/touchscreen/st1232.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -47,7 +46,6 @@ struct st1232_ts_data { struct i2c_client *client; struct input_dev *input_dev; struct st1232_ts_finger finger[MAX_FINGERS]; - struct dev_pm_qos_request low_latency_req; }; static int st1232_ts_read_data(struct st1232_ts_data *ts) @@ -120,17 +118,8 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id) } /* SYN_MT_REPORT only if no contact */ - if (!count) { + if (!count) input_mt_sync(input_dev); - if (ts->low_latency_req.dev) { - dev_pm_qos_remove_request(&ts->low_latency_req); - ts->low_latency_req.dev = NULL; - } - } else if (!ts->low_latency_req.dev) { - /* First contact, request 100 us latency. 
*/ - dev_pm_qos_add_ancestor_request(&ts->client->dev, - &ts->low_latency_req, 100); - } /* SYN_REPORT */ input_sync(input_dev); diff --git a/trunk/drivers/md/bitmap.c b/trunk/drivers/md/bitmap.c index 6d03774b176e..b6907118283a 100644 --- a/trunk/drivers/md/bitmap.c +++ b/trunk/drivers/md/bitmap.c @@ -1393,6 +1393,9 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto atomic_read(&bitmap->behind_writes), bitmap->mddev->bitmap_info.max_write_behind); } + if (bitmap->mddev->degraded) + /* Never clear bits or update events_cleared when degraded */ + success = 0; while (sectors) { sector_t blocks; @@ -1406,7 +1409,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto return; } - if (success && !bitmap->mddev->degraded && + if (success && bitmap->events_cleared < bitmap->mddev->events) { bitmap->events_cleared = bitmap->mddev->events; bitmap->need_sync = 1; diff --git a/trunk/drivers/md/linear.c b/trunk/drivers/md/linear.c index 627456542fb3..c3273efd08cb 100644 --- a/trunk/drivers/md/linear.c +++ b/trunk/drivers/md/linear.c @@ -230,7 +230,6 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev) return -EINVAL; rdev->raid_disk = rdev->saved_raid_disk; - rdev->saved_raid_disk = -1; newconf = linear_conf(mddev,mddev->raid_disks+1); diff --git a/trunk/drivers/md/md.c b/trunk/drivers/md/md.c index f47f1f8ac44b..ee981737edfc 100644 --- a/trunk/drivers/md/md.c +++ b/trunk/drivers/md/md.c @@ -7360,7 +7360,8 @@ static int remove_and_add_spares(struct mddev *mddev) spares++; md_new_event(mddev); set_bit(MD_CHANGE_DEVS, &mddev->flags); - } + } else + break; } } } diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c index 858fdbb7eb07..31670f8d6b65 100644 --- a/trunk/drivers/md/raid5.c +++ b/trunk/drivers/md/raid5.c @@ -3065,17 +3065,11 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) } } else if (test_bit(In_sync, &rdev->flags)) set_bit(R5_Insync, &dev->flags); - else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) + else { /* in sync if before recovery_offset */ - set_bit(R5_Insync, &dev->flags); - else if (test_bit(R5_UPTODATE, &dev->flags) && - test_bit(R5_Expanded, &dev->flags)) - /* If we've reshaped into here, we assume it is Insync. - * We will shortly update recovery_offset to make - * it official. 
- */ - set_bit(R5_Insync, &dev->flags); - + if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) + set_bit(R5_Insync, &dev->flags); + } if (rdev && test_bit(R5_WriteError, &dev->flags)) { clear_bit(R5_Insync, &dev->flags); if (!test_bit(Faulty, &rdev->flags)) { diff --git a/trunk/drivers/media/video/omap3isp/ispccdc.c b/trunk/drivers/media/video/omap3isp/ispccdc.c index 54a4a3f22e2e..b0b0fa5a3572 100644 --- a/trunk/drivers/media/video/omap3isp/ispccdc.c +++ b/trunk/drivers/media/video/omap3isp/ispccdc.c @@ -1408,7 +1408,7 @@ static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc) { struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->video_out.video.entity); - struct video_device *vdev = ccdc->subdev.devnode; + struct video_device *vdev = &ccdc->subdev.devnode; struct v4l2_event event; memset(&event, 0, sizeof(event)); diff --git a/trunk/drivers/media/video/omap3isp/ispstat.c b/trunk/drivers/media/video/omap3isp/ispstat.c index bc0b2c7349b9..68d539456c55 100644 --- a/trunk/drivers/media/video/omap3isp/ispstat.c +++ b/trunk/drivers/media/video/omap3isp/ispstat.c @@ -496,7 +496,7 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size) static void isp_stat_queue_event(struct ispstat *stat, int err) { - struct video_device *vdev = stat->subdev.devnode; + struct video_device *vdev = &stat->subdev.devnode; struct v4l2_event event; struct omap3isp_stat_event_status *status = (void *)event.u.data; diff --git a/trunk/drivers/mfd/ab5500-debugfs.c b/trunk/drivers/mfd/ab5500-debugfs.c index b7b2d3483fd4..43c0ebb81956 100644 --- a/trunk/drivers/mfd/ab5500-debugfs.c +++ b/trunk/drivers/mfd/ab5500-debugfs.c @@ -4,7 +4,7 @@ * Debugfs support for the AB5500 MFD driver */ -#include +#include #include #include #include diff --git a/trunk/drivers/mfd/ab8500-core.c b/trunk/drivers/mfd/ab8500-core.c index d3d572b2317b..1e9173804ede 100644 --- a/trunk/drivers/mfd/ab8500-core.c +++ b/trunk/drivers/mfd/ab8500-core.c @@ -620,7 +620,6 @@ static struct resource __devinitdata ab8500_fg_resources[] = { static struct resource __devinitdata ab8500_chargalg_resources[] = {}; -#ifdef CONFIG_DEBUG_FS static struct resource __devinitdata ab8500_debug_resources[] = { { .name = "IRQ_FIRST", @@ -635,7 +634,6 @@ static struct resource __devinitdata ab8500_debug_resources[] = { .flags = IORESOURCE_IRQ, }, }; -#endif static struct resource __devinitdata ab8500_usb_resources[] = { { diff --git a/trunk/drivers/mfd/adp5520.c b/trunk/drivers/mfd/adp5520.c index 8d816cce8322..f1d88483112c 100644 --- a/trunk/drivers/mfd/adp5520.c +++ b/trunk/drivers/mfd/adp5520.c @@ -109,7 +109,7 @@ int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask) ret = __adp5520_read(chip->client, reg, ®_val); - if (!ret && ((reg_val & bit_mask) != bit_mask)) { + if (!ret && ((reg_val & bit_mask) == 0)) { reg_val |= bit_mask; ret = __adp5520_write(chip->client, reg, reg_val); } diff --git a/trunk/drivers/mfd/da903x.c b/trunk/drivers/mfd/da903x.c index 1924b857a0fb..1b79c37fd599 100644 --- a/trunk/drivers/mfd/da903x.c +++ b/trunk/drivers/mfd/da903x.c @@ -182,7 +182,7 @@ int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask) if (ret) goto out; - if ((reg_val & bit_mask) != bit_mask) { + if ((reg_val & bit_mask) == 0) { reg_val |= bit_mask; ret = __da903x_write(chip->client, reg, reg_val); } @@ -549,7 +549,6 @@ static int __devexit da903x_remove(struct i2c_client *client) struct da903x_chip *chip = i2c_get_clientdata(client); da903x_remove_subdevs(chip); - free_irq(client->irq, chip); kfree(chip); return 0; } diff --git 
a/trunk/drivers/mfd/jz4740-adc.c b/trunk/drivers/mfd/jz4740-adc.c index ef39528088f2..1e9ee533eacb 100644 --- a/trunk/drivers/mfd/jz4740-adc.c +++ b/trunk/drivers/mfd/jz4740-adc.c @@ -16,7 +16,6 @@ */ #include -#include #include #include #include diff --git a/trunk/drivers/mfd/tps6586x.c b/trunk/drivers/mfd/tps6586x.c index a5ddf31b60ca..bba26d96c240 100644 --- a/trunk/drivers/mfd/tps6586x.c +++ b/trunk/drivers/mfd/tps6586x.c @@ -197,7 +197,7 @@ int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask) if (ret) goto out; - if ((reg_val & bit_mask) != bit_mask) { + if ((reg_val & bit_mask) == 0) { reg_val |= bit_mask; ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val); } diff --git a/trunk/drivers/mfd/tps65910.c b/trunk/drivers/mfd/tps65910.c index c1da84bc1573..6f5b8cf2f652 100644 --- a/trunk/drivers/mfd/tps65910.c +++ b/trunk/drivers/mfd/tps65910.c @@ -120,7 +120,7 @@ int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask) goto out; } - data &= ~mask; + data &= mask; err = tps65910_i2c_write(tps65910, reg, 1, &data); if (err) dev_err(tps65910->dev, "write to reg %x failed\n", reg); diff --git a/trunk/drivers/mfd/twl-core.c b/trunk/drivers/mfd/twl-core.c index 61e70cfaa774..bfbd66021afd 100644 --- a/trunk/drivers/mfd/twl-core.c +++ b/trunk/drivers/mfd/twl-core.c @@ -363,13 +363,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); return -EPERM; } - if (unlikely(!inuse)) { - pr_err("%s: not initialized\n", DRIVER_NAME); - return -EPERM; - } sid = twl_map[mod_no].sid; twl = &twl_modules[sid]; + if (unlikely(!inuse)) { + pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid); + return -EPERM; + } mutex_lock(&twl->xfer_lock); /* * [MSG1]: fill the register address data @@ -420,13 +420,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); return -EPERM; } - if (unlikely(!inuse)) { - pr_err("%s: not initialized\n", DRIVER_NAME); - return -EPERM; - } sid = twl_map[mod_no].sid; twl = &twl_modules[sid]; + if (unlikely(!inuse)) { + pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid); + return -EPERM; + } mutex_lock(&twl->xfer_lock); /* [MSG1] fill the register address data */ msg = &twl->xfer_msg[0]; diff --git a/trunk/drivers/mfd/twl4030-irq.c b/trunk/drivers/mfd/twl4030-irq.c index 29f11e0765fe..f062c8cc6c38 100644 --- a/trunk/drivers/mfd/twl4030-irq.c +++ b/trunk/drivers/mfd/twl4030-irq.c @@ -432,7 +432,6 @@ struct sih_agent { u32 edge_change; struct mutex irq_lock; - char *irq_name; }; /*----------------------------------------------------------------------*/ @@ -590,7 +589,7 @@ static inline int sih_read_isr(const struct sih *sih) * Generic handler for SIH interrupts ... we "know" this is called * in task context, with IRQs enabled. 
*/ -static irqreturn_t handle_twl4030_sih(int irq, void *data) +static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc) { struct sih_agent *agent = irq_get_handler_data(irq); const struct sih *sih = agent->sih; @@ -603,7 +602,7 @@ static irqreturn_t handle_twl4030_sih(int irq, void *data) pr_err("twl4030: %s SIH, read ISR error %d\n", sih->name, isr); /* REVISIT: recover; eventually mask it all, etc */ - return IRQ_HANDLED; + return; } while (isr) { @@ -617,7 +616,6 @@ static irqreturn_t handle_twl4030_sih(int irq, void *data) pr_err("twl4030: %s SIH, invalid ISR bit %d\n", sih->name, irq); } - return IRQ_HANDLED; } static unsigned twl4030_irq_next; @@ -670,19 +668,18 @@ int twl4030_sih_setup(int module) activate_irq(irq); } + status = irq_base; twl4030_irq_next += i; /* replace generic PIH handler (handle_simple_irq) */ irq = sih_mod + twl4030_irq_base; irq_set_handler_data(irq, agent); - agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name); - status = request_threaded_irq(irq, NULL, handle_twl4030_sih, 0, - agent->irq_name ?: sih->name, NULL); + irq_set_chained_handler(irq, handle_twl4030_sih); pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name, irq, irq_base, twl4030_irq_next - 1); - return status < 0 ? status : irq_base; + return status; } /* FIXME need a call to reverse twl4030_sih_setup() ... */ @@ -736,9 +733,8 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) } /* install an irq handler to demultiplex the TWL4030 interrupt */ - status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, - IRQF_ONESHOT, - "TWL4030-PIH", NULL); + status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, 0, + "TWL4030-PIH", NULL); if (status < 0) { pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status); goto fail_rqirq; diff --git a/trunk/drivers/mfd/wm8994-core.c b/trunk/drivers/mfd/wm8994-core.c index 61894fced8ea..5d6ba132837e 100644 --- a/trunk/drivers/mfd/wm8994-core.c +++ b/trunk/drivers/mfd/wm8994-core.c @@ -239,7 +239,6 @@ static int wm8994_suspend(struct device *dev) switch (wm8994->type) { case WM8958: - case WM1811: ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1); if (ret < 0) { dev_err(dev, "Failed to read power status: %d\n", ret); diff --git a/trunk/drivers/net/ethernet/realtek/r8169.c b/trunk/drivers/net/ethernet/realtek/r8169.c index c8f47f17186f..67bf07819992 100644 --- a/trunk/drivers/net/ethernet/realtek/r8169.c +++ b/trunk/drivers/net/ethernet/realtek/r8169.c @@ -477,6 +477,7 @@ enum rtl_register_content { /* Config1 register p.24 */ LEDS1 = (1 << 7), LEDS0 = (1 << 6), + MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */ Speed_down = (1 << 4), MEMMAP = (1 << 3), IOMAP = (1 << 2), @@ -484,7 +485,6 @@ enum rtl_register_content { PMEnable = (1 << 0), /* Power Management Enable */ /* Config2 register p. 25 */ - MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ PCI_Clock_66MHz = 0x01, PCI_Clock_33MHz = 0x00, @@ -3426,24 +3426,22 @@ static const struct rtl_cfg_info { }; /* Cfg9346_Unlock assumed. */ -static unsigned rtl_try_msi(struct rtl8169_private *tp, +static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr, const struct rtl_cfg_info *cfg) { - void __iomem *ioaddr = tp->mmio_addr; unsigned msi = 0; u8 cfg2; cfg2 = RTL_R8(Config2) & ~MSIEnable; if (cfg->features & RTL_FEATURE_MSI) { - if (pci_enable_msi(tp->pci_dev)) { - netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n"); + if (pci_enable_msi(pdev)) { + dev_info(&pdev->dev, "no MSI. 
Back to INTx.\n"); } else { cfg2 |= MSIEnable; msi = RTL_FEATURE_MSI; } } - if (tp->mac_version <= RTL_GIGA_MAC_VER_06) - RTL_W8(Config2, cfg2); + RTL_W8(Config2, cfg2); return msi; } @@ -4079,7 +4077,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->features |= RTL_FEATURE_WOL; if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) tp->features |= RTL_FEATURE_WOL; - tp->features |= rtl_try_msi(tp, cfg); + tp->features |= rtl_try_msi(pdev, ioaddr, cfg); RTL_W8(Cfg9346, Cfg9346_Lock); if (rtl_tbi_enabled(tp)) { diff --git a/trunk/drivers/net/ethernet/ti/davinci_cpdma.c b/trunk/drivers/net/ethernet/ti/davinci_cpdma.c index c97d2f590855..dca9d3369cdd 100644 --- a/trunk/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/trunk/drivers/net/ethernet/ti/davinci_cpdma.c @@ -836,13 +836,11 @@ int cpdma_chan_stop(struct cpdma_chan *chan) chan_write(chan, cp, CPDMA_TEARDOWN_VALUE); /* handle completed packets */ - spin_unlock_irqrestore(&chan->lock, flags); do { ret = __cpdma_chan_process(chan); if (ret < 0) break; } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0); - spin_lock_irqsave(&chan->lock, flags); /* remaining packets haven't been tx/rx'ed, clean them up */ while (chan->head) { diff --git a/trunk/drivers/net/usb/asix.c b/trunk/drivers/net/usb/asix.c index e95f0e60a9bc..e6fed4d4cb77 100644 --- a/trunk/drivers/net/usb/asix.c +++ b/trunk/drivers/net/usb/asix.c @@ -1655,10 +1655,6 @@ static const struct usb_device_id products [] = { // ASIX 88772a USB_DEVICE(0x0db0, 0xa877), .driver_info = (unsigned long) &ax88772_info, -}, { - // Asus USB Ethernet Adapter - USB_DEVICE (0x0b95, 0x7e2b), - .driver_info = (unsigned long) &ax88772_info, }, { }, // END }; diff --git a/trunk/drivers/net/wireless/ath/ath9k/rc.c b/trunk/drivers/net/wireless/ath/ath9k/rc.c index 528d5f3e868c..888abc2be3a5 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/rc.c +++ b/trunk/drivers/net/wireless/ath/ath9k/rc.c @@ -1271,9 +1271,7 @@ static void ath_rc_init(struct ath_softc *sc, ath_rc_priv->max_valid_rate = k; ath_rc_sort_validrates(rate_table, ath_rc_priv); - ath_rc_priv->rate_max_phy = (k > 4) ? 
- ath_rc_priv->valid_rate_index[k-4] : - ath_rc_priv->valid_rate_index[k-1]; + ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4]; ath_rc_priv->rate_table = rate_table; ath_dbg(common, ATH_DBG_CONFIG, diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c index 5c7c17c7166a..a7a6def40d05 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c @@ -606,8 +606,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) if (ctx->ht.enabled) { /* if HT40 is used, it should not change * after associated except channel switch */ - if (!ctx->ht.is_40mhz || - !iwl_is_associated_ctx(ctx)) + if (iwl_is_associated_ctx(ctx) && + !ctx->ht.is_40mhz) iwlagn_config_ht40(conf, ctx); } else ctx->ht.is_40mhz = false; diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c index df1540ca6102..35a6b71f358c 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c @@ -91,10 +91,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, tx_cmd->tid_tspec = qc[0] & 0xf; tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; } else { - if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - else - tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; } iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c index e0e9a3dfbc00..bacc06c95e7a 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -2850,9 +2850,6 @@ static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw, int ret; u8 sta_id; - if (ctx->ctxid != IWL_RXON_CTX_PAN) - return 0; - IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->shrd->mutex); @@ -2901,9 +2898,6 @@ static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw, struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; struct iwl_rxon_context *ctx = vif_priv->ctx; - if (ctx->ctxid != IWL_RXON_CTX_PAN) - return; - IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->shrd->mutex); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c index 5f17ab8e76ba..ce918980e977 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c @@ -1197,7 +1197,9 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); /* Set up entry for this TFD in Tx byte-count array */ - iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); + if (is_agg) + iwl_trans_txq_update_byte_cnt_tbl(trans, txq, + le16_to_cpu(tx_cmd->len)); dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen, DMA_BIDIRECTIONAL); diff --git a/trunk/drivers/net/wireless/mwifiex/cmdevt.c b/trunk/drivers/net/wireless/mwifiex/cmdevt.c index 6e0a3eaecf70..ac278156d390 100644 --- a/trunk/drivers/net/wireless/mwifiex/cmdevt.c +++ b/trunk/drivers/net/wireless/mwifiex/cmdevt.c @@ -939,6 +939,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL; unsigned long cmd_flags; + unsigned long cmd_pending_q_flags; unsigned long scan_pending_q_flags; uint16_t cancel_scan_cmd = false; @@ -948,9 +949,12 @@ 
mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) cmd_node = adapter->curr_cmd; cmd_node->wait_q_enabled = false; cmd_node->cmd_flag |= CMD_F_CANCELED; + spin_lock_irqsave(&adapter->cmd_pending_q_lock, + cmd_pending_q_flags); + list_del(&cmd_node->list); + spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, + cmd_pending_q_flags); mwifiex_insert_cmd_to_free_q(adapter, cmd_node); - mwifiex_complete_cmd(adapter, adapter->curr_cmd); - adapter->curr_cmd = NULL; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); } @@ -977,6 +981,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); } adapter->cmd_wait_q.status = -1; + mwifiex_complete_cmd(adapter, adapter->curr_cmd); } /* diff --git a/trunk/drivers/sh/intc/core.c b/trunk/drivers/sh/intc/core.c index be5a025eeca3..8b7a141ff35e 100644 --- a/trunk/drivers/sh/intc/core.c +++ b/trunk/drivers/sh/intc/core.c @@ -354,8 +354,6 @@ int __init register_intc_controller(struct intc_desc *desc) if (desc->force_enable) intc_enable_disable_enum(desc, d, desc->force_enable, 1); - d->skip_suspend = desc->skip_syscore_suspend; - nr_intc_controllers++; return 0; @@ -388,9 +386,6 @@ static int intc_suspend(void) list_for_each_entry(d, &intc_list, list) { int irq; - if (d->skip_suspend) - continue; - /* enable wakeup irqs belonging to this intc controller */ for_each_active_irq(irq) { struct irq_data *data; @@ -414,9 +409,6 @@ static void intc_resume(void) list_for_each_entry(d, &intc_list, list) { int irq; - if (d->skip_suspend) - continue; - for_each_active_irq(irq) { struct irq_data *data; struct irq_chip *chip; diff --git a/trunk/drivers/sh/intc/internals.h b/trunk/drivers/sh/intc/internals.h index b3fe1cf25a28..5b934851efa8 100644 --- a/trunk/drivers/sh/intc/internals.h +++ b/trunk/drivers/sh/intc/internals.h @@ -67,7 +67,6 @@ struct intc_desc_int { struct intc_window *window; unsigned int nr_windows; struct irq_chip chip; - bool skip_suspend; }; diff --git a/trunk/drivers/usb/dwc3/core.c b/trunk/drivers/usb/dwc3/core.c index 600d82348511..717ebc9ff941 100644 --- a/trunk/drivers/usb/dwc3/core.c +++ b/trunk/drivers/usb/dwc3/core.c @@ -264,7 +264,7 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc) ret = -ENODEV; goto err0; } - dwc->revision = reg; + dwc->revision = reg & DWC3_GSNPSREV_MASK; dwc3_core_soft_reset(dwc); diff --git a/trunk/drivers/usb/gadget/epautoconf.c b/trunk/drivers/usb/gadget/epautoconf.c index 4dff83d2f265..596a0b464e61 100644 --- a/trunk/drivers/usb/gadget/epautoconf.c +++ b/trunk/drivers/usb/gadget/epautoconf.c @@ -130,6 +130,9 @@ ep_matches ( num_req_streams = ep_comp->bmAttributes & 0x1f; if (num_req_streams > ep->max_streams) return 0; + /* Update the ep_comp descriptor if needed */ + if (num_req_streams != ep->max_streams) + ep_comp->bmAttributes = ep->max_streams; } } diff --git a/trunk/drivers/usb/host/isp1760-if.c b/trunk/drivers/usb/host/isp1760-if.c index 2ac4ac2e4ef9..a7dc1e1d45f2 100644 --- a/trunk/drivers/usb/host/isp1760-if.c +++ b/trunk/drivers/usb/host/isp1760-if.c @@ -18,7 +18,7 @@ #include "isp1760-hcd.h" -#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) +#ifdef CONFIG_OF #include #include #include @@ -31,7 +31,7 @@ #include #endif -#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) +#ifdef CONFIG_OF struct isp1760 { struct usb_hcd *hcd; int rst_gpio; @@ -437,7 +437,7 @@ static int __init isp1760_init(void) ret = platform_driver_register(&isp1760_plat_driver); if (!ret) any_ret = 0; -#if defined(CONFIG_OF) && 
defined(CONFIG_OF_IRQ) +#ifdef CONFIG_OF ret = platform_driver_register(&isp1760_of_driver); if (!ret) any_ret = 0; @@ -457,7 +457,7 @@ module_init(isp1760_init); static void __exit isp1760_exit(void) { platform_driver_unregister(&isp1760_plat_driver); -#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) +#ifdef CONFIG_OF platform_driver_unregister(&isp1760_of_driver); #endif #ifdef CONFIG_PCI diff --git a/trunk/drivers/usb/musb/musb_host.c b/trunk/drivers/usb/musb/musb_host.c index 79cb0af779fa..60ddba8066ea 100644 --- a/trunk/drivers/usb/musb/musb_host.c +++ b/trunk/drivers/usb/musb/musb_host.c @@ -774,10 +774,6 @@ static void musb_ep_program(struct musb *musb, u8 epnum, if (musb->double_buffer_not_ok) musb_writew(epio, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); - else if (can_bulk_split(musb, qh->type)) - musb_writew(epio, MUSB_TXMAXP, packet_sz - | ((hw_ep->max_packet_sz_tx / - packet_sz) - 1) << 11); else musb_writew(epio, MUSB_TXMAXP, qh->maxpacket | diff --git a/trunk/firmware/README.AddingFirmware b/trunk/firmware/README.AddingFirmware index ea78c3a17eec..e24cd8986d8b 100644 --- a/trunk/firmware/README.AddingFirmware +++ b/trunk/firmware/README.AddingFirmware @@ -12,7 +12,7 @@ here. This directory is _NOT_ for adding arbitrary new firmware images. The place to add those is the separate linux-firmware repository: - git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git + git://git.kernel.org/pub/scm/linux/kernel/git/dwmw2/linux-firmware.git That repository contains all these firmware images which have been extracted from older drivers, as well various new firmware images which @@ -22,7 +22,6 @@ been permitted to redistribute under separate cover. To submit firmware to that repository, please send either a git binary diff or preferably a git pull request to: David Woodhouse - Ben Hutchings Your commit should include an update to the WHENCE file clearly identifying the licence under which the firmware is available, and diff --git a/trunk/fs/btrfs/async-thread.c b/trunk/fs/btrfs/async-thread.c index 0cc20b35c1c4..704a2ba08ea8 100644 --- a/trunk/fs/btrfs/async-thread.c +++ b/trunk/fs/btrfs/async-thread.c @@ -563,8 +563,8 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers) struct list_head *fallback; int ret; - spin_lock_irqsave(&workers->lock, flags); again: + spin_lock_irqsave(&workers->lock, flags); worker = next_worker(workers); if (!worker) { @@ -579,7 +579,6 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers) spin_unlock_irqrestore(&workers->lock, flags); /* we're below the limit, start another worker */ ret = __btrfs_start_workers(workers); - spin_lock_irqsave(&workers->lock, flags); if (ret) goto fallback; goto again; diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c index fd1a06df5bc6..0a6b928813a4 100644 --- a/trunk/fs/btrfs/inode.c +++ b/trunk/fs/btrfs/inode.c @@ -4590,6 +4590,10 @@ static int btrfs_add_nondir(struct btrfs_trans_handle *trans, int err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, dentry->d_name.len, backref, index); + if (!err) { + d_instantiate(dentry, inode); + return 0; + } if (err > 0) err = -EEXIST; return err; @@ -4651,7 +4655,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, else { init_special_inode(inode, inode->i_mode, rdev); btrfs_update_inode(trans, root, inode); - d_instantiate(dentry, inode); } out_unlock: nr = trans->blocks_used; @@ -4719,7 +4722,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, 
inode->i_mapping->a_ops = &btrfs_aops; inode->i_mapping->backing_dev_info = &root->fs_info->bdi; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; - d_instantiate(dentry, inode); } out_unlock: nr = trans->blocks_used; @@ -4777,7 +4779,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *parent = dentry->d_parent; err = btrfs_update_inode(trans, root, inode); BUG_ON(err); - d_instantiate(dentry, inode); btrfs_log_new_name(trans, inode, NULL, parent); } @@ -7244,8 +7245,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, drop_inode = 1; out_unlock: - if (!err) - d_instantiate(dentry, inode); nr = trans->blocks_used; btrfs_end_transaction_throttle(trans, root); if (drop_inode) { diff --git a/trunk/fs/fs-writeback.c b/trunk/fs/fs-writeback.c index 30f78bb16afb..e4c7af393c2d 100644 --- a/trunk/fs/fs-writeback.c +++ b/trunk/fs/fs-writeback.c @@ -47,6 +47,17 @@ struct wb_writeback_work { struct completion *done; /* set if the caller waits */ }; +const char *wb_reason_name[] = { + [WB_REASON_BACKGROUND] = "background", + [WB_REASON_TRY_TO_FREE_PAGES] = "try_to_free_pages", + [WB_REASON_SYNC] = "sync", + [WB_REASON_PERIODIC] = "periodic", + [WB_REASON_LAPTOP_TIMER] = "laptop_timer", + [WB_REASON_FREE_MORE_MEM] = "free_more_memory", + [WB_REASON_FS_FREE_SPACE] = "fs_free_space", + [WB_REASON_FORKER_THREAD] = "forker_thread" +}; + /* * Include the creation of the trace points after defining the * wb_writeback_work structure so that the definition remains local to this diff --git a/trunk/include/linux/freezer.h b/trunk/include/linux/freezer.h index 0ab54e16a91f..7bcfe73d999b 100644 --- a/trunk/include/linux/freezer.h +++ b/trunk/include/linux/freezer.h @@ -116,11 +116,9 @@ static inline int freezer_should_skip(struct task_struct *p) /* Like schedule_timeout_killable(), but should not block the freezer. 
*/ #define freezable_schedule_timeout_killable(timeout) \ ({ \ - long __retval; \ freezer_do_not_count(); \ - __retval = schedule_timeout_killable(timeout); \ + schedule_timeout_killable(timeout); \ freezer_count(); \ - __retval; \ }) /* diff --git a/trunk/include/linux/lglock.h b/trunk/include/linux/lglock.h index 87f402ccec55..f549056fb20b 100644 --- a/trunk/include/linux/lglock.h +++ b/trunk/include/linux/lglock.h @@ -22,7 +22,6 @@ #include #include #include -#include /* can make br locks by using local lock for read side, global lock for write */ #define br_lock_init(name) name##_lock_init() @@ -73,31 +72,9 @@ #define DEFINE_LGLOCK(name) \ \ - DEFINE_SPINLOCK(name##_cpu_lock); \ - cpumask_t name##_cpus __read_mostly; \ DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \ DEFINE_LGLOCK_LOCKDEP(name); \ \ - static int \ - name##_lg_cpu_callback(struct notifier_block *nb, \ - unsigned long action, void *hcpu) \ - { \ - switch (action & ~CPU_TASKS_FROZEN) { \ - case CPU_UP_PREPARE: \ - spin_lock(&name##_cpu_lock); \ - cpu_set((unsigned long)hcpu, name##_cpus); \ - spin_unlock(&name##_cpu_lock); \ - break; \ - case CPU_UP_CANCELED: case CPU_DEAD: \ - spin_lock(&name##_cpu_lock); \ - cpu_clear((unsigned long)hcpu, name##_cpus); \ - spin_unlock(&name##_cpu_lock); \ - } \ - return NOTIFY_OK; \ - } \ - static struct notifier_block name##_lg_cpu_notifier = { \ - .notifier_call = name##_lg_cpu_callback, \ - }; \ void name##_lock_init(void) { \ int i; \ LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \ @@ -106,11 +83,6 @@ lock = &per_cpu(name##_lock, i); \ *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \ } \ - register_hotcpu_notifier(&name##_lg_cpu_notifier); \ - get_online_cpus(); \ - for_each_online_cpu(i) \ - cpu_set(i, name##_cpus); \ - put_online_cpus(); \ } \ EXPORT_SYMBOL(name##_lock_init); \ \ @@ -152,9 +124,9 @@ \ void name##_global_lock_online(void) { \ int i; \ - spin_lock(&name##_cpu_lock); \ + preempt_disable(); \ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ - for_each_cpu(i, &name##_cpus) { \ + for_each_online_cpu(i) { \ arch_spinlock_t *lock; \ lock = &per_cpu(name##_lock, i); \ arch_spin_lock(lock); \ @@ -165,12 +137,12 @@ void name##_global_unlock_online(void) { \ int i; \ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ - for_each_cpu(i, &name##_cpus) { \ + for_each_online_cpu(i) { \ arch_spinlock_t *lock; \ lock = &per_cpu(name##_lock, i); \ arch_spin_unlock(lock); \ } \ - spin_unlock(&name##_cpu_lock); \ + preempt_enable(); \ } \ EXPORT_SYMBOL(name##_global_unlock_online); \ \ diff --git a/trunk/include/linux/pm.h b/trunk/include/linux/pm.h index e4982ac3fbbc..21e04dd72a84 100644 --- a/trunk/include/linux/pm.h +++ b/trunk/include/linux/pm.h @@ -508,8 +508,6 @@ struct dev_pm_info { unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; - ktime_t suspend_time; - s64 max_time_suspended_ns; #endif struct pm_subsys_data *subsys_data; /* Owned by the subsystem. 
*/ struct pm_qos_constraints *constraints; diff --git a/trunk/include/linux/pm_domain.h b/trunk/include/linux/pm_domain.h index a03a0ad998b8..65633e5a2bc0 100644 --- a/trunk/include/linux/pm_domain.h +++ b/trunk/include/linux/pm_domain.h @@ -10,7 +10,6 @@ #define _LINUX_PM_DOMAIN_H #include -#include enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ @@ -22,23 +21,6 @@ enum gpd_status { struct dev_power_governor { bool (*power_down_ok)(struct dev_pm_domain *domain); - bool (*stop_ok)(struct device *dev); -}; - -struct gpd_dev_ops { - int (*start)(struct device *dev); - int (*stop)(struct device *dev); - int (*save_state)(struct device *dev); - int (*restore_state)(struct device *dev); - int (*suspend)(struct device *dev); - int (*suspend_late)(struct device *dev); - int (*resume_early)(struct device *dev); - int (*resume)(struct device *dev); - int (*freeze)(struct device *dev); - int (*freeze_late)(struct device *dev); - int (*thaw_early)(struct device *dev); - int (*thaw)(struct device *dev); - bool (*active_wakeup)(struct device *dev); }; struct generic_pm_domain { @@ -50,7 +32,6 @@ struct generic_pm_domain { struct mutex lock; struct dev_power_governor *gov; struct work_struct power_off_work; - char *name; unsigned int in_progress; /* Number of devices being suspended now */ atomic_t sd_count; /* Number of subdomains with power "on" */ enum gpd_status status; /* Current state of the domain */ @@ -63,13 +44,10 @@ struct generic_pm_domain { bool suspend_power_off; /* Power status before system suspend */ bool dev_irq_safe; /* Device callbacks are IRQ-safe */ int (*power_off)(struct generic_pm_domain *domain); - s64 power_off_latency_ns; int (*power_on)(struct generic_pm_domain *domain); - s64 power_on_latency_ns; - struct gpd_dev_ops dev_ops; - s64 break_even_ns; /* Power break even for the entire domain. */ - s64 max_off_time_ns; /* Maximum allowed "suspended" time. 
*/ - ktime_t power_off_time; + int (*start_device)(struct device *dev); + int (*stop_device)(struct device *dev); + bool (*active_wakeup)(struct device *dev); }; static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) @@ -84,18 +62,8 @@ struct gpd_link { struct list_head slave_node; }; -struct gpd_timing_data { - s64 stop_latency_ns; - s64 start_latency_ns; - s64 save_state_latency_ns; - s64 restore_state_latency_ns; - s64 break_even_ns; -}; - struct generic_pm_domain_data { struct pm_domain_data base; - struct gpd_dev_ops ops; - struct gpd_timing_data td; bool need_restore; }; @@ -105,54 +73,18 @@ static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data * } #ifdef CONFIG_PM_GENERIC_DOMAINS -static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) -{ - return to_gpd_data(dev->power.subsys_data->domain_data); -} - -extern struct dev_power_governor simple_qos_governor; - -extern struct generic_pm_domain *dev_to_genpd(struct device *dev); -extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, - struct device *dev, - struct gpd_timing_data *td); - -static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, - struct device *dev) -{ - return __pm_genpd_add_device(genpd, dev, NULL); -} - +extern int pm_genpd_add_device(struct generic_pm_domain *genpd, + struct device *dev); extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, struct device *dev); extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *new_subdomain); extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *target); -extern int pm_genpd_add_callbacks(struct device *dev, - struct gpd_dev_ops *ops, - struct gpd_timing_data *td); -extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td); extern void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off); - extern int pm_genpd_poweron(struct generic_pm_domain *genpd); - -extern bool default_stop_ok(struct device *dev); - -extern struct dev_power_governor pm_domain_always_on_gov; #else - -static inline struct generic_pm_domain *dev_to_genpd(struct device *dev) -{ - return ERR_PTR(-ENOSYS); -} -static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd, - struct device *dev, - struct gpd_timing_data *td) -{ - return -ENOSYS; -} static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) { @@ -173,35 +105,14 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, { return -ENOSYS; } -static inline int pm_genpd_add_callbacks(struct device *dev, - struct gpd_dev_ops *ops, - struct gpd_timing_data *td) -{ - return -ENOSYS; -} -static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) -{ - return -ENOSYS; -} -static inline void pm_genpd_init(struct generic_pm_domain *genpd, bool is_off) -{ -} +static inline void pm_genpd_init(struct generic_pm_domain *genpd, + struct dev_power_governor *gov, bool is_off) {} static inline int pm_genpd_poweron(struct generic_pm_domain *genpd) { return -ENOSYS; } -static inline bool default_stop_ok(struct device *dev) -{ - return false; -} -#define pm_domain_always_on_gov NULL #endif -static inline int pm_genpd_remove_callbacks(struct device *dev) -{ - return __pm_genpd_remove_callbacks(dev, true); -} - #ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd); extern void 
pm_genpd_poweroff_unused(void); diff --git a/trunk/include/linux/pm_qos.h b/trunk/include/linux/pm_qos.h index e5bbcbaa6f57..83b0ea302a80 100644 --- a/trunk/include/linux/pm_qos.h +++ b/trunk/include/linux/pm_qos.h @@ -78,7 +78,6 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); int pm_qos_request_active(struct pm_qos_request *req); s32 pm_qos_read_value(struct pm_qos_constraints *c); -s32 __dev_pm_qos_read_value(struct device *dev); s32 dev_pm_qos_read_value(struct device *dev); int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, s32 value); @@ -92,8 +91,6 @@ int dev_pm_qos_add_global_notifier(struct notifier_block *notifier); int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier); void dev_pm_qos_constraints_init(struct device *dev); void dev_pm_qos_constraints_destroy(struct device *dev); -int dev_pm_qos_add_ancestor_request(struct device *dev, - struct dev_pm_qos_request *req, s32 value); #else static inline int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, @@ -122,8 +119,6 @@ static inline int pm_qos_request_active(struct pm_qos_request *req) static inline s32 pm_qos_read_value(struct pm_qos_constraints *c) { return 0; } -static inline s32 __dev_pm_qos_read_value(struct device *dev) - { return 0; } static inline s32 dev_pm_qos_read_value(struct device *dev) { return 0; } static inline int dev_pm_qos_add_request(struct device *dev, @@ -155,9 +150,6 @@ static inline void dev_pm_qos_constraints_destroy(struct device *dev) { dev->power.power_state = PMSG_INVALID; } -static inline int dev_pm_qos_add_ancestor_request(struct device *dev, - struct dev_pm_qos_request *req, s32 value) - { return 0; } #endif #endif diff --git a/trunk/include/linux/pm_runtime.h b/trunk/include/linux/pm_runtime.h index 609daae7a014..d3085e72a0ee 100644 --- a/trunk/include/linux/pm_runtime.h +++ b/trunk/include/linux/pm_runtime.h @@ -45,8 +45,6 @@ extern void pm_runtime_irq_safe(struct device *dev); extern void __pm_runtime_use_autosuspend(struct device *dev, bool use); extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev); -extern void pm_runtime_update_max_time_suspended(struct device *dev, - s64 delta_ns); static inline bool pm_children_suspended(struct device *dev) { @@ -150,9 +148,6 @@ static inline void pm_runtime_set_autosuspend_delay(struct device *dev, static inline unsigned long pm_runtime_autosuspend_expiration( struct device *dev) { return 0; } -static inline void pm_runtime_update_max_time_suspended(struct device *dev, - s64 delta_ns) {} - #endif /* !CONFIG_PM_RUNTIME */ static inline int pm_runtime_idle(struct device *dev) diff --git a/trunk/include/linux/sh_intc.h b/trunk/include/linux/sh_intc.h index b160645f5599..5812fefbcedf 100644 --- a/trunk/include/linux/sh_intc.h +++ b/trunk/include/linux/sh_intc.h @@ -95,7 +95,6 @@ struct intc_desc { unsigned int num_resources; intc_enum force_enable; intc_enum force_disable; - bool skip_syscore_suspend; struct intc_hw_desc hw; }; diff --git a/trunk/include/net/dst.h b/trunk/include/net/dst.h index 75766b42660e..6faec1a60216 100644 --- a/trunk/include/net/dst.h +++ b/trunk/include/net/dst.h @@ -53,7 +53,6 @@ struct dst_entry { #define DST_NOHASH 0x0008 #define DST_NOCACHE 0x0010 #define DST_NOCOUNT 0x0020 -#define DST_NOPEER 0x0040 short error; short obsolete; diff --git a/trunk/include/net/flow.h b/trunk/include/net/flow.h index 57f15a7f1cdd..a09447749e2d 
100644 --- a/trunk/include/net/flow.h +++ b/trunk/include/net/flow.h @@ -207,7 +207,6 @@ extern struct flow_cache_object *flow_cache_lookup( u8 dir, flow_resolve_t resolver, void *ctx); extern void flow_cache_flush(void); -extern void flow_cache_flush_deferred(void); extern atomic_t flow_cache_genid; #endif diff --git a/trunk/include/net/sctp/structs.h b/trunk/include/net/sctp/structs.h index a15432da27c3..e90e7a9935dd 100644 --- a/trunk/include/net/sctp/structs.h +++ b/trunk/include/net/sctp/structs.h @@ -241,9 +241,6 @@ extern struct sctp_globals { * bits is an indicator of when to send and window update SACK. */ int rwnd_update_shift; - - /* Threshold for autoclose timeout, in seconds. */ - unsigned long max_autoclose; } sctp_globals; #define sctp_rto_initial (sctp_globals.rto_initial) @@ -284,7 +281,6 @@ extern struct sctp_globals { #define sctp_auth_enable (sctp_globals.auth_enable) #define sctp_checksum_disable (sctp_globals.checksum_disable) #define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift) -#define sctp_max_autoclose (sctp_globals.max_autoclose) /* SCTP Socket type: UDP or TCP style. */ typedef enum { diff --git a/trunk/include/net/sock.h b/trunk/include/net/sock.h index 32e39371fba6..abb6e0f0c3c3 100644 --- a/trunk/include/net/sock.h +++ b/trunk/include/net/sock.h @@ -637,14 +637,12 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) /* * Take into account size of receive queue and backlog queue - * Do not take into account this skb truesize, - * to allow even a single big packet to come. */ static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb) { unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); - return qsize > sk->sk_rcvbuf; + return qsize + skb->truesize > sk->sk_rcvbuf; } /* The per-socket spinlock must be held here. 
*/ diff --git a/trunk/include/trace/events/writeback.h b/trunk/include/trace/events/writeback.h index 99d1d0decf88..b99caa8b780c 100644 --- a/trunk/include/trace/events/writeback.h +++ b/trunk/include/trace/events/writeback.h @@ -21,16 +21,6 @@ {I_REFERENCED, "I_REFERENCED"} \ ) -#define WB_WORK_REASON \ - {WB_REASON_BACKGROUND, "background"}, \ - {WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages"}, \ - {WB_REASON_SYNC, "sync"}, \ - {WB_REASON_PERIODIC, "periodic"}, \ - {WB_REASON_LAPTOP_TIMER, "laptop_timer"}, \ - {WB_REASON_FREE_MORE_MEM, "free_more_memory"}, \ - {WB_REASON_FS_FREE_SPACE, "fs_free_space"}, \ - {WB_REASON_FORKER_THREAD, "forker_thread"} - struct wb_writeback_work; DECLARE_EVENT_CLASS(writeback_work_class, @@ -65,7 +55,7 @@ DECLARE_EVENT_CLASS(writeback_work_class, __entry->for_kupdate, __entry->range_cyclic, __entry->for_background, - __print_symbolic(__entry->reason, WB_WORK_REASON) + wb_reason_name[__entry->reason] ) ); #define DEFINE_WRITEBACK_WORK_EVENT(name) \ @@ -194,8 +184,7 @@ TRACE_EVENT(writeback_queue_io, __entry->older, /* older_than_this in jiffies */ __entry->age, /* older_than_this in relative milliseconds */ __entry->moved, - __print_symbolic(__entry->reason, WB_WORK_REASON) - ) + wb_reason_name[__entry->reason]) ); TRACE_EVENT(global_dirty_state, diff --git a/trunk/kernel/cpu.c b/trunk/kernel/cpu.c index cf915b86a5fb..563f13609470 100644 --- a/trunk/kernel/cpu.c +++ b/trunk/kernel/cpu.c @@ -470,7 +470,7 @@ void __ref enable_nonboot_cpus(void) cpu_maps_update_done(); } -static int __init alloc_frozen_cpus(void) +static int alloc_frozen_cpus(void) { if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO)) return -ENOMEM; @@ -543,7 +543,7 @@ cpu_hotplug_pm_callback(struct notifier_block *nb, } -static int __init cpu_hotplug_pm_sync_init(void) +int cpu_hotplug_pm_sync_init(void) { pm_notifier(cpu_hotplug_pm_callback, 0); return 0; diff --git a/trunk/kernel/kmod.c b/trunk/kernel/kmod.c index a0a88543934e..81b4a27261b2 100644 --- a/trunk/kernel/kmod.c +++ b/trunk/kernel/kmod.c @@ -285,14 +285,14 @@ static int usermodehelper_disabled = 1; static atomic_t running_helpers = ATOMIC_INIT(0); /* - * Wait queue head used by usermodehelper_disable() to wait for all running + * Wait queue head used by usermodehelper_pm_callback() to wait for all running * helpers to finish. 
*/ static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq); /* * Time to wait for running_helpers to become zero before the setting of - * usermodehelper_disabled in usermodehelper_disable() fails + * usermodehelper_disabled in usermodehelper_pm_callback() fails */ #define RUNNING_HELPERS_TIMEOUT (5 * HZ) diff --git a/trunk/kernel/power/user.c b/trunk/kernel/power/user.c index 78bdb4404aab..6b1ab7a88522 100644 --- a/trunk/kernel/power/user.c +++ b/trunk/kernel/power/user.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -380,6 +381,66 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, return error; } +#ifdef CONFIG_COMPAT + +struct compat_resume_swap_area { + compat_loff_t offset; + u32 dev; +} __packed; + +static long +snapshot_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t)); + + switch (cmd) { + case SNAPSHOT_GET_IMAGE_SIZE: + case SNAPSHOT_AVAIL_SWAP_SIZE: + case SNAPSHOT_ALLOC_SWAP_PAGE: { + compat_loff_t __user *uoffset = compat_ptr(arg); + loff_t offset; + mm_segment_t old_fs; + int err; + + old_fs = get_fs(); + set_fs(KERNEL_DS); + err = snapshot_ioctl(file, cmd, (unsigned long) &offset); + set_fs(old_fs); + if (!err && put_user(offset, uoffset)) + err = -EFAULT; + return err; + } + + case SNAPSHOT_CREATE_IMAGE: + return snapshot_ioctl(file, cmd, + (unsigned long) compat_ptr(arg)); + + case SNAPSHOT_SET_SWAP_AREA: { + struct compat_resume_swap_area __user *u_swap_area = + compat_ptr(arg); + struct resume_swap_area swap_area; + mm_segment_t old_fs; + int err; + + err = get_user(swap_area.offset, &u_swap_area->offset); + err |= get_user(swap_area.dev, &u_swap_area->dev); + if (err) + return -EFAULT; + old_fs = get_fs(); + set_fs(KERNEL_DS); + err = snapshot_ioctl(file, SNAPSHOT_SET_SWAP_AREA, + (unsigned long) &swap_area); + set_fs(old_fs); + return err; + } + + default: + return snapshot_ioctl(file, cmd, arg); + } +} + +#endif /* CONFIG_COMPAT */ + static const struct file_operations snapshot_fops = { .open = snapshot_open, .release = snapshot_release, @@ -387,6 +448,9 @@ static const struct file_operations snapshot_fops = { .write = snapshot_write, .llseek = no_llseek, .unlocked_ioctl = snapshot_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = snapshot_compat_ioctl, +#endif }; static struct miscdevice snapshot_device = { diff --git a/trunk/mm/filemap.c b/trunk/mm/filemap.c index 5f0a3c91fdac..c106d3b3cc64 100644 --- a/trunk/mm/filemap.c +++ b/trunk/mm/filemap.c @@ -1828,7 +1828,7 @@ static struct page *__read_cache_page(struct address_space *mapping, page = __page_cache_alloc(gfp | __GFP_COLD); if (!page) return ERR_PTR(-ENOMEM); - err = add_to_page_cache_lru(page, mapping, index, gfp); + err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); if (unlikely(err)) { page_cache_release(page); if (err == -EEXIST) @@ -1925,7 +1925,10 @@ static struct page *wait_on_page_read(struct page *page) * @gfp: the page allocator flags to use if allocating * * This is the same as "read_mapping_page(mapping, index, NULL)", but with - * any new page allocations done using the specified allocation flags. + * any new page allocations done using the specified allocation flags. Note + * that the Radix tree operations will still use GFP_KERNEL, so you can't + * expect to do this atomically or anything like that - but you can pass in + * other page requirements. * * If the page does not get brought uptodate, return -EIO. 
*/ diff --git a/trunk/net/bluetooth/hci_conn.c b/trunk/net/bluetooth/hci_conn.c index c1c597e3e198..e0af7237cd92 100644 --- a/trunk/net/bluetooth/hci_conn.c +++ b/trunk/net/bluetooth/hci_conn.c @@ -673,7 +673,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) goto encrypt; auth: - if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) + if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) return 0; if (!hci_conn_auth(conn, sec_level, auth_type)) diff --git a/trunk/net/bluetooth/l2cap_core.c b/trunk/net/bluetooth/l2cap_core.c index 17b5b1cd9657..5ea94a1eecf2 100644 --- a/trunk/net/bluetooth/l2cap_core.c +++ b/trunk/net/bluetooth/l2cap_core.c @@ -2152,7 +2152,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi void *ptr = req->data; int type, olen; unsigned long val; - struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; + struct l2cap_conf_rfc rfc; BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data); @@ -2271,16 +2271,6 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) } } - /* Use sane default values in case a misbehaving remote device - * did not send an RFC option. - */ - rfc.mode = chan->mode; - rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); - rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); - rfc.max_pdu_size = cpu_to_le16(chan->imtu); - - BT_ERR("Expected RFC option was not found, using defaults"); - done: switch (rfc.mode) { case L2CAP_MODE_ERTM: diff --git a/trunk/net/bluetooth/rfcomm/core.c b/trunk/net/bluetooth/rfcomm/core.c index 2d28dfe98389..4e32e18211f9 100644 --- a/trunk/net/bluetooth/rfcomm/core.c +++ b/trunk/net/bluetooth/rfcomm/core.c @@ -1146,7 +1146,6 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci) if (list_empty(&s->dlcs)) { s->state = BT_DISCONN; rfcomm_send_disc(s, 0); - rfcomm_session_clear_timer(s); } break; diff --git a/trunk/net/bridge/br_netfilter.c b/trunk/net/bridge/br_netfilter.c index fa8b8f763580..d6ec3720c77e 100644 --- a/trunk/net/bridge/br_netfilter.c +++ b/trunk/net/bridge/br_netfilter.c @@ -114,18 +114,12 @@ static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const vo return NULL; } -static unsigned int fake_mtu(const struct dst_entry *dst) -{ - return dst->dev->mtu; -} - static struct dst_ops fake_dst_ops = { .family = AF_INET, .protocol = cpu_to_be16(ETH_P_IP), .update_pmtu = fake_update_pmtu, .cow_metrics = fake_cow_metrics, .neigh_lookup = fake_neigh_lookup, - .mtu = fake_mtu, }; /* @@ -147,7 +141,7 @@ void br_netfilter_rtable_init(struct net_bridge *br) rt->dst.dev = br->dev; rt->dst.path = &rt->dst; dst_init_metrics(&rt->dst, br_dst_default_metrics, true); - rt->dst.flags = DST_NOXFRM | DST_NOPEER; + rt->dst.flags = DST_NOXFRM; rt->dst.ops = &fake_dst_ops; } diff --git a/trunk/net/core/flow.c b/trunk/net/core/flow.c index e318c7e98042..8ae42de9c79e 100644 --- a/trunk/net/core/flow.c +++ b/trunk/net/core/flow.c @@ -358,18 +358,6 @@ void flow_cache_flush(void) put_online_cpus(); } -static void flow_cache_flush_task(struct work_struct *work) -{ - flow_cache_flush(); -} - -static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task); - -void flow_cache_flush_deferred(void) -{ - schedule_work(&flow_cache_flush_work); -} - static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu) { struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); diff --git a/trunk/net/core/net-sysfs.c b/trunk/net/core/net-sysfs.c index 385aefe53648..c71c434a4c05 100644 --- 
a/trunk/net/core/net-sysfs.c +++ b/trunk/net/core/net-sysfs.c @@ -665,14 +665,11 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, if (count) { int i; - if (count > INT_MAX) - return -EINVAL; - count = roundup_pow_of_two(count); - if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table)) - / sizeof(struct rps_dev_flow)) { + if (count > 1<<30) { /* Enforce a limit to prevent overflow */ return -EINVAL; } + count = roundup_pow_of_two(count); table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count)); if (!table) return -ENOMEM; diff --git a/trunk/net/core/sock.c b/trunk/net/core/sock.c index b23f174ab84c..4ed7b1d12f5e 100644 --- a/trunk/net/core/sock.c +++ b/trunk/net/core/sock.c @@ -288,7 +288,11 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) unsigned long flags; struct sk_buff_head *list = &sk->sk_receive_queue; - if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) { + /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces + number of warnings when compiling with -W --ANK + */ + if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= + (unsigned)sk->sk_rcvbuf) { atomic_inc(&sk->sk_drops); trace_sock_rcvqueue_full(sk, skb); return -ENOMEM; diff --git a/trunk/net/ipv4/ipconfig.c b/trunk/net/ipv4/ipconfig.c index 99ec116bef14..0da2afc97f32 100644 --- a/trunk/net/ipv4/ipconfig.c +++ b/trunk/net/ipv4/ipconfig.c @@ -253,10 +253,6 @@ static int __init ic_open_devs(void) } } - /* no point in waiting if we could not bring up at least one device */ - if (!ic_first_dev) - goto have_carrier; - /* wait for a carrier on at least one device */ start = jiffies; while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) { diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c index 94cdbc55ca7e..46af62363b8c 100644 --- a/trunk/net/ipv4/route.c +++ b/trunk/net/ipv4/route.c @@ -91,7 +91,6 @@ #include #include #include -#include #include #include #include @@ -121,7 +120,6 @@ static int ip_rt_max_size; static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; -static int ip_rt_gc_interval __read_mostly = 60 * HZ; static int ip_rt_gc_min_interval __read_mostly = HZ / 2; static int ip_rt_redirect_number __read_mostly = 9; static int ip_rt_redirect_load __read_mostly = HZ / 50; @@ -135,9 +133,6 @@ static int ip_rt_min_advmss __read_mostly = 256; static int rt_chain_length_max __read_mostly = 20; static int redirect_genid; -static struct delayed_work expires_work; -static unsigned long expires_ljiffies; - /* * Interface to generic destination cache. 
*/ @@ -835,97 +830,6 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth) return ONE; } -static void rt_check_expire(void) -{ - static unsigned int rover; - unsigned int i = rover, goal; - struct rtable *rth; - struct rtable __rcu **rthp; - unsigned long samples = 0; - unsigned long sum = 0, sum2 = 0; - unsigned long delta; - u64 mult; - - delta = jiffies - expires_ljiffies; - expires_ljiffies = jiffies; - mult = ((u64)delta) << rt_hash_log; - if (ip_rt_gc_timeout > 1) - do_div(mult, ip_rt_gc_timeout); - goal = (unsigned int)mult; - if (goal > rt_hash_mask) - goal = rt_hash_mask + 1; - for (; goal > 0; goal--) { - unsigned long tmo = ip_rt_gc_timeout; - unsigned long length; - - i = (i + 1) & rt_hash_mask; - rthp = &rt_hash_table[i].chain; - - if (need_resched()) - cond_resched(); - - samples++; - - if (rcu_dereference_raw(*rthp) == NULL) - continue; - length = 0; - spin_lock_bh(rt_hash_lock_addr(i)); - while ((rth = rcu_dereference_protected(*rthp, - lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) { - prefetch(rth->dst.rt_next); - if (rt_is_expired(rth)) { - *rthp = rth->dst.rt_next; - rt_free(rth); - continue; - } - if (rth->dst.expires) { - /* Entry is expired even if it is in use */ - if (time_before_eq(jiffies, rth->dst.expires)) { -nofree: - tmo >>= 1; - rthp = &rth->dst.rt_next; - /* - * We only count entries on - * a chain with equal hash inputs once - * so that entries for different QOS - * levels, and other non-hash input - * attributes don't unfairly skew - * the length computation - */ - length += has_noalias(rt_hash_table[i].chain, rth); - continue; - } - } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) - goto nofree; - - /* Cleanup aged off entries. */ - *rthp = rth->dst.rt_next; - rt_free(rth); - } - spin_unlock_bh(rt_hash_lock_addr(i)); - sum += length; - sum2 += length*length; - } - if (samples) { - unsigned long avg = sum / samples; - unsigned long sd = int_sqrt(sum2 / samples - avg*avg); - rt_chain_length_max = max_t(unsigned long, - ip_rt_gc_elasticity, - (avg + 4*sd) >> FRACT_BITS); - } - rover = i; -} - -/* - * rt_worker_func() is run in process context. 
- * we call rt_check_expire() to scan part of the hash table - */ -static void rt_worker_func(struct work_struct *work) -{ - rt_check_expire(); - schedule_delayed_work(&expires_work, ip_rt_gc_interval); -} - /* * Perturbation of rt_genid by a small quantity [1..256] * Using 8 bits of shuffling ensure we can call rt_cache_invalidate() @@ -1367,7 +1271,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more) { struct rtable *rt = (struct rtable *) dst; - if (rt && !(rt->dst.flags & DST_NOPEER)) { + if (rt) { if (rt->peer == NULL) rt_bind_peer(rt, rt->rt_dst, 1); @@ -1378,7 +1282,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more) iph->id = htons(inet_getid(rt->peer, more)); return; } - } else if (!rt) + } else printk(KERN_DEBUG "rt_bind_peer(0) @%p\n", __builtin_return_address(0)); @@ -3274,13 +3178,6 @@ static ctl_table ipv4_route_table[] = { .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, - { - .procname = "gc_interval", - .data = &ip_rt_gc_interval, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_jiffies, - }, { .procname = "redirect_load", .data = &ip_rt_redirect_load, @@ -3491,11 +3388,6 @@ int __init ip_rt_init(void) devinet_init(); ip_fib_init(); - INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func); - expires_ljiffies = jiffies; - schedule_delayed_work(&expires_work, - net_random() % ip_rt_gc_interval + ip_rt_gc_interval); - if (ip_rt_proc_init()) printk(KERN_ERR "Unable to create route proc files\n"); #ifdef CONFIG_XFRM diff --git a/trunk/net/ipv6/ip6_output.c b/trunk/net/ipv6/ip6_output.c index ec562713db9b..84d0bd5cac93 100644 --- a/trunk/net/ipv6/ip6_output.c +++ b/trunk/net/ipv6/ip6_output.c @@ -603,7 +603,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) static atomic_t ipv6_fragmentation_id; int old, new; - if (rt && !(rt->dst.flags & DST_NOPEER)) { + if (rt) { struct inet_peer *peer; if (!rt->rt6i_peer) diff --git a/trunk/net/llc/af_llc.c b/trunk/net/llc/af_llc.c index a18e6c3d36e3..dfd3a648a551 100644 --- a/trunk/net/llc/af_llc.c +++ b/trunk/net/llc/af_llc.c @@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, copied += used; len -= used; - /* For non stream protcols we get one packet per recvmsg call */ - if (sk->sk_type != SOCK_STREAM) - goto copy_uaddr; - if (!(flags & MSG_PEEK)) { sk_eat_skb(sk, skb, 0); *seq = 0; } + /* For non stream protcols we get one packet per recvmsg call */ + if (sk->sk_type != SOCK_STREAM) + goto copy_uaddr; + /* Partial read */ if (used + offset < skb->len) continue; @@ -857,12 +857,6 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, } if (llc_sk(sk)->cmsg_flags) llc_cmsg_rcv(msg, skb); - - if (!(flags & MSG_PEEK)) { - sk_eat_skb(sk, skb, 0); - *seq = 0; - } - goto out; } diff --git a/trunk/net/netfilter/xt_connbytes.c b/trunk/net/netfilter/xt_connbytes.c index 9ddf1c3bfb39..5b138506690e 100644 --- a/trunk/net/netfilter/xt_connbytes.c +++ b/trunk/net/netfilter/xt_connbytes.c @@ -87,10 +87,10 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par) break; } - if (sinfo->count.to >= sinfo->count.from) + if (sinfo->count.to) return what <= sinfo->count.to && what >= sinfo->count.from; - else /* inverted */ - return what < sinfo->count.to || what > sinfo->count.from; + else + return what >= sinfo->count.from; } static int connbytes_mt_check(const struct xt_mtchk_param *par) diff --git a/trunk/net/nfc/nci/core.c b/trunk/net/nfc/nci/core.c index 
ea66034499ce..3925c6578767 100644 --- a/trunk/net/nfc/nci/core.c +++ b/trunk/net/nfc/nci/core.c @@ -69,7 +69,7 @@ static int __nci_request(struct nci_dev *ndev, __u32 timeout) { int rc = 0; - long completion_rc; + unsigned long completion_rc; ndev->req_status = NCI_REQ_PEND; diff --git a/trunk/net/packet/af_packet.c b/trunk/net/packet/af_packet.c index 3891702b81df..82a6f34d39d0 100644 --- a/trunk/net/packet/af_packet.c +++ b/trunk/net/packet/af_packet.c @@ -1630,7 +1630,8 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, if (snaplen > res) snaplen = res; - if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) + if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= + (unsigned)sk->sk_rcvbuf) goto drop_n_acct; if (skb_shared(skb)) { @@ -1761,7 +1762,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, if (po->tp_version <= TPACKET_V2) { if (macoff + snaplen > po->rx_ring.frame_size) { if (po->copy_thresh && - atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { + atomic_read(&sk->sk_rmem_alloc) + skb->truesize + < (unsigned)sk->sk_rcvbuf) { if (skb_shared(skb)) { copy_skb = skb_clone(skb, GFP_ATOMIC); } else { diff --git a/trunk/net/sched/sch_mqprio.c b/trunk/net/sched/sch_mqprio.c index 28de43092330..f88256cbacbf 100644 --- a/trunk/net/sched/sch_mqprio.c +++ b/trunk/net/sched/sch_mqprio.c @@ -107,7 +107,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) if (!netif_is_multiqueue(dev)) return -EOPNOTSUPP; - if (!opt || nla_len(opt) < sizeof(*qopt)) + if (nla_len(opt) < sizeof(*qopt)) return -EINVAL; qopt = nla_data(opt); diff --git a/trunk/net/sctp/associola.c b/trunk/net/sctp/associola.c index acd2edbc073e..152b5b3c3fff 100644 --- a/trunk/net/sctp/associola.c +++ b/trunk/net/sctp/associola.c @@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0; asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = - min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ; + (unsigned long)sp->autoclose * HZ; /* Initializes the timers */ for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) diff --git a/trunk/net/sctp/output.c b/trunk/net/sctp/output.c index 817174eb5f41..08b3cead6503 100644 --- a/trunk/net/sctp/output.c +++ b/trunk/net/sctp/output.c @@ -697,7 +697,13 @@ static void sctp_packet_append_data(struct sctp_packet *packet, /* Keep track of how many bytes are in flight to the receiver. */ asoc->outqueue.outstanding_bytes += datasize; - /* Update our view of the receiver's rwnd. */ + /* Update our view of the receiver's rwnd. Include sk_buff overhead + * while updating peer.rwnd so that it reduces the chances of a + * receiver running out of receive buffer space even when receive + * window is still open. This can happen when a sender is sending + * sending small messages. 
+ */ + datasize += sizeof(struct sk_buff); if (datasize < rwnd) rwnd -= datasize; else diff --git a/trunk/net/sctp/outqueue.c b/trunk/net/sctp/outqueue.c index cfeb1d4a1ee6..14c2b06028ff 100644 --- a/trunk/net/sctp/outqueue.c +++ b/trunk/net/sctp/outqueue.c @@ -411,7 +411,8 @@ void sctp_retransmit_mark(struct sctp_outq *q, chunk->transport->flight_size -= sctp_data_size(chunk); q->outstanding_bytes -= sctp_data_size(chunk); - q->asoc->peer.rwnd += sctp_data_size(chunk); + q->asoc->peer.rwnd += (sctp_data_size(chunk) + + sizeof(struct sk_buff)); } continue; } @@ -431,7 +432,8 @@ void sctp_retransmit_mark(struct sctp_outq *q, * (Section 7.2.4)), add the data size of those * chunks to the rwnd. */ - q->asoc->peer.rwnd += sctp_data_size(chunk); + q->asoc->peer.rwnd += (sctp_data_size(chunk) + + sizeof(struct sk_buff)); q->outstanding_bytes -= sctp_data_size(chunk); if (chunk->transport) transport->flight_size -= sctp_data_size(chunk); diff --git a/trunk/net/sctp/protocol.c b/trunk/net/sctp/protocol.c index 6f6ad8686833..61b9fca5a173 100644 --- a/trunk/net/sctp/protocol.c +++ b/trunk/net/sctp/protocol.c @@ -1285,9 +1285,6 @@ SCTP_STATIC __init int sctp_init(void) sctp_max_instreams = SCTP_DEFAULT_INSTREAMS; sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS; - /* Initialize maximum autoclose timeout. */ - sctp_max_autoclose = INT_MAX / HZ; - /* Initialize handle used for association ids. */ idr_init(&sctp_assocs_id); diff --git a/trunk/net/sctp/socket.c b/trunk/net/sctp/socket.c index 54a7cd2fdd7a..13bf5fcdbff1 100644 --- a/trunk/net/sctp/socket.c +++ b/trunk/net/sctp/socket.c @@ -2200,6 +2200,8 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, return -EINVAL; if (copy_from_user(&sp->autoclose, optval, optlen)) return -EFAULT; + /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */ + sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ); return 0; } diff --git a/trunk/net/sctp/sysctl.c b/trunk/net/sctp/sysctl.c index 60ffbd067ff7..6b3952961b85 100644 --- a/trunk/net/sctp/sysctl.c +++ b/trunk/net/sctp/sysctl.c @@ -53,10 +53,6 @@ static int sack_timer_min = 1; static int sack_timer_max = 500; static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ static int rwnd_scale_max = 16; -static unsigned long max_autoclose_min = 0; -static unsigned long max_autoclose_max = - (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX) - ? 
UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ; extern long sysctl_sctp_mem[3]; extern int sysctl_sctp_rmem[3]; @@ -262,15 +258,6 @@ static ctl_table sctp_table[] = { .extra1 = &one, .extra2 = &rwnd_scale_max, }, - { - .procname = "max_autoclose", - .data = &sctp_max_autoclose, - .maxlen = sizeof(unsigned long), - .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, - .extra1 = &max_autoclose_min, - .extra2 = &max_autoclose_max, - }, { /* sentinel */ } }; diff --git a/trunk/net/xfrm/xfrm_policy.c b/trunk/net/xfrm/xfrm_policy.c index 9049a5caeb25..2118d6446630 100644 --- a/trunk/net/xfrm/xfrm_policy.c +++ b/trunk/net/xfrm/xfrm_policy.c @@ -2276,6 +2276,8 @@ static void __xfrm_garbage_collect(struct net *net) { struct dst_entry *head, *next; + flow_cache_flush(); + spin_lock_bh(&xfrm_policy_sk_bundle_lock); head = xfrm_policy_sk_bundles; xfrm_policy_sk_bundles = NULL; @@ -2288,18 +2290,6 @@ static void __xfrm_garbage_collect(struct net *net) } } -static void xfrm_garbage_collect(struct net *net) -{ - flow_cache_flush(); - __xfrm_garbage_collect(net); -} - -static void xfrm_garbage_collect_deferred(struct net *net) -{ - flow_cache_flush_deferred(); - __xfrm_garbage_collect(net); -} - static void xfrm_init_pmtu(struct dst_entry *dst) { do { @@ -2432,7 +2422,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) if (likely(dst_ops->neigh_lookup == NULL)) dst_ops->neigh_lookup = xfrm_neigh_lookup; if (likely(afinfo->garbage_collect == NULL)) - afinfo->garbage_collect = xfrm_garbage_collect_deferred; + afinfo->garbage_collect = __xfrm_garbage_collect; xfrm_policy_afinfo[afinfo->family] = afinfo; } write_unlock_bh(&xfrm_policy_afinfo_lock); @@ -2526,7 +2516,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void switch (event) { case NETDEV_DOWN: - xfrm_garbage_collect(dev_net(dev)); + __xfrm_garbage_collect(dev_net(dev)); } return NOTIFY_DONE; } diff --git a/trunk/scripts/kconfig/Makefile b/trunk/scripts/kconfig/Makefile index 914833d99b06..ba573fe7c74d 100644 --- a/trunk/scripts/kconfig/Makefile +++ b/trunk/scripts/kconfig/Makefile @@ -60,8 +60,8 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h --directory=$(srctree) --directory=$(objtree) \ --output $(obj)/config.pot $(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot - $(Q)(for i in `ls $(srctree)/arch/*/Kconfig \ - $(srctree)/arch/*/um/Kconfig`; \ + $(Q)ln -fs Kconfig.x86 arch/um/Kconfig + $(Q)(for i in `ls $(srctree)/arch/*/Kconfig`; \ do \ echo " GEN $$i"; \ $(obj)/kxgettext $$i \ @@ -69,6 +69,7 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h done ) $(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \ --output $(obj)/linux.pot + $(Q)rm -f $(srctree)/arch/um/Kconfig $(Q)rm -f $(obj)/config.pot PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig diff --git a/trunk/sound/atmel/ac97c.c b/trunk/sound/atmel/ac97c.c index 73516f69ac7c..6e5addeb236b 100644 --- a/trunk/sound/atmel/ac97c.c +++ b/trunk/sound/atmel/ac97c.c @@ -899,10 +899,6 @@ static void atmel_ac97c_reset(struct atmel_ac97c *chip) /* AC97 v2.2 specifications says minimum 1 us. */ udelay(2); gpio_set_value(chip->reset_pin, 1); - } else { - ac97c_writel(chip, MR, AC97C_MR_WRST | AC97C_MR_ENA); - udelay(2); - ac97c_writel(chip, MR, AC97C_MR_ENA); } }
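Several of the networking hunks above (include/net/sock.h, net/core/sock.c, net/packet/af_packet.c) move back to charging an incoming packet's truesize against the receive buffer before the packet is admitted. Below is a minimal userspace C sketch of that admission style; the names fake_sock, rcvqueues_full and the fields are made up here purely to stand in for the kernel's socket bookkeeping, so treat it as an illustration of the accounting, not as the kernel's own sk_rcvqueues_full().

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's socket accounting fields. */
struct fake_sock {
	unsigned int rmem_alloc;   /* bytes already queued for the receiver */
	unsigned int backlog_len;  /* bytes sitting in the backlog queue */
	unsigned int rcvbuf;       /* configured receive buffer limit */
};

/*
 * Admission check in the "charge the new packet up front" style used by
 * the hunks above: the incoming packet's truesize is added before the
 * comparison, so a packet that would push the queues past the limit is
 * refused even though the queues alone are still under it.
 */
static bool rcvqueues_full(const struct fake_sock *sk, unsigned int truesize)
{
	unsigned int qsize = sk->backlog_len + sk->rmem_alloc;

	return qsize + truesize > sk->rcvbuf;
}

int main(void)
{
	struct fake_sock sk = { .rmem_alloc = 3000, .backlog_len = 1000,
				.rcvbuf = 4096 };

	printf("drop 200-byte skb: %d\n", rcvqueues_full(&sk, 200)); /* 1 */
	printf("drop  50-byte skb: %d\n", rcvqueues_full(&sk, 50));  /* 0 */
	return 0;
}

The behavioral difference between the two sides of those hunks is exactly the "+ truesize" term: without it, a single oversized packet can still be queued once the existing queue length is below rcvbuf; with it, the new packet's own footprint counts toward the limit at admission time.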