diff --git a/[refs] b/[refs] index 0aac9037ac3f..6b7bec5a7992 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 3632dee2f8b8a9720329f29eeaa4ec4669a3aff8 +refs/heads/master: b16ecfe2f985f77901a36ee5a99c7d3400313341 diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index d8362cf9909e..8511d3532c27 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -577,6 +577,9 @@ and is between 256 and 4096 characters. It is defined in the file a memory unit (amount[KMG]). See also Documentation/kdump/kdump.txt for a example. + cs4232= [HW,OSS] + Format: ,,,,, + cs89x0_dma= [HW,NET] Format: @@ -729,6 +732,10 @@ and is between 256 and 4096 characters. It is defined in the file Default value is 0. Value can be changed at runtime via /selinux/enforce. + es1371= [HW,OSS] + Format: ,[,[]] + See also header of sound/oss/es1371.c. + ether= [HW,NET] Ethernet cards parameters This option is obsoleted by the "netdev=" option, which has equivalent usage. See its documentation for details. diff --git a/trunk/Documentation/sound/alsa/HD-Audio-Models.txt b/trunk/Documentation/sound/alsa/HD-Audio-Models.txt index 0f5d26bea80f..64eb1100eec1 100644 --- a/trunk/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/trunk/Documentation/sound/alsa/HD-Audio-Models.txt @@ -349,7 +349,6 @@ STAC92HD73* STAC92HD83* =========== ref Reference board - mic-ref Reference board with power managment for ports STAC9872 ======== diff --git a/trunk/arch/x86/include/asm/bitops.h b/trunk/arch/x86/include/asm/bitops.h index 02b47a603fc8..e02a359d2aa5 100644 --- a/trunk/arch/x86/include/asm/bitops.h +++ b/trunk/arch/x86/include/asm/bitops.h @@ -3,9 +3,6 @@ /* * Copyright 1992, Linus Torvalds. - * - * Note: inlines with more than a single statement should be marked - * __always_inline to avoid problems with older gcc's inlining heuristics. */ #ifndef _LINUX_BITOPS_H @@ -56,8 +53,7 @@ * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ -static __always_inline void -set_bit(unsigned int nr, volatile unsigned long *addr) +static inline void set_bit(unsigned int nr, volatile unsigned long *addr) { if (IS_IMMEDIATE(nr)) { asm volatile(LOCK_PREFIX "orb %1,%0" @@ -94,8 +90,7 @@ static inline void __set_bit(int nr, volatile unsigned long *addr) * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() * in order to ensure changes are visible on other processors. */ -static __always_inline void -clear_bit(int nr, volatile unsigned long *addr) +static inline void clear_bit(int nr, volatile unsigned long *addr) { if (IS_IMMEDIATE(nr)) { asm volatile(LOCK_PREFIX "andb %1,%0" @@ -209,8 +204,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr) * * This is the same as test_and_set_bit on x86. 
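The bitops.h hunks above drop the __always_inline markers from set_bit(), clear_bit(), test_and_set_bit_lock() and constant_test_bit(), along with the comment about older gcc inlining heuristics. The marker matters because the IS_IMMEDIATE(nr) test only folds to the byte-wide "orb $imm" form when the body is actually inlined into a call site with a constant nr; if gcc outlines the function, the constant check always fails. A minimal userspace sketch of the same dispatch, built on __builtin_constant_p() (which is what the kernel's IS_IMMEDIATE() wraps); the mask arithmetic is illustrative, not the kernel's exact code, and it needs an x86 target:

#include <stdio.h>

#define IS_IMMEDIATE(nr)        (__builtin_constant_p(nr))

static inline __attribute__((always_inline)) void
set_bit(unsigned int nr, volatile unsigned long *addr)
{
        if (IS_IMMEDIATE(nr)) {
                /* constant nr: byte-wide RMW with an immediate mask */
                asm volatile("lock orb %1,%0"
                             : "+m" (*((volatile char *)addr + (nr >> 3)))
                             : "iq" ((char)(1 << (nr & 7)))
                             : "memory");
        } else {
                /* variable nr: bit-test-and-set, index in a register */
                asm volatile("lock btsl %1,%0"
                             : "+m" (*addr)
                             : "Ir" (nr)
                             : "memory");
        }
}

int main(void)
{
        unsigned long word = 0;
        unsigned int n = 11;

        set_bit(3, &word);              /* constant: can use the orb form */
        set_bit(n, &word);              /* runtime value: bts form */
        printf("%#lx\n", word);         /* 0x808 */
        return 0;
}

With optimization enabled, the first call can collapse to the single orb instruction while the second keeps the bts fallback; that collapse is what forced inlining protects.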
*/ -static __always_inline int -test_and_set_bit_lock(int nr, volatile unsigned long *addr) +static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr) { return test_and_set_bit(nr, addr); } @@ -306,7 +300,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) return oldbit; } -static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) +static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) { return ((1UL << (nr % BITS_PER_LONG)) & (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; diff --git a/trunk/arch/x86/include/asm/io.h b/trunk/arch/x86/include/asm/io.h index 1dbbdf4be9b4..05cfed4485fa 100644 --- a/trunk/arch/x86/include/asm/io.h +++ b/trunk/arch/x86/include/asm/io.h @@ -99,6 +99,7 @@ extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size); * A boot-time mapping is currently limited to at most 16 pages. */ extern void early_ioremap_init(void); +extern void early_ioremap_clear(void); extern void early_ioremap_reset(void); extern void __iomem *early_ioremap(unsigned long offset, unsigned long size); extern void __iomem *early_memremap(unsigned long offset, unsigned long size); diff --git a/trunk/arch/x86/include/asm/msr-index.h b/trunk/arch/x86/include/asm/msr-index.h index 358acc59ae04..cb58643947b9 100644 --- a/trunk/arch/x86/include/asm/msr-index.h +++ b/trunk/arch/x86/include/asm/msr-index.h @@ -202,35 +202,6 @@ #define MSR_IA32_THERM_STATUS 0x0000019c #define MSR_IA32_MISC_ENABLE 0x000001a0 -/* MISC_ENABLE bits: architectural */ -#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << 0) -#define MSR_IA32_MISC_ENABLE_TCC (1ULL << 1) -#define MSR_IA32_MISC_ENABLE_EMON (1ULL << 7) -#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << 11) -#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << 12) -#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << 16) -#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18) -#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << 22) -#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << 23) -#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << 34) - -/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */ -#define MSR_IA32_MISC_ENABLE_X87_COMPAT (1ULL << 2) -#define MSR_IA32_MISC_ENABLE_TM1 (1ULL << 3) -#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << 4) -#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE (1ULL << 6) -#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK (1ULL << 8) -#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << 9) -#define MSR_IA32_MISC_ENABLE_FERR (1ULL << 10) -#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX (1ULL << 10) -#define MSR_IA32_MISC_ENABLE_TM2 (1ULL << 13) -#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << 19) -#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << 20) -#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT (1ULL << 24) -#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << 37) -#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << 38) -#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << 39) - /* Intel Model 6 */ #define MSR_P6_EVNTSEL0 0x00000186 #define MSR_P6_EVNTSEL1 0x00000187 diff --git a/trunk/arch/x86/include/asm/pgalloc.h b/trunk/arch/x86/include/asm/pgalloc.h index dd14c54ac718..cb7c151a8bff 100644 --- a/trunk/arch/x86/include/asm/pgalloc.h +++ b/trunk/arch/x86/include/asm/pgalloc.h @@ -42,7 +42,6 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) static inline void pte_free(struct mm_struct *mm, struct page *pte) { - pgtable_page_dtor(pte); __free_page(pte); 
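The msr-index.h hunk above removes the MSR_IA32_MISC_ENABLE_* bit definitions (bits 0, 1, 7, 11, 12, 16, 18, 22, 23 and 34 architectural, the rest model-specific). The same register can be inspected from userspace through the msr driver, which exposes each MSR at its index as a pread() offset on /dev/cpu/N/msr. A sketch, assuming the msr module is loaded and root privileges; the three masks are copied from the removed hunk:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_MISC_ENABLE                    0x000001a0
#define MSR_IA32_MISC_ENABLE_FAST_STRING        (1ULL << 0)
#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << 16)
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID        (1ULL << 22)

int main(void)
{
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/cpu/0/msr");
                return 1;
        }
        /* the MSR index doubles as the file offset */
        if (pread(fd, &val, sizeof(val), MSR_IA32_MISC_ENABLE) != sizeof(val)) {
                perror("pread");
                close(fd);
                return 1;
        }
        printf("fast strings: %s\n",
               (val & MSR_IA32_MISC_ENABLE_FAST_STRING) ? "on" : "off");
        printf("EIST:         %s\n",
               (val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP) ? "on" : "off");
        printf("CPUID limit:  %s\n",
               (val & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) ? "on" : "off");
        close(fd);
        return 0;
}

The intel.c hunk further down removes the boot-time code that cleared the LIMIT_CPUID bit, which is exactly the bit this sketch reports.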
} diff --git a/trunk/arch/x86/include/asm/syscalls.h b/trunk/arch/x86/include/asm/syscalls.h index c0b0bda754ee..9c6797c3e56c 100644 --- a/trunk/arch/x86/include/asm/syscalls.h +++ b/trunk/arch/x86/include/asm/syscalls.h @@ -40,7 +40,7 @@ asmlinkage int sys_sigaction(int, const struct old_sigaction __user *, struct old_sigaction __user *); asmlinkage int sys_sigaltstack(unsigned long); asmlinkage unsigned long sys_sigreturn(unsigned long); -asmlinkage int sys_rt_sigreturn(unsigned long); +asmlinkage int sys_rt_sigreturn(struct pt_regs); /* kernel/ioport.c */ asmlinkage long sys_iopl(unsigned long); diff --git a/trunk/arch/x86/include/asm/timex.h b/trunk/arch/x86/include/asm/timex.h index b5c9d45c981f..1287dc1347d6 100644 --- a/trunk/arch/x86/include/asm/timex.h +++ b/trunk/arch/x86/include/asm/timex.h @@ -1,13 +1,18 @@ +/* x86 architecture timex specifications */ #ifndef _ASM_X86_TIMEX_H #define _ASM_X86_TIMEX_H #include #include -/* The PIT ticks at this frequency (in HZ): */ -#define PIT_TICK_RATE 1193182 - -#define CLOCK_TICK_RATE PIT_TICK_RATE +#ifdef CONFIG_X86_ELAN +# define PIT_TICK_RATE 1189200 /* AMD Elan has different frequency! */ +#elif defined(CONFIG_X86_RDC321X) +# define PIT_TICK_RATE 1041667 /* Underlying HZ for R8610 */ +#else +# define PIT_TICK_RATE 1193182 /* Underlying HZ */ +#endif +#define CLOCK_TICK_RATE PIT_TICK_RATE #define ARCH_HAS_READ_CURRENT_TIMER diff --git a/trunk/arch/x86/kernel/apic.c b/trunk/arch/x86/kernel/apic.c index 4b6df2469fe3..0f830e4f5675 100644 --- a/trunk/arch/x86/kernel/apic.c +++ b/trunk/arch/x86/kernel/apic.c @@ -895,10 +895,6 @@ void disable_local_APIC(void) { unsigned int value; - /* APIC hasn't been mapped yet */ - if (!apic_phys) - return; - clear_local_APIC(); /* @@ -1837,11 +1833,6 @@ void __cpuinit generic_processor_info(int apicid, int version) num_processors++; cpu = cpumask_next_zero(-1, cpu_present_mask); - if (version != apic_version[boot_cpu_physical_apicid]) - WARN_ONCE(1, - "ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n", - apic_version[boot_cpu_physical_apicid], cpu, version); - physid_set(apicid, phys_cpu_present_map); if (apicid == boot_cpu_physical_apicid) { /* diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 4b1c319d30c3..6f11e029e8c5 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -145,14 +145,13 @@ typedef union { struct drv_cmd { unsigned int type; - const struct cpumask *mask; + cpumask_var_t mask; drv_addr_union addr; u32 val; }; -static long do_drv_read(void *_cmd) +static void do_drv_read(struct drv_cmd *cmd) { - struct drv_cmd *cmd = _cmd; u32 h; switch (cmd->type) { @@ -167,12 +166,10 @@ static long do_drv_read(void *_cmd) default: break; } - return 0; } -static long do_drv_write(void *_cmd) +static void do_drv_write(struct drv_cmd *cmd) { - struct drv_cmd *cmd = _cmd; u32 lo, hi; switch (cmd->type) { @@ -189,23 +186,30 @@ static long do_drv_write(void *_cmd) default: break; } - return 0; } static void drv_read(struct drv_cmd *cmd) { + cpumask_t saved_mask = current->cpus_allowed; cmd->val = 0; - work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd); + set_cpus_allowed_ptr(current, cmd->mask); + do_drv_read(cmd); + set_cpus_allowed_ptr(current, &saved_mask); } static void drv_write(struct drv_cmd *cmd) { + cpumask_t saved_mask = current->cpus_allowed; unsigned int i; for_each_cpu(i, cmd->mask) { - work_on_cpu(i, do_drv_write, cmd); + set_cpus_allowed_ptr(current, 
cpumask_of(i)); + do_drv_write(cmd); } + + set_cpus_allowed_ptr(current, &saved_mask); + return; } static u32 get_cur_val(const struct cpumask *mask) @@ -231,7 +235,8 @@ static u32 get_cur_val(const struct cpumask *mask) return 0; } - cmd.mask = mask; + cpumask_copy(cmd.mask, mask); + drv_read(&cmd); dprintk("get_cur_val = %u\n", cmd.val); @@ -363,7 +368,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) return freq; } -static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, +static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq, struct acpi_cpufreq_data *data) { unsigned int cur_freq; @@ -398,6 +403,9 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, return -ENODEV; } + if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL))) + return -ENOMEM; + perf = data->acpi_data; result = cpufreq_frequency_table_target(policy, data->freq_table, @@ -442,9 +450,9 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, /* cpufreq holds the hotplug lock, so we are safe from here on */ if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) - cmd.mask = policy->cpus; + cpumask_and(cmd.mask, cpu_online_mask, policy->cpus); else - cmd.mask = cpumask_of(policy->cpu); + cpumask_copy(cmd.mask, cpumask_of(policy->cpu)); freqs.old = perf->states[perf->state].core_frequency * 1000; freqs.new = data->freq_table[next_state].frequency; @@ -471,6 +479,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, perf->state = next_perf_state; out: + free_cpumask_var(cmd.mask); return result; } diff --git a/trunk/arch/x86/kernel/cpu/intel.c b/trunk/arch/x86/kernel/cpu/intel.c index 549f2ada55f5..8ea6929e974c 100644 --- a/trunk/arch/x86/kernel/cpu/intel.c +++ b/trunk/arch/x86/kernel/cpu/intel.c @@ -29,19 +29,6 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) { - /* Unmask CPUID levels if masked: */ - if (c->x86 == 6 && c->x86_model >= 15) { - u64 misc_enable; - - rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); - - if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) { - misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; - wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); - c->cpuid_level = cpuid_eax(0); - } - } - if ((c->x86 == 0xf && c->x86_model >= 0x03) || (c->x86 == 0x6 && c->x86_model >= 0x0e)) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); diff --git a/trunk/arch/x86/kernel/cpu/mtrr/generic.c b/trunk/arch/x86/kernel/cpu/mtrr/generic.c index 0c0a455fe95c..b59ddcc88cd8 100644 --- a/trunk/arch/x86/kernel/cpu/mtrr/generic.c +++ b/trunk/arch/x86/kernel/cpu/mtrr/generic.c @@ -33,13 +33,11 @@ u64 mtrr_tom2; struct mtrr_state_type mtrr_state = {}; EXPORT_SYMBOL_GPL(mtrr_state); -static int __initdata mtrr_show; -static int __init mtrr_debug(char *opt) -{ - mtrr_show = 1; - return 0; -} -early_param("mtrr.show", mtrr_debug); +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "mtrr." 
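The timex.h hunk above makes PIT_TICK_RATE configuration-dependent again: 1193182 Hz normally, 1189200 Hz on AMD Elan, 1041667 Hz on the RDC R8610. That rate feeds the PIT reload value, which the kernel computes as the input clock divided by HZ with rounding (the LATCH macro). A quick arithmetic sketch of what common HZ choices yield; the HZ values are illustrative configs, not anything from this patch:

#include <stdio.h>

#define PIT_TICK_RATE   1193182         /* default rate from the hunk above */

int main(void)
{
        static const int hz[] = { 100, 250, 1000 };

        for (int i = 0; i < 3; i++) {
                /* reload value: input clock / HZ, rounded to nearest */
                unsigned int latch = (PIT_TICK_RATE + hz[i] / 2) / hz[i];

                printf("HZ=%4d  latch=%5u  actual tick %.4f Hz\n",
                       hz[i], latch, (double)PIT_TICK_RATE / latch);
        }
        return 0;
}

The residual error between the requested and actual tick is why getting PIT_TICK_RATE right per platform matters for timekeeping.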
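The acpi-cpufreq hunks above replace work_on_cpu() with the older affinity-bounce pattern: save current->cpus_allowed, pin the task to the target CPU with set_cpus_allowed_ptr(), perform the per-CPU MSR or I/O access, then restore the saved mask. The same shape can be tried from userspace with sched_setaffinity(); a sketch with error handling abbreviated and CPU 0 assumed online:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static long do_on_cpu(int target_cpu, long (*fn)(void))
{
        cpu_set_t saved, pinned;
        long ret;

        if (sched_getaffinity(0, sizeof(saved), &saved))    /* save mask */
                return -1;
        CPU_ZERO(&pinned);
        CPU_SET(target_cpu, &pinned);
        if (sched_setaffinity(0, sizeof(pinned), &pinned))  /* migrate */
                return -1;
        ret = fn();                             /* runs on target_cpu */
        sched_setaffinity(0, sizeof(saved), &saved);        /* restore */
        return ret;
}

static long which_cpu(void)
{
        return sched_getcpu();
}

int main(void)
{
        printf("ran on CPU %ld\n", do_on_cpu(0, which_cpu));
        return 0;
}

The trade-off mirrored in the hunk: bouncing the caller is simple but costs two migrations per access, whereas work_on_cpu() hands the function to a worker already on the target CPU.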
+ +static int mtrr_show; +module_param_named(show, mtrr_show, bool, 0); /* * Returns the effective MTRR type for the region diff --git a/trunk/arch/x86/kernel/hpet.c b/trunk/arch/x86/kernel/hpet.c index 64d5ad0b8add..cd759ad90690 100644 --- a/trunk/arch/x86/kernel/hpet.c +++ b/trunk/arch/x86/kernel/hpet.c @@ -628,12 +628,11 @@ static int hpet_cpuhp_notify(struct notifier_block *n, switch (action & 0xf) { case CPU_ONLINE: - INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work); + INIT_DELAYED_WORK(&work.work, hpet_work); init_completion(&work.complete); /* FIXME: add schedule_work_on() */ schedule_delayed_work_on(cpu, &work.work, 0); wait_for_completion(&work.complete); - destroy_timer_on_stack(&work.work.timer); break; case CPU_DEAD: if (hdev) { diff --git a/trunk/arch/x86/kernel/setup_percpu.c b/trunk/arch/x86/kernel/setup_percpu.c index 01161077a49c..55c46074eba0 100644 --- a/trunk/arch/x86/kernel/setup_percpu.c +++ b/trunk/arch/x86/kernel/setup_percpu.c @@ -136,7 +136,7 @@ static void __init setup_cpu_pda_map(void) #ifdef CONFIG_X86_64 /* correctly size the local cpu masks */ -static void __init setup_cpu_local_masks(void) +static void setup_cpu_local_masks(void) { alloc_bootmem_cpumask_var(&cpu_initialized_mask); alloc_bootmem_cpumask_var(&cpu_callin_mask); diff --git a/trunk/arch/x86/kernel/signal.c b/trunk/arch/x86/kernel/signal.c index df0587f24c54..89bb7668041d 100644 --- a/trunk/arch/x86/kernel/signal.c +++ b/trunk/arch/x86/kernel/signal.c @@ -632,16 +632,9 @@ static long do_rt_sigreturn(struct pt_regs *regs) } #ifdef CONFIG_X86_32 -/* - * Note: do not pass in pt_regs directly as with tail-call optimization - * GCC will incorrectly stomp on the caller's frame and corrupt user-space - * register state: - */ -asmlinkage int sys_rt_sigreturn(unsigned long __unused) +asmlinkage int sys_rt_sigreturn(struct pt_regs regs) { - struct pt_regs *regs = (struct pt_regs *)&__unused; - - return do_rt_sigreturn(regs); + return do_rt_sigreturn(®s); } #else /* !CONFIG_X86_32 */ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) diff --git a/trunk/arch/x86/kernel/tlb_uv.c b/trunk/arch/x86/kernel/tlb_uv.c index 6812b829ed83..f885023167e0 100644 --- a/trunk/arch/x86/kernel/tlb_uv.c +++ b/trunk/arch/x86/kernel/tlb_uv.c @@ -200,7 +200,6 @@ static int uv_wait_completion(struct bau_desc *bau_desc, destination_timeouts = 0; } } - cpu_relax(); } return FLUSH_COMPLETE; } diff --git a/trunk/arch/x86/kernel/vmi_32.c b/trunk/arch/x86/kernel/vmi_32.c index 1d3302cc2ddf..23206ba16874 100644 --- a/trunk/arch/x86/kernel/vmi_32.c +++ b/trunk/arch/x86/kernel/vmi_32.c @@ -858,7 +858,7 @@ void __init vmi_init(void) #endif } -void __init vmi_activate(void) +void vmi_activate(void) { unsigned long flags; diff --git a/trunk/arch/x86/lib/usercopy_32.c b/trunk/arch/x86/lib/usercopy_32.c index 7c8ca91bb9ec..4a20b2f9a381 100644 --- a/trunk/arch/x86/lib/usercopy_32.c +++ b/trunk/arch/x86/lib/usercopy_32.c @@ -56,7 +56,7 @@ do { \ " jmp 2b\n" \ ".previous\n" \ _ASM_EXTABLE(0b,3b) \ - : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \ + : "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \ "=&D" (__d2) \ : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ : "memory"); \ @@ -218,7 +218,7 @@ long strnlen_user(const char __user *s, long n) " .align 4\n" " .long 0b,2b\n" ".previous" - :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp) + :"=r" (n), "=D" (s), "=a" (res), "=c" (tmp) :"0" (n), "1" (s), "2" (0), "3" (mask) :"cc"); return res & mask; diff --git a/trunk/arch/x86/lib/usercopy_64.c 
b/trunk/arch/x86/lib/usercopy_64.c index ec13cb5f17ed..64d6c84e6353 100644 --- a/trunk/arch/x86/lib/usercopy_64.c +++ b/trunk/arch/x86/lib/usercopy_64.c @@ -32,7 +32,7 @@ do { \ " jmp 2b\n" \ ".previous\n" \ _ASM_EXTABLE(0b,3b) \ - : "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \ + : "=r"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \ "=&D" (__d2) \ : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ : "memory"); \ @@ -86,7 +86,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size) ".previous\n" _ASM_EXTABLE(0b,3b) _ASM_EXTABLE(1b,2b) - : [size8] "=&c"(size), [dst] "=&D" (__d0) + : [size8] "=c"(size), [dst] "=&D" (__d0) : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr), [zero] "r" (0UL), [eight] "r" (8UL)); return size; diff --git a/trunk/arch/x86/mm/init_32.c b/trunk/arch/x86/mm/init_32.c index 2cef05074413..88f1b10de3be 100644 --- a/trunk/arch/x86/mm/init_32.c +++ b/trunk/arch/x86/mm/init_32.c @@ -138,47 +138,6 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) return pte_offset_kernel(pmd, 0); } -static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, - unsigned long vaddr, pte_t *lastpte) -{ -#ifdef CONFIG_HIGHMEM - /* - * Something (early fixmap) may already have put a pte - * page here, which causes the page table allocation - * to become nonlinear. Attempt to fix it, and if it - * is still nonlinear then we have to bug. - */ - int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT; - int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT; - - if (pmd_idx_kmap_begin != pmd_idx_kmap_end - && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin - && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end - && ((__pa(pte) >> PAGE_SHIFT) < table_start - || (__pa(pte) >> PAGE_SHIFT) >= table_end)) { - pte_t *newpte; - int i; - - BUG_ON(after_init_bootmem); - newpte = alloc_low_page(); - for (i = 0; i < PTRS_PER_PTE; i++) - set_pte(newpte + i, pte[i]); - - paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT); - set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE)); - BUG_ON(newpte != pte_offset_kernel(pmd, 0)); - __flush_tlb_all(); - - paravirt_release_pte(__pa(pte) >> PAGE_SHIFT); - pte = newpte; - } - BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1) - && vaddr > fix_to_virt(FIX_KMAP_END) - && lastpte && lastpte + PTRS_PER_PTE != pte); -#endif - return pte; -} - /* * This function initializes a certain range of kernel virtual memory * with new bootmem page tables, everywhere page tables are missing in @@ -195,7 +154,6 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) unsigned long vaddr; pgd_t *pgd; pmd_t *pmd; - pte_t *pte = NULL; vaddr = start; pgd_idx = pgd_index(vaddr); @@ -207,8 +165,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) pmd = pmd + pmd_index(vaddr); for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) { - pte = page_table_kmap_check(one_page_table_init(pmd), - pmd, vaddr, pte); + one_page_table_init(pmd); vaddr += PMD_SIZE; } @@ -551,6 +508,7 @@ static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base) * Fixed mappings, only the page table structure has to be * created - mappings will be set by set_fixmap(): */ + early_ioremap_clear(); vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; page_table_range_init(vaddr, end, pgd_base); @@ -843,7 +801,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse) tables += PAGE_ALIGN(ptes * sizeof(pte_t)); /* 
for fixmap */ - tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t)); + tables += PAGE_SIZE * 2; /* * RED-PEN putting page tables only on node 0 could diff --git a/trunk/arch/x86/mm/init_64.c b/trunk/arch/x86/mm/init_64.c index e6d36b490250..23f68e77ad1f 100644 --- a/trunk/arch/x86/mm/init_64.c +++ b/trunk/arch/x86/mm/init_64.c @@ -596,7 +596,7 @@ static void __init init_gbpages(void) direct_gbpages = 0; } -static unsigned long __meminit kernel_physical_mapping_init(unsigned long start, +static unsigned long __init kernel_physical_mapping_init(unsigned long start, unsigned long end, unsigned long page_size_mask) { diff --git a/trunk/arch/x86/mm/iomap_32.c b/trunk/arch/x86/mm/iomap_32.c index ca53224fc56c..d0151d8ce452 100644 --- a/trunk/arch/x86/mm/iomap_32.c +++ b/trunk/arch/x86/mm/iomap_32.c @@ -17,7 +17,6 @@ */ #include -#include #include /* Map 'pfn' using fixed map 'type' and protections 'prot' @@ -30,15 +29,6 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) pagefault_disable(); - /* - * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS. - * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the - * MTRR is UC or WC. UC_MINUS gets the real intention, of the - * user, which is "WC if the MTRR is WC, UC if you can't do that." - */ - if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC)) - prot = PAGE_KERNEL_UC_MINUS; - idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); set_pte(kmap_pte-idx, pfn_pte(pfn, prot)); diff --git a/trunk/arch/x86/mm/ioremap.c b/trunk/arch/x86/mm/ioremap.c index af750ab973b6..bd85d42819e1 100644 --- a/trunk/arch/x86/mm/ioremap.c +++ b/trunk/arch/x86/mm/ioremap.c @@ -557,9 +557,34 @@ void __init early_ioremap_init(void) } } +void __init early_ioremap_clear(void) +{ + pmd_t *pmd; + + if (early_ioremap_debug) + printk(KERN_INFO "early_ioremap_clear()\n"); + + pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); + pmd_clear(pmd); + paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT); + __flush_tlb_all(); +} + void __init early_ioremap_reset(void) { + enum fixed_addresses idx; + unsigned long addr, phys; + pte_t *pte; + after_paging_init = 1; + for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) { + addr = fix_to_virt(idx); + pte = early_ioremap_pte(addr); + if (pte_present(*pte)) { + phys = pte_val(*pte) & PAGE_MASK; + set_fixmap(idx, phys); + } + } } static void __init __early_set_fixmap(enum fixed_addresses idx, diff --git a/trunk/arch/x86/mm/pageattr.c b/trunk/arch/x86/mm/pageattr.c index 84ba74820ad6..e89d24815f26 100644 --- a/trunk/arch/x86/mm/pageattr.c +++ b/trunk/arch/x86/mm/pageattr.c @@ -534,36 +534,6 @@ static int split_large_page(pte_t *kpte, unsigned long address) return 0; } -static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr, - int primary) -{ - /* - * Ignore all non primary paths. - */ - if (!primary) - return 0; - - /* - * Ignore the NULL PTE for kernel identity mapping, as it is expected - * to have holes. - * Also set numpages to '1' indicating that we processed cpa req for - * one virtual address page and its pfn. TBD: numpages can be set based - * on the initial value and the level returned by lookup_address(). - */ - if (within(vaddr, PAGE_OFFSET, - PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) { - cpa->numpages = 1; - cpa->pfn = __pa(vaddr) >> PAGE_SHIFT; - return 0; - } else { - WARN(1, KERN_WARNING "CPA: called for zero pte. 
" - "vaddr = %lx cpa->vaddr = %lx\n", vaddr, - *cpa->vaddr); - - return -EFAULT; - } -} - static int __change_page_attr(struct cpa_data *cpa, int primary) { unsigned long address; @@ -579,11 +549,17 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) repeat: kpte = lookup_address(address, &level); if (!kpte) - return __cpa_process_fault(cpa, address, primary); + return 0; old_pte = *kpte; - if (!pte_val(old_pte)) - return __cpa_process_fault(cpa, address, primary); + if (!pte_val(old_pte)) { + if (!primary) + return 0; + WARN(1, KERN_WARNING "CPA: called for zero pte. " + "vaddr = %lx cpa->vaddr = %lx\n", address, + *cpa->vaddr); + return -EINVAL; + } if (level == PG_LEVEL_4K) { pte_t new_pte; @@ -681,7 +657,12 @@ static int cpa_process_alias(struct cpa_data *cpa) vaddr = *cpa->vaddr; if (!(within(vaddr, PAGE_OFFSET, - PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) { + PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT)) +#ifdef CONFIG_X86_64 + || within(vaddr, PAGE_OFFSET + (1UL<<32), + PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)) +#endif + )) { alias_cpa = *cpa; temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT); diff --git a/trunk/arch/x86/mm/pat.c b/trunk/arch/x86/mm/pat.c index 7b61036427df..8b08fb955274 100644 --- a/trunk/arch/x86/mm/pat.c +++ b/trunk/arch/x86/mm/pat.c @@ -333,23 +333,11 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, req_type & _PAGE_CACHE_MASK); } - if (new_type) - *new_type = actual_type; - - /* - * For legacy reasons, some parts of the physical address range in the - * legacy 1MB region is treated as non-RAM (even when listed as RAM in - * the e820 tables). So we will track the memory attributes of this - * legacy 1MB region using the linear memtype_list always. - */ - if (end >= ISA_END_ADDRESS) { - is_range_ram = pagerange_is_ram(start, end); - if (is_range_ram == 1) - return reserve_ram_pages_type(start, end, req_type, - new_type); - else if (is_range_ram < 0) - return -EINVAL; - } + is_range_ram = pagerange_is_ram(start, end); + if (is_range_ram == 1) + return reserve_ram_pages_type(start, end, req_type, new_type); + else if (is_range_ram < 0) + return -EINVAL; new = kmalloc(sizeof(struct memtype), GFP_KERNEL); if (!new) @@ -359,6 +347,9 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, new->end = end; new->type = actual_type; + if (new_type) + *new_type = actual_type; + spin_lock(&memtype_lock); if (cached_entry && start >= cached_start) @@ -446,19 +437,11 @@ int free_memtype(u64 start, u64 end) if (is_ISA_range(start, end - 1)) return 0; - /* - * For legacy reasons, some parts of the physical address range in the - * legacy 1MB region is treated as non-RAM (even when listed as RAM in - * the e820 tables). So we will track the memory attributes of this - * legacy 1MB region using the linear memtype_list always. 
- */ - if (end >= ISA_END_ADDRESS) { - is_range_ram = pagerange_is_ram(start, end); - if (is_range_ram == 1) - return free_ram_pages_type(start, end); - else if (is_range_ram < 0) - return -EINVAL; - } + is_range_ram = pagerange_is_ram(start, end); + if (is_range_ram == 1) + return free_ram_pages_type(start, end); + else if (is_range_ram < 0) + return -EINVAL; spin_lock(&memtype_lock); list_for_each_entry(entry, &memtype_list, nd) { diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c index 33fbeb664f08..96316fd47233 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem.c @@ -3427,7 +3427,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, ret = i915_gem_init_phys_object(dev, id, obj->size); if (ret) { - DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); + DRM_ERROR("failed to init phys object %d size: %d\n", id, obj->size); goto out; } } diff --git a/trunk/drivers/ide/falconide.c b/trunk/drivers/ide/falconide.c index a638e952d67a..a5ba820d69bb 100644 --- a/trunk/drivers/ide/falconide.c +++ b/trunk/drivers/ide/falconide.c @@ -82,7 +82,7 @@ static const struct ide_tp_ops falconide_tp_ops = { static const struct ide_port_info falconide_port_info = { .tp_ops = &falconide_tp_ops, - .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_SERIALIZE, + .host_flags = IDE_HFLAG_NO_DMA, }; static void __init falconide_setup_ports(hw_regs_t *hw) diff --git a/trunk/drivers/ide/ide-probe.c b/trunk/drivers/ide/ide-probe.c index 0db1ed9f5fc2..312127ea443a 100644 --- a/trunk/drivers/ide/ide-probe.c +++ b/trunk/drivers/ide/ide-probe.c @@ -649,8 +649,7 @@ static int ide_register_port(ide_hwif_t *hwif) /* register with global device tree */ dev_set_name(&hwif->gendev, hwif->name); hwif->gendev.driver_data = hwif; - if (hwif->gendev.parent == NULL) - hwif->gendev.parent = hwif->dev; + hwif->gendev.parent = hwif->dev; hwif->gendev.release = hwif_release_dev; ret = device_register(&hwif->gendev); diff --git a/trunk/drivers/ide/palm_bk3710.c b/trunk/drivers/ide/palm_bk3710.c index f38aac78044c..a7ac490c9ae3 100644 --- a/trunk/drivers/ide/palm_bk3710.c +++ b/trunk/drivers/ide/palm_bk3710.c @@ -346,8 +346,7 @@ static int __init palm_bk3710_probe(struct platform_device *pdev) { struct clk *clk; struct resource *mem, *irq; - void __iomem *base; - unsigned long rate; + unsigned long base, rate; int i, rc; hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; @@ -383,13 +382,11 @@ static int __init palm_bk3710_probe(struct platform_device *pdev) base = IO_ADDRESS(mem->start); /* Configure the Palm Chip controller */ - palm_bk3710_chipinit(base); + palm_bk3710_chipinit((void __iomem *)base); for (i = 0; i < IDE_NR_PORTS - 2; i++) - hw.io_ports_array[i] = (unsigned long) - (base + IDE_PALM_ATA_PRI_REG_OFFSET + i); - hw.io_ports.ctl_addr = (unsigned long) - (base + IDE_PALM_ATA_PRI_CTL_OFFSET); + hw.io_ports_array[i] = base + IDE_PALM_ATA_PRI_REG_OFFSET + i; + hw.io_ports.ctl_addr = base + IDE_PALM_ATA_PRI_CTL_OFFSET; hw.irq = irq->start; hw.dev = &pdev->dev; hw.chipset = ide_palm3710; diff --git a/trunk/drivers/oprofile/cpu_buffer.c b/trunk/drivers/oprofile/cpu_buffer.c index e76d715e4342..2e03b6d796d3 100644 --- a/trunk/drivers/oprofile/cpu_buffer.c +++ b/trunk/drivers/oprofile/cpu_buffer.c @@ -393,21 +393,16 @@ oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs, return; fail: - entry->event = NULL; cpu_buf->sample_lost_overflow++; } int oprofile_add_data(struct op_entry *entry, unsigned long val) { - if 
(!entry->event) - return 0; return op_cpu_buffer_add_data(entry, val); } int oprofile_write_commit(struct op_entry *entry) { - if (!entry->event) - return -EINVAL; return op_cpu_buffer_write_commit(entry); } diff --git a/trunk/drivers/oprofile/cpu_buffer.h b/trunk/drivers/oprofile/cpu_buffer.h index 272995d20293..63f81c44846a 100644 --- a/trunk/drivers/oprofile/cpu_buffer.h +++ b/trunk/drivers/oprofile/cpu_buffer.h @@ -66,13 +66,6 @@ static inline void op_cpu_buffer_reset(int cpu) cpu_buf->last_task = NULL; } -/* - * op_cpu_buffer_add_data() and op_cpu_buffer_write_commit() may be - * called only if op_cpu_buffer_write_reserve() did not return NULL or - * entry->event != NULL, otherwise entry->size or entry->event will be - * used uninitialized. - */ - struct op_sample *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size); int op_cpu_buffer_write_commit(struct op_entry *entry); diff --git a/trunk/drivers/xen/balloon.c b/trunk/drivers/xen/balloon.c index 2ba8f95516a0..8dc7109d61b7 100644 --- a/trunk/drivers/xen/balloon.c +++ b/trunk/drivers/xen/balloon.c @@ -298,14 +298,6 @@ static int decrease_reservation(unsigned long nr_pages) frame_list[i] = pfn_to_mfn(pfn); scrub_page(page); - - if (!PageHighMem(page)) { - ret = HYPERVISOR_update_va_mapping( - (unsigned long)__va(pfn << PAGE_SHIFT), - __pte_ma(0), 0); - BUG_ON(ret); - } - } /* Ensure that ballooned highmem pages don't have kmaps. */ diff --git a/trunk/drivers/xen/xenfs/xenbus.c b/trunk/drivers/xen/xenfs/xenbus.c index a9592d981b10..875a4c59c594 100644 --- a/trunk/drivers/xen/xenfs/xenbus.c +++ b/trunk/drivers/xen/xenfs/xenbus.c @@ -291,7 +291,7 @@ static void watch_fired(struct xenbus_watch *watch, static int xenbus_write_transaction(unsigned msg_type, struct xenbus_file_priv *u) { - int rc; + int rc, ret; void *reply; struct xenbus_transaction_holder *trans = NULL; LIST_HEAD(staging_q); @@ -326,14 +326,15 @@ static int xenbus_write_transaction(unsigned msg_type, } mutex_lock(&u->reply_mutex); - rc = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg)); - if (!rc) - rc = queue_reply(&staging_q, reply, u->u.msg.len); - if (!rc) { + ret = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg)); + if (!ret) + ret = queue_reply(&staging_q, reply, u->u.msg.len); + if (!ret) { list_splice_tail(&staging_q, &u->read_buffers); wake_up(&u->read_waitq); } else { queue_cleanup(&staging_q); + rc = ret; } mutex_unlock(&u->reply_mutex); diff --git a/trunk/fs/Kconfig b/trunk/fs/Kconfig index 51307b0fdf0f..03fde694969e 100644 --- a/trunk/fs/Kconfig +++ b/trunk/fs/Kconfig @@ -27,91 +27,7 @@ config FS_MBCACHE default y if EXT4_FS=y && EXT4_FS_XATTR default m if EXT2_FS_XATTR || EXT3_FS_XATTR || EXT4_FS_XATTR -config REISERFS_FS - tristate "Reiserfs support" - help - Stores not just filenames but the files themselves in a balanced - tree. Uses journalling. - - Balanced trees are more efficient than traditional file system - architectural foundations. - - In general, ReiserFS is as fast as ext2, but is very efficient with - large directories and small files. Additional patches are needed - for NFS and quotas, please see for links. - - It is more easily extended to have features currently found in - database and keyword search systems than block allocation based file - systems are. The next version will be so extended, and will support - plugins consistent with our motto ``It takes more than a license to - make source code open.'' - - Read to learn more about reiserfs. - - Sponsored by Threshold Networks, Emusic.com, and Bigstorage.com. 
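The oprofile hunks above (cpu_buffer.c and cpu_buffer.h) remove the entry->event NULL guards together with the header comment stating that oprofile_add_data() and oprofile_write_commit() may only be called when the reservation succeeded. A toy sketch of that poison-on-failure contract; op_entry's fields here are stand-ins, not the real structure:

#include <stdio.h>
#include <stddef.h>

#define CAP 4

struct op_entry {
        long *data;             /* NULL marks a failed reservation */
        size_t size;
};

static long buffer[CAP];
static size_t head;

static int write_reserve(struct op_entry *entry, size_t size)
{
        if (head + size > CAP) {
                entry->data = NULL;     /* poison, like entry->event = NULL */
                return -1;
        }
        entry->data = &buffer[head];
        entry->size = size;
        head += size;
        return 0;
}

static int add_data(struct op_entry *entry, long val)
{
        if (!entry->data)       /* the guard the hunk above removes */
                return 0;
        *entry->data++ = val;
        return 1;
}

int main(void)
{
        struct op_entry e;

        if (!write_reserve(&e, 2)) {
                add_data(&e, 1);
                add_data(&e, 2);
        }
        printf("%ld %ld\n", buffer[0], buffer[1]);
        return 0;
}

Without the internal guard, every caller must check the reserve result itself before adding or committing.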
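Looking back at the usercopy_32.c and usercopy_64.c hunks a few files up: the constraint strings lose their "&" modifiers ("=&d" becomes "=d", "=&r" becomes "=r"). The "&" marks an earlyclobber output, telling gcc it must not assign that output a register any input still occupies, because the asm writes the output before its last read of the inputs. A self-contained x86 sketch of the hazard; the computation is illustrative:

#include <stdio.h>

static long sum3(long a, long b, long c)
{
        long out;

        /* out is written by the first instruction, before b and c are
         * read, so it must be earlyclobber ("=&r"); with plain "=r"
         * gcc may place out in the same register as b or c. */
        asm("mov %1, %0\n\t"
            "add %2, %0\n\t"
            "add %3, %0"
            : "=&r" (out)
            : "r" (a), "r" (b), "r" (c));
        return out;
}

int main(void)
{
        printf("%ld\n", sum3(1, 2, 3));         /* 6 */
        return 0;
}

Dropping "&" is only safe when the asm provably consumes every input before its first write to the output, or when matching constraints tie the operands explicitly.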
- - If you like it, you can pay us to add new features to it that you - need, buy a support contract, or pay us to port it to another OS. - -config REISERFS_CHECK - bool "Enable reiserfs debug mode" - depends on REISERFS_FS - help - If you set this to Y, then ReiserFS will perform every check it can - possibly imagine of its internal consistency throughout its - operation. It will also go substantially slower. More than once we - have forgotten that this was on, and then gone despondent over the - latest benchmarks.:-) Use of this option allows our team to go all - out in checking for consistency when debugging without fear of its - effect on end users. If you are on the verge of sending in a bug - report, say Y and you might get a useful error message. Almost - everyone should say N. - -config REISERFS_PROC_INFO - bool "Stats in /proc/fs/reiserfs" - depends on REISERFS_FS && PROC_FS - help - Create under /proc/fs/reiserfs a hierarchy of files, displaying - various ReiserFS statistics and internal data at the expense of - making your kernel or module slightly larger (+8 KB). This also - increases the amount of kernel memory required for each mount. - Almost everyone but ReiserFS developers and people fine-tuning - reiserfs or tracing problems should say N. - -config REISERFS_FS_XATTR - bool "ReiserFS extended attributes" - depends on REISERFS_FS - help - Extended attributes are name:value pairs associated with inodes by - the kernel or by users (see the attr(5) manual page, or visit - for details). - - If unsure, say N. - -config REISERFS_FS_POSIX_ACL - bool "ReiserFS POSIX Access Control Lists" - depends on REISERFS_FS_XATTR - select FS_POSIX_ACL - help - Posix Access Control Lists (ACLs) support permissions for users and - groups beyond the owner/group/world scheme. - - To learn more about Access Control Lists, visit the Posix ACLs for - Linux website . - - If you don't know what Access Control Lists are, say N - -config REISERFS_FS_SECURITY - bool "ReiserFS Security Labels" - depends on REISERFS_FS_XATTR - help - Security labels support alternative access control models - implemented by security modules like SELinux. This option - enables an extended attribute handler for file security - labels in the ReiserFS filesystem. - - If you are not using a security module that requires using - extended attributes for file security labels, say N. 
+source "fs/reiserfs/Kconfig" config JFS_FS tristate "JFS filesystem support" diff --git a/trunk/fs/fuse/dev.c b/trunk/fs/fuse/dev.c index ba76b68c52ff..e0c7ada08a1f 100644 --- a/trunk/fs/fuse/dev.c +++ b/trunk/fs/fuse/dev.c @@ -281,8 +281,7 @@ __releases(&fc->lock) fc->blocked = 0; wake_up_all(&fc->blocked_waitq); } - if (fc->num_background == FUSE_CONGESTION_THRESHOLD && - fc->connected) { + if (fc->num_background == FUSE_CONGESTION_THRESHOLD) { clear_bdi_congested(&fc->bdi, READ); clear_bdi_congested(&fc->bdi, WRITE); } @@ -826,21 +825,16 @@ static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size, struct fuse_copy_state *cs) { struct fuse_notify_poll_wakeup_out outarg; - int err = -EINVAL; + int err; if (size != sizeof(outarg)) - goto err; + return -EINVAL; err = fuse_copy_one(cs, &outarg, sizeof(outarg)); if (err) - goto err; + return err; - fuse_copy_finish(cs); return fuse_notify_poll_wakeup(fc, &outarg); - -err: - fuse_copy_finish(cs); - return err; } static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, @@ -851,7 +845,6 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, return fuse_notify_poll(fc, size, cs); default: - fuse_copy_finish(cs); return -EINVAL; } } @@ -930,6 +923,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, */ if (!oh.unique) { err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs); + fuse_copy_finish(&cs); return err ? err : nbytes; } diff --git a/trunk/fs/fuse/file.c b/trunk/fs/fuse/file.c index d9fdb7cec538..e8162646a9b5 100644 --- a/trunk/fs/fuse/file.c +++ b/trunk/fs/fuse/file.c @@ -54,7 +54,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) ff->reserved_req = fuse_request_alloc(); if (!ff->reserved_req) { kfree(ff); - return NULL; + ff = NULL; } else { INIT_LIST_HEAD(&ff->write_entry); atomic_set(&ff->count, 0); diff --git a/trunk/fs/fuse/inode.c b/trunk/fs/fuse/inode.c index 459b73dd45e1..47c96fdca1ac 100644 --- a/trunk/fs/fuse/inode.c +++ b/trunk/fs/fuse/inode.c @@ -292,7 +292,6 @@ static void fuse_put_super(struct super_block *sb) list_del(&fc->entry); fuse_ctl_remove_conn(fc); mutex_unlock(&fuse_mutex); - bdi_destroy(&fc->bdi); fuse_conn_put(fc); } @@ -533,6 +532,7 @@ void fuse_conn_put(struct fuse_conn *fc) if (fc->destroy_req) fuse_request_free(fc->destroy_req); mutex_destroy(&fc->inst_mutex); + bdi_destroy(&fc->bdi); fc->release(fc); } } @@ -805,18 +805,16 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) int err; int is_bdev = sb->s_bdev != NULL; - err = -EINVAL; if (sb->s_flags & MS_MANDLOCK) - goto err; + return -EINVAL; if (!parse_fuse_opt((char *) data, &d, is_bdev)) - goto err; + return -EINVAL; if (is_bdev) { #ifdef CONFIG_BLOCK - err = -EINVAL; if (!sb_set_blocksize(sb, d.blksize)) - goto err; + return -EINVAL; #endif } else { sb->s_blocksize = PAGE_CACHE_SIZE; @@ -828,22 +826,20 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) sb->s_export_op = &fuse_export_operations; file = fget(d.fd); - err = -EINVAL; if (!file) - goto err; + return -EINVAL; if (file->f_op != &fuse_dev_operations) - goto err_fput; + return -EINVAL; fc = kmalloc(sizeof(*fc), GFP_KERNEL); - err = -ENOMEM; if (!fc) - goto err_fput; + return -ENOMEM; err = fuse_conn_init(fc, sb); if (err) { kfree(fc); - goto err_fput; + return err; } fc->release = fuse_free_conn; @@ -858,12 +854,12 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) err = -ENOMEM; root = fuse_get_root_inode(sb, d.rootmode); if 
(!root) - goto err_put_conn; + goto err; root_dentry = d_alloc_root(root); if (!root_dentry) { iput(root); - goto err_put_conn; + goto err; } init_req = fuse_request_alloc(); @@ -907,11 +903,9 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) fuse_request_free(init_req); err_put_root: dput(root_dentry); - err_put_conn: - fuse_conn_put(fc); - err_fput: - fput(file); err: + fput(file); + fuse_conn_put(fc); return err; } diff --git a/trunk/fs/notify/inotify/inotify_user.c b/trunk/fs/notify/inotify/inotify_user.c index bed766e435b5..d53a1838d6e8 100644 --- a/trunk/fs/notify/inotify/inotify_user.c +++ b/trunk/fs/notify/inotify/inotify_user.c @@ -427,61 +427,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait) return ret; } -/* - * Get an inotify_kernel_event if one exists and is small - * enough to fit in "count". Return an error pointer if - * not large enough. - * - * Called with the device ev_mutex held. - */ -static struct inotify_kernel_event *get_one_event(struct inotify_device *dev, - size_t count) -{ - size_t event_size = sizeof(struct inotify_event); - struct inotify_kernel_event *kevent; - - if (list_empty(&dev->events)) - return NULL; - - kevent = inotify_dev_get_event(dev); - if (kevent->name) - event_size += kevent->event.len; - - if (event_size > count) - return ERR_PTR(-EINVAL); - - remove_kevent(dev, kevent); - return kevent; -} - -/* - * Copy an event to user space, returning how much we copied. - * - * We already checked that the event size is smaller than the - * buffer we had in "get_one_event()" above. - */ -static ssize_t copy_event_to_user(struct inotify_kernel_event *kevent, - char __user *buf) -{ - size_t event_size = sizeof(struct inotify_event); - - if (copy_to_user(buf, &kevent->event, event_size)) - return -EFAULT; - - if (kevent->name) { - buf += event_size; - - if (copy_to_user(buf, kevent->name, kevent->event.len)) - return -EFAULT; - - event_size += kevent->event.len; - } - return event_size; -} - static ssize_t inotify_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { + size_t event_size = sizeof (struct inotify_event); struct inotify_device *dev; char __user *start; int ret; @@ -491,43 +440,81 @@ static ssize_t inotify_read(struct file *file, char __user *buf, dev = file->private_data; while (1) { - struct inotify_kernel_event *kevent; prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); mutex_lock(&dev->ev_mutex); - kevent = get_one_event(dev, count); - mutex_unlock(&dev->ev_mutex); - - if (kevent) { - ret = PTR_ERR(kevent); - if (IS_ERR(kevent)) - break; - ret = copy_event_to_user(kevent, buf); - free_kevent(kevent); - if (ret < 0) - break; - buf += ret; - count -= ret; - continue; + if (!list_empty(&dev->events)) { + ret = 0; + break; } + mutex_unlock(&dev->ev_mutex); - ret = -EAGAIN; - if (file->f_flags & O_NONBLOCK) - break; - ret = -EINTR; - if (signal_pending(current)) + if (file->f_flags & O_NONBLOCK) { + ret = -EAGAIN; break; + } - if (start != buf) + if (signal_pending(current)) { + ret = -EINTR; break; + } schedule(); } finish_wait(&dev->wq, &wait); - if (start != buf && ret != -EFAULT) + if (ret) + return ret; + + while (1) { + struct inotify_kernel_event *kevent; + ret = buf - start; + if (list_empty(&dev->events)) + break; + + kevent = inotify_dev_get_event(dev); + if (event_size + kevent->event.len > count) { + if (ret == 0 && count > 0) { + /* + * could not get a single event because we + * didn't have enough buffer space. 
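The fuse_fill_super() hunks above trade a chain of stacked unwind labels (err_put_conn, err_fput, err) for early returns plus one consolidated error label. The idiom at stake: acquire resources in order, release them in reverse from labels, so each failure point jumps past exactly the cleanups it needs. A generic sketch with toy resources:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
        char *conn, *root;
        int err = -1;

        conn = malloc(64);              /* stand-in for the fuse_conn */
        if (!conn)
                goto err;
        root = malloc(64);              /* stand-in for the root inode */
        if (!root)
                goto err_free_conn;

        printf("mounted\n");            /* success path */
        free(root);
        free(conn);
        return 0;

 err_free_conn:
        free(conn);
 err:
        return err;
}

int main(void)
{
        return setup() ? 1 : 0;
}

Collapsing labels, as the hunk does, is only correct if every path reaching the shared label really holds the same set of resources; that is the subtlety these conversions keep revisiting.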
+ */ + ret = -EINVAL; + } + break; + } + remove_kevent(dev, kevent); + + /* + * Must perform the copy_to_user outside the mutex in order + * to avoid a lock order reversal with mmap_sem. + */ + mutex_unlock(&dev->ev_mutex); + + if (copy_to_user(buf, &kevent->event, event_size)) { + ret = -EFAULT; + break; + } + buf += event_size; + count -= event_size; + + if (kevent->name) { + if (copy_to_user(buf, kevent->name, kevent->event.len)){ + ret = -EFAULT; + break; + } + buf += kevent->event.len; + count -= kevent->event.len; + } + + free_kevent(kevent); + + mutex_lock(&dev->ev_mutex); + } + mutex_unlock(&dev->ev_mutex); + return ret; } diff --git a/trunk/fs/reiserfs/Kconfig b/trunk/fs/reiserfs/Kconfig new file mode 100644 index 000000000000..949b8c6addc8 --- /dev/null +++ b/trunk/fs/reiserfs/Kconfig @@ -0,0 +1,85 @@ +config REISERFS_FS + tristate "Reiserfs support" + help + Stores not just filenames but the files themselves in a balanced + tree. Uses journalling. + + Balanced trees are more efficient than traditional file system + architectural foundations. + + In general, ReiserFS is as fast as ext2, but is very efficient with + large directories and small files. Additional patches are needed + for NFS and quotas, please see for links. + + It is more easily extended to have features currently found in + database and keyword search systems than block allocation based file + systems are. The next version will be so extended, and will support + plugins consistent with our motto ``It takes more than a license to + make source code open.'' + + Read to learn more about reiserfs. + + Sponsored by Threshold Networks, Emusic.com, and Bigstorage.com. + + If you like it, you can pay us to add new features to it that you + need, buy a support contract, or pay us to port it to another OS. + +config REISERFS_CHECK + bool "Enable reiserfs debug mode" + depends on REISERFS_FS + help + If you set this to Y, then ReiserFS will perform every check it can + possibly imagine of its internal consistency throughout its + operation. It will also go substantially slower. More than once we + have forgotten that this was on, and then gone despondent over the + latest benchmarks.:-) Use of this option allows our team to go all + out in checking for consistency when debugging without fear of its + effect on end users. If you are on the verge of sending in a bug + report, say Y and you might get a useful error message. Almost + everyone should say N. + +config REISERFS_PROC_INFO + bool "Stats in /proc/fs/reiserfs" + depends on REISERFS_FS && PROC_FS + help + Create under /proc/fs/reiserfs a hierarchy of files, displaying + various ReiserFS statistics and internal data at the expense of + making your kernel or module slightly larger (+8 KB). This also + increases the amount of kernel memory required for each mount. + Almost everyone but ReiserFS developers and people fine-tuning + reiserfs or tracing problems should say N. + +config REISERFS_FS_XATTR + bool "ReiserFS extended attributes" + depends on REISERFS_FS + help + Extended attributes are name:value pairs associated with inodes by + the kernel or by users (see the attr(5) manual page, or visit + for details). + + If unsure, say N. + +config REISERFS_FS_POSIX_ACL + bool "ReiserFS POSIX Access Control Lists" + depends on REISERFS_FS_XATTR + select FS_POSIX_ACL + help + Posix Access Control Lists (ACLs) support permissions for users and + groups beyond the owner/group/world scheme. + + To learn more about Access Control Lists, visit the Posix ACLs for + Linux website . 
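Two userspace-visible points in the inotify_read() rewrite above: the copy_to_user() calls happen only after dropping ev_mutex (the in-line comment notes the lock-order issue against mmap_sem), and a read() whose buffer cannot hold even one complete event, header plus name, fails with -EINVAL. The latter is why readers size their buffers for a maximal name; a sketch against the standard inotify API:

#include <sys/inotify.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* room for one event header plus a maximal name; a smaller
         * buffer risks the -EINVAL path shown in the hunk above */
        char buf[sizeof(struct inotify_event) + NAME_MAX + 1]
                __attribute__((aligned(__alignof__(struct inotify_event))));
        int fd = inotify_init();
        ssize_t len;

        if (fd < 0 || inotify_add_watch(fd, "/tmp", IN_CREATE) < 0) {
                perror("inotify");
                return 1;
        }
        len = read(fd, buf, sizeof(buf));       /* blocks until an event */
        if (len > 0) {
                struct inotify_event *ev = (struct inotify_event *)buf;

                printf("mask=%#x len=%u name=%s\n",
                       ev->mask, ev->len, ev->len ? ev->name : "");
        }
        close(fd);
        return 0;
}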
+ + If you don't know what Access Control Lists are, say N + +config REISERFS_FS_SECURITY + bool "ReiserFS Security Labels" + depends on REISERFS_FS_XATTR + help + Security labels support alternative access control models + implemented by security modules like SELinux. This option + enables an extended attribute handler for file security + labels in the ReiserFS filesystem. + + If you are not using a security module that requires using + extended attributes for file security labels, say N. diff --git a/trunk/fs/xfs/Kconfig b/trunk/fs/xfs/Kconfig index 29228f5899cd..3f53dd101f99 100644 --- a/trunk/fs/xfs/Kconfig +++ b/trunk/fs/xfs/Kconfig @@ -1,7 +1,6 @@ config XFS_FS tristate "XFS filesystem support" depends on BLOCK - select EXPORTFS help XFS is a high performance journaling filesystem which originated on the SGI IRIX platform. It is completely multi-threaded, can diff --git a/trunk/fs/xfs/linux-2.6/xfs_ioctl.c b/trunk/fs/xfs/linux-2.6/xfs_ioctl.c index 4bd112313f33..e5be1e0be802 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/trunk/fs/xfs/linux-2.6/xfs_ioctl.c @@ -50,14 +50,12 @@ #include "xfs_vnodeops.h" #include "xfs_quota.h" #include "xfs_inode_item.h" -#include "xfs_export.h" #include #include #include #include #include -#include /* * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to @@ -166,69 +164,97 @@ xfs_find_handle( return 0; } -/* - * No need to do permission checks on the various pathname components - * as the handle operations are privileged. - */ -STATIC int -xfs_handle_acceptable( - void *context, - struct dentry *dentry) -{ - return 1; -} /* - * Convert userspace handle data into a dentry. + * Convert userspace handle data into inode. + * + * We use the fact that all the fsop_handlereq ioctl calls have a data + * structure argument whose first component is always a xfs_fsop_handlereq_t, + * so we can pass that sub structure into this handy, shared routine. + * + * If no error, caller must always iput the returned inode. */ -struct dentry * -xfs_handle_to_dentry( - struct file *parfilp, - void __user *uhandle, - u32 hlen) +STATIC int +xfs_vget_fsop_handlereq( + xfs_mount_t *mp, + struct inode *parinode, /* parent inode pointer */ + xfs_fsop_handlereq_t *hreq, + struct inode **inode) { + void __user *hanp; + size_t hlen; + xfs_fid_t *xfid; + xfs_handle_t *handlep; xfs_handle_t handle; - struct xfs_fid64 fid; + xfs_inode_t *ip; + xfs_ino_t ino; + __u32 igen; + int error; /* * Only allow handle opens under a directory. 
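The xfs_ioctl.c hunks here swap the exportfs-based xfs_handle_to_dentry() for the older xfs_vget_fsop_handlereq(), but both implement the same idea: turn an opaque, rename-stable file handle back into an open file. The generic syscalls that later grew out of this style of interface, name_to_handle_at() and open_by_handle_at() (Linux 2.6.39+, CAP_DAC_READ_SEARCH required), show the userspace shape; a sketch:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
        struct file_handle *fh;
        int mount_id, mfd, fd;

        if (argc != 3) {
                fprintf(stderr, "usage: %s <file> <mountpoint>\n", argv[0]);
                return 2;
        }
        fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
        if (!fh)
                return 1;
        fh->handle_bytes = MAX_HANDLE_SZ;

        if (name_to_handle_at(AT_FDCWD, argv[1], fh, &mount_id, 0)) {
                perror("name_to_handle_at");
                return 1;
        }
        /* the handle stays valid across renames; reopening needs only
         * a descriptor on the containing mount (plus privilege) */
        mfd = open(argv[2], O_RDONLY | O_DIRECTORY);
        fd = open_by_handle_at(mfd, fh, O_RDONLY);
        if (fd < 0) {
                perror("open_by_handle_at");
                return 1;
        }
        printf("reopened %s as fd %d\n", argv[1], fd);
        return 0;
}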
*/ - if (!S_ISDIR(parfilp->f_path.dentry->d_inode->i_mode)) - return ERR_PTR(-ENOTDIR); - - if (hlen != sizeof(xfs_handle_t)) - return ERR_PTR(-EINVAL); - if (copy_from_user(&handle, uhandle, hlen)) - return ERR_PTR(-EFAULT); - if (handle.ha_fid.fid_len != - sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len)) - return ERR_PTR(-EINVAL); - - memset(&fid, 0, sizeof(struct fid)); - fid.ino = handle.ha_fid.fid_ino; - fid.gen = handle.ha_fid.fid_gen; - - return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3, - FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG, - xfs_handle_acceptable, NULL); -} + if (!S_ISDIR(parinode->i_mode)) + return XFS_ERROR(ENOTDIR); -STATIC struct dentry * -xfs_handlereq_to_dentry( - struct file *parfilp, - xfs_fsop_handlereq_t *hreq) -{ - return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen); + hanp = hreq->ihandle; + hlen = hreq->ihandlen; + handlep = &handle; + + if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep)) + return XFS_ERROR(EINVAL); + if (copy_from_user(handlep, hanp, hlen)) + return XFS_ERROR(EFAULT); + if (hlen < sizeof(*handlep)) + memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen); + if (hlen > sizeof(handlep->ha_fsid)) { + if (handlep->ha_fid.fid_len != + (hlen - sizeof(handlep->ha_fsid) - + sizeof(handlep->ha_fid.fid_len)) || + handlep->ha_fid.fid_pad) + return XFS_ERROR(EINVAL); + } + + /* + * Crack the handle, obtain the inode # & generation # + */ + xfid = (struct xfs_fid *)&handlep->ha_fid; + if (xfid->fid_len == sizeof(*xfid) - sizeof(xfid->fid_len)) { + ino = xfid->fid_ino; + igen = xfid->fid_gen; + } else { + return XFS_ERROR(EINVAL); + } + + /* + * Get the XFS inode, building a Linux inode to go with it. + */ + error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); + if (error) + return error; + if (ip == NULL) + return XFS_ERROR(EIO); + if (ip->i_d.di_gen != igen) { + xfs_iput_new(ip, XFS_ILOCK_SHARED); + return XFS_ERROR(ENOENT); + } + + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + *inode = VFS_I(ip); + return 0; } int xfs_open_by_handle( + xfs_mount_t *mp, + xfs_fsop_handlereq_t *hreq, struct file *parfilp, - xfs_fsop_handlereq_t *hreq) + struct inode *parinode) { const struct cred *cred = current_cred(); int error; - int fd; + int new_fd; int permflag; struct file *filp; struct inode *inode; @@ -237,21 +263,19 @@ xfs_open_by_handle( if (!capable(CAP_SYS_ADMIN)) return -XFS_ERROR(EPERM); - dentry = xfs_handlereq_to_dentry(parfilp, hreq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - inode = dentry->d_inode; + error = xfs_vget_fsop_handlereq(mp, parinode, hreq, &inode); + if (error) + return -error; /* Restrict xfs_open_by_handle to directories & regular files. */ if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) { - error = -XFS_ERROR(EPERM); - goto out_dput; + iput(inode); + return -XFS_ERROR(EINVAL); } #if BITS_PER_LONG != 32 hreq->oflags |= O_LARGEFILE; #endif - /* Put open permission in namei format. */ permflag = hreq->oflags; if ((permflag+1) & O_ACCMODE) @@ -261,45 +285,50 @@ xfs_open_by_handle( if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) && (permflag & FMODE_WRITE) && IS_APPEND(inode)) { - error = -XFS_ERROR(EPERM); - goto out_dput; + iput(inode); + return -XFS_ERROR(EPERM); } if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) { - error = -XFS_ERROR(EACCES); - goto out_dput; + iput(inode); + return -XFS_ERROR(EACCES); } /* Can't write directories. 
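The "(permflag+1) & O_ACCMODE" test above is the classic namei-format conversion: O_RDONLY, O_WRONLY and O_RDWR encode as 0, 1 and 2, and adding one turns them into a two-bit read/write mask (01, 10, 11), matching the kernel's FMODE_READ and FMODE_WRITE bits. A tiny demonstration; the FMODE_* values match the kernel's but are redefined locally for the demo:

#include <fcntl.h>
#include <stdio.h>

#define FMODE_READ      1
#define FMODE_WRITE     2

int main(void)
{
        int flags[] = { O_RDONLY, O_WRONLY, O_RDWR };

        for (int i = 0; i < 3; i++) {
                int mode = (flags[i] + 1) & O_ACCMODE;

                printf("oflags=%d -> %s%s\n", flags[i],
                       (mode & FMODE_READ) ? "r" : "-",
                       (mode & FMODE_WRITE) ? "w" : "-");
        }
        return 0;
}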
*/ - if (S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) { - error = -XFS_ERROR(EISDIR); - goto out_dput; + if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) { + iput(inode); + return -XFS_ERROR(EISDIR); } - fd = get_unused_fd(); - if (fd < 0) { - error = fd; - goto out_dput; + if ((new_fd = get_unused_fd()) < 0) { + iput(inode); + return new_fd; } - filp = dentry_open(dentry, mntget(parfilp->f_path.mnt), - hreq->oflags, cred); + dentry = d_obtain_alias(inode); + if (IS_ERR(dentry)) { + put_unused_fd(new_fd); + return PTR_ERR(dentry); + } + + /* Ensure umount returns EBUSY on umounts while this file is open. */ + mntget(parfilp->f_path.mnt); + + /* Create file pointer. */ + filp = dentry_open(dentry, parfilp->f_path.mnt, hreq->oflags, cred); if (IS_ERR(filp)) { - put_unused_fd(fd); - return PTR_ERR(filp); + put_unused_fd(new_fd); + return -XFS_ERROR(-PTR_ERR(filp)); } if (inode->i_mode & S_IFREG) { + /* invisible operation should not change atime */ filp->f_flags |= O_NOATIME; filp->f_mode |= FMODE_NOCMTIME; } - fd_install(fd, filp); - return fd; - - out_dput: - dput(dentry); - return error; + fd_install(new_fd, filp); + return new_fd; } /* @@ -330,10 +359,11 @@ do_readlink( int xfs_readlink_by_handle( - struct file *parfilp, - xfs_fsop_handlereq_t *hreq) + xfs_mount_t *mp, + xfs_fsop_handlereq_t *hreq, + struct inode *parinode) { - struct dentry *dentry; + struct inode *inode; __u32 olen; void *link; int error; @@ -341,28 +371,26 @@ xfs_readlink_by_handle( if (!capable(CAP_SYS_ADMIN)) return -XFS_ERROR(EPERM); - dentry = xfs_handlereq_to_dentry(parfilp, hreq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); + error = xfs_vget_fsop_handlereq(mp, parinode, hreq, &inode); + if (error) + return -error; /* Restrict this handle operation to symlinks only. 
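Both versions of xfs_open_by_handle() above set O_NOATIME on the resulting file, with the comment that an "invisible operation should not change atime". The same flag is available to ordinary userspace opens, subject to owning the file or holding CAP_FOWNER; a sketch that checks whether a read through such a descriptor leaves atime alone (the contrast only shows on filesystems not already mounted noatime/relatime):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        struct stat before, after;
        char c;
        int fd;

        if (argc != 2)
                return 2;
        if (stat(argv[1], &before))
                return 1;
        fd = open(argv[1], O_RDONLY | O_NOATIME);
        if (fd < 0) {
                perror("open(O_NOATIME)");
                return 1;
        }
        if (read(fd, &c, 1) < 0)        /* would normally bump atime */
                perror("read");
        close(fd);
        if (stat(argv[1], &after))
                return 1;
        printf("atime %s\n",
               before.st_atime == after.st_atime ? "unchanged" : "updated");
        return 0;
}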
*/ - if (!S_ISLNK(dentry->d_inode->i_mode)) { + if (!S_ISLNK(inode->i_mode)) { error = -XFS_ERROR(EINVAL); - goto out_dput; + goto out_iput; } if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) { error = -XFS_ERROR(EFAULT); - goto out_dput; + goto out_iput; } link = kmalloc(MAXPATHLEN+1, GFP_KERNEL); - if (!link) { - error = -XFS_ERROR(ENOMEM); - goto out_dput; - } + if (!link) + goto out_iput; - error = -xfs_readlink(XFS_I(dentry->d_inode), link); + error = -xfs_readlink(XFS_I(inode), link); if (error) goto out_kfree; error = do_readlink(hreq->ohandle, olen, link); @@ -371,31 +399,32 @@ xfs_readlink_by_handle( out_kfree: kfree(link); - out_dput: - dput(dentry); + out_iput: + iput(inode); return error; } STATIC int xfs_fssetdm_by_handle( - struct file *parfilp, - void __user *arg) + xfs_mount_t *mp, + void __user *arg, + struct inode *parinode) { int error; struct fsdmidata fsd; xfs_fsop_setdm_handlereq_t dmhreq; - struct dentry *dentry; + struct inode *inode; if (!capable(CAP_MKNOD)) return -XFS_ERROR(EPERM); if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t))) return -XFS_ERROR(EFAULT); - dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); + error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &inode); + if (error) + return -error; - if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { + if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) { error = -XFS_ERROR(EPERM); goto out; } @@ -405,23 +434,24 @@ xfs_fssetdm_by_handle( goto out; } - error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, + error = -xfs_set_dmattrs(XFS_I(inode), fsd.fsd_dmevmask, fsd.fsd_dmstate); out: - dput(dentry); + iput(inode); return error; } STATIC int xfs_attrlist_by_handle( - struct file *parfilp, - void __user *arg) + xfs_mount_t *mp, + void __user *arg, + struct inode *parinode) { - int error = -ENOMEM; + int error; attrlist_cursor_kern_t *cursor; xfs_fsop_attrlist_handlereq_t al_hreq; - struct dentry *dentry; + struct inode *inode; char *kbuf; if (!capable(CAP_SYS_ADMIN)) @@ -437,16 +467,16 @@ xfs_attrlist_by_handle( if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) return -XFS_ERROR(EINVAL); - dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); + error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq, &inode); + if (error) + goto out; kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL); if (!kbuf) - goto out_dput; + goto out_vn_rele; cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; - error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, + error = xfs_attr_list(XFS_I(inode), kbuf, al_hreq.buflen, al_hreq.flags, cursor); if (error) goto out_kfree; @@ -456,9 +486,10 @@ xfs_attrlist_by_handle( out_kfree: kfree(kbuf); - out_dput: - dput(dentry); - return error; + out_vn_rele: + iput(inode); + out: + return -error; } int @@ -533,13 +564,15 @@ xfs_attrmulti_attr_remove( STATIC int xfs_attrmulti_by_handle( + xfs_mount_t *mp, + void __user *arg, struct file *parfilp, - void __user *arg) + struct inode *parinode) { int error; xfs_attr_multiop_t *ops; xfs_fsop_attrmulti_handlereq_t am_hreq; - struct dentry *dentry; + struct inode *inode; unsigned int i, size; char *attr_name; @@ -548,19 +581,19 @@ xfs_attrmulti_by_handle( if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t))) return -XFS_ERROR(EFAULT); - dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); + error = 
xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &inode); + if (error) + goto out; error = E2BIG; size = am_hreq.opcount * sizeof(xfs_attr_multiop_t); if (!size || size > 16 * PAGE_SIZE) - goto out_dput; + goto out_vn_rele; error = ENOMEM; ops = kmalloc(size, GFP_KERNEL); if (!ops) - goto out_dput; + goto out_vn_rele; error = EFAULT; if (copy_from_user(ops, am_hreq.ops, size)) @@ -582,28 +615,25 @@ xfs_attrmulti_by_handle( switch (ops[i].am_opcode) { case ATTR_OP_GET: - ops[i].am_error = xfs_attrmulti_attr_get( - dentry->d_inode, attr_name, - ops[i].am_attrvalue, &ops[i].am_length, - ops[i].am_flags); + ops[i].am_error = xfs_attrmulti_attr_get(inode, + attr_name, ops[i].am_attrvalue, + &ops[i].am_length, ops[i].am_flags); break; case ATTR_OP_SET: ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); if (ops[i].am_error) break; - ops[i].am_error = xfs_attrmulti_attr_set( - dentry->d_inode, attr_name, - ops[i].am_attrvalue, ops[i].am_length, - ops[i].am_flags); + ops[i].am_error = xfs_attrmulti_attr_set(inode, + attr_name, ops[i].am_attrvalue, + ops[i].am_length, ops[i].am_flags); mnt_drop_write(parfilp->f_path.mnt); break; case ATTR_OP_REMOVE: ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); if (ops[i].am_error) break; - ops[i].am_error = xfs_attrmulti_attr_remove( - dentry->d_inode, attr_name, - ops[i].am_flags); + ops[i].am_error = xfs_attrmulti_attr_remove(inode, + attr_name, ops[i].am_flags); mnt_drop_write(parfilp->f_path.mnt); break; default: @@ -617,8 +647,9 @@ xfs_attrmulti_by_handle( kfree(attr_name); out_kfree_ops: kfree(ops); - out_dput: - dput(dentry); + out_vn_rele: + iput(inode); + out: return -error; } @@ -1409,23 +1440,23 @@ xfs_file_ioctl( if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) return -XFS_ERROR(EFAULT); - return xfs_open_by_handle(filp, &hreq); + return xfs_open_by_handle(mp, &hreq, filp, inode); } case XFS_IOC_FSSETDM_BY_HANDLE: - return xfs_fssetdm_by_handle(filp, arg); + return xfs_fssetdm_by_handle(mp, arg, inode); case XFS_IOC_READLINK_BY_HANDLE: { xfs_fsop_handlereq_t hreq; if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) return -XFS_ERROR(EFAULT); - return xfs_readlink_by_handle(filp, &hreq); + return xfs_readlink_by_handle(mp, &hreq, inode); } case XFS_IOC_ATTRLIST_BY_HANDLE: - return xfs_attrlist_by_handle(filp, arg); + return xfs_attrlist_by_handle(mp, arg, inode); case XFS_IOC_ATTRMULTI_BY_HANDLE: - return xfs_attrmulti_by_handle(filp, arg); + return xfs_attrmulti_by_handle(mp, arg, filp, inode); case XFS_IOC_SWAPEXT: { struct xfs_swapext sxp; diff --git a/trunk/fs/xfs/linux-2.6/xfs_ioctl.h b/trunk/fs/xfs/linux-2.6/xfs_ioctl.h index 7bd7c6afc1eb..8c16bf2d7e03 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_ioctl.h +++ b/trunk/fs/xfs/linux-2.6/xfs_ioctl.h @@ -34,13 +34,16 @@ xfs_find_handle( extern int xfs_open_by_handle( + xfs_mount_t *mp, + xfs_fsop_handlereq_t *hreq, struct file *parfilp, - xfs_fsop_handlereq_t *hreq); + struct inode *parinode); extern int xfs_readlink_by_handle( - struct file *parfilp, - xfs_fsop_handlereq_t *hreq); + xfs_mount_t *mp, + xfs_fsop_handlereq_t *hreq, + struct inode *parinode); extern int xfs_attrmulti_attr_get( @@ -64,12 +67,6 @@ xfs_attrmulti_attr_remove( char *name, __uint32_t flags); -extern struct dentry * -xfs_handle_to_dentry( - struct file *parfilp, - void __user *uhandle, - u32 hlen); - extern long xfs_file_ioctl( struct file *filp, diff --git a/trunk/fs/xfs/linux-2.6/xfs_ioctl32.c b/trunk/fs/xfs/linux-2.6/xfs_ioctl32.c index c70c4e3db790..50903ad3182e 100644 --- 
a/trunk/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/trunk/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -17,7 +17,6 @@ */ #include #include -#include #include #include "xfs.h" #include "xfs_fs.h" @@ -341,24 +340,96 @@ xfs_compat_handlereq_copyin( return 0; } -STATIC struct dentry * -xfs_compat_handlereq_to_dentry( - struct file *parfilp, - compat_xfs_fsop_handlereq_t *hreq) +/* + * Convert userspace handle data into inode. + * + * We use the fact that all the fsop_handlereq ioctl calls have a data + * structure argument whose first component is always a xfs_fsop_handlereq_t, + * so we can pass that sub structure into this handy, shared routine. + * + * If no error, caller must always iput the returned inode. + */ +STATIC int +xfs_vget_fsop_handlereq_compat( + xfs_mount_t *mp, + struct inode *parinode, /* parent inode pointer */ + compat_xfs_fsop_handlereq_t *hreq, + struct inode **inode) { - return xfs_handle_to_dentry(parfilp, - compat_ptr(hreq->ihandle), hreq->ihandlen); + void __user *hanp; + size_t hlen; + xfs_fid_t *xfid; + xfs_handle_t *handlep; + xfs_handle_t handle; + xfs_inode_t *ip; + xfs_ino_t ino; + __u32 igen; + int error; + + /* + * Only allow handle opens under a directory. + */ + if (!S_ISDIR(parinode->i_mode)) + return XFS_ERROR(ENOTDIR); + + hanp = compat_ptr(hreq->ihandle); + hlen = hreq->ihandlen; + handlep = &handle; + + if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep)) + return XFS_ERROR(EINVAL); + if (copy_from_user(handlep, hanp, hlen)) + return XFS_ERROR(EFAULT); + if (hlen < sizeof(*handlep)) + memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen); + if (hlen > sizeof(handlep->ha_fsid)) { + if (handlep->ha_fid.fid_len != + (hlen - sizeof(handlep->ha_fsid) - + sizeof(handlep->ha_fid.fid_len)) || + handlep->ha_fid.fid_pad) + return XFS_ERROR(EINVAL); + } + + /* + * Crack the handle, obtain the inode # & generation # + */ + xfid = (struct xfs_fid *)&handlep->ha_fid; + if (xfid->fid_len == sizeof(*xfid) - sizeof(xfid->fid_len)) { + ino = xfid->fid_ino; + igen = xfid->fid_gen; + } else { + return XFS_ERROR(EINVAL); + } + + /* + * Get the XFS inode, building a Linux inode to go with it. 
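For context before the xfs_iget() step below: this conversion helper is the kernel half of a round trip that normally starts in user space, where xfsprogs' libhandle produces exactly this fsid-plus-fid blob. A minimal sketch of driving the handle ioctls from user space, assuming the libhandle API shipped with xfsprogs (path_to_handle/open_by_handle/free_handle, linked with -lhandle; none of these names come from this patch):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <xfs/handle.h>		/* xfsprogs libhandle, assumed installed */

int main(int argc, char **argv)
{
	void	*hanp;
	size_t	hlen;
	int	fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file-on-xfs>\n", argv[0]);
		return 1;
	}
	/* Encode the file's handle: the fsid plus the ino/gen fid that
	 * the kernel-side routine here cracks back apart. */
	if (path_to_handle(argv[1], &hanp, &hlen) < 0) {
		perror("path_to_handle");
		return 1;
	}
	/* Reopen by handle; needs CAP_SYS_ADMIN, matching the capable()
	 * checks in the handlers touched by this patch. */
	fd = open_by_handle(hanp, hlen, O_RDONLY);
	if (fd < 0)
		perror("open_by_handle");
	else
		close(fd);
	free_handle(hanp, hlen);
	return 0;
}

A handle names an inode only by number, which is why the generation check that follows matters: if the file is deleted and the inode slot reused, the stale handle fails with ENOENT instead of silently opening the new file.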
+ */ + error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); + if (error) + return error; + if (ip == NULL) + return XFS_ERROR(EIO); + if (ip->i_d.di_gen != igen) { + xfs_iput_new(ip, XFS_ILOCK_SHARED); + return XFS_ERROR(ENOENT); + } + + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + *inode = VFS_I(ip); + return 0; } STATIC int xfs_compat_attrlist_by_handle( - struct file *parfilp, - void __user *arg) + xfs_mount_t *mp, + void __user *arg, + struct inode *parinode) { int error; attrlist_cursor_kern_t *cursor; compat_xfs_fsop_attrlist_handlereq_t al_hreq; - struct dentry *dentry; + struct inode *inode; char *kbuf; if (!capable(CAP_SYS_ADMIN)) @@ -375,17 +446,17 @@ xfs_compat_attrlist_by_handle( if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) return -XFS_ERROR(EINVAL); - dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); + error = xfs_vget_fsop_handlereq_compat(mp, parinode, &al_hreq.hreq, + &inode); + if (error) + goto out; - error = -ENOMEM; kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL); - if (!kbuf) - goto out_dput; + if (!kbuf) { + error = XFS_ERROR(ENOMEM); + goto out_vn_rele; + } cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; - error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, + error = xfs_attr_list(XFS_I(inode), kbuf, al_hreq.buflen, al_hreq.flags, cursor); if (error) goto out_kfree; @@ -395,20 +466,22 @@ xfs_compat_attrlist_by_handle( out_kfree: kfree(kbuf); - out_dput: - dput(dentry); + out_vn_rele: + iput(inode); + out: + return -error; } STATIC int xfs_compat_attrmulti_by_handle( - struct file *parfilp, - void __user *arg) + xfs_mount_t *mp, + void __user *arg, + struct inode *parinode) { int error; compat_xfs_attr_multiop_t *ops; compat_xfs_fsop_attrmulti_handlereq_t am_hreq; - struct dentry *dentry; + struct inode *inode; unsigned int i, size; char *attr_name; @@ -418,19 +491,20 @@ xfs_compat_attrmulti_by_handle( sizeof(compat_xfs_fsop_attrmulti_handlereq_t))) return -XFS_ERROR(EFAULT); - dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); + error = xfs_vget_fsop_handlereq_compat(mp, parinode, &am_hreq.hreq, + &inode); + if (error) + goto out; error = E2BIG; size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t); if (!size || size > 16 * PAGE_SIZE) - goto out_dput; + goto out_vn_rele; error = ENOMEM; ops = kmalloc(size, GFP_KERNEL); if (!ops) - goto out_dput; + goto out_vn_rele; error = EFAULT; if (copy_from_user(ops, compat_ptr(am_hreq.ops), size)) @@ -453,29 +527,20 @@ xfs_compat_attrmulti_by_handle( switch (ops[i].am_opcode) { case ATTR_OP_GET: - ops[i].am_error = xfs_attrmulti_attr_get( - dentry->d_inode, attr_name, + ops[i].am_error = xfs_attrmulti_attr_get(inode, + attr_name, compat_ptr(ops[i].am_attrvalue), &ops[i].am_length, ops[i].am_flags); break; case ATTR_OP_SET: - ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); - if (ops[i].am_error) - break; - ops[i].am_error = xfs_attrmulti_attr_set( - dentry->d_inode, attr_name, + ops[i].am_error = xfs_attrmulti_attr_set(inode, + attr_name, compat_ptr(ops[i].am_attrvalue), ops[i].am_length, ops[i].am_flags); - mnt_drop_write(parfilp->f_path.mnt); break; case ATTR_OP_REMOVE: - ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); - if (ops[i].am_error) - break; - ops[i].am_error = xfs_attrmulti_attr_remove( - dentry->d_inode, attr_name, - ops[i].am_flags); - mnt_drop_write(parfilp->f_path.mnt); + ops[i].am_error = xfs_attrmulti_attr_remove(inode, + attr_name, ops[i].am_flags); break; default: ops[i].am_error = 
EINVAL; @@ -488,20 +553,22 @@ xfs_compat_attrmulti_by_handle( kfree(attr_name); out_kfree_ops: kfree(ops); - out_dput: - dput(dentry); + out_vn_rele: + iput(inode); + out: return -error; } STATIC int xfs_compat_fssetdm_by_handle( - struct file *parfilp, - void __user *arg) + xfs_mount_t *mp, + void __user *arg, + struct inode *parinode) { int error; struct fsdmidata fsd; compat_xfs_fsop_setdm_handlereq_t dmhreq; - struct dentry *dentry; + struct inode *inode; if (!capable(CAP_MKNOD)) return -XFS_ERROR(EPERM); @@ -509,11 +576,12 @@ xfs_compat_fssetdm_by_handle( sizeof(compat_xfs_fsop_setdm_handlereq_t))) return -XFS_ERROR(EFAULT); - dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); + error = xfs_vget_fsop_handlereq_compat(mp, parinode, &dmhreq.hreq, + &inode); + if (error) + return -error; - if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { + if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) { error = -XFS_ERROR(EPERM); goto out; } @@ -523,11 +591,11 @@ xfs_compat_fssetdm_by_handle( goto out; } - error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, + error = -xfs_set_dmattrs(XFS_I(inode), fsd.fsd_dmevmask, fsd.fsd_dmstate); out: - dput(dentry); + iput(inode); return error; } @@ -654,21 +722,21 @@ xfs_file_compat_ioctl( if (xfs_compat_handlereq_copyin(&hreq, arg)) return -XFS_ERROR(EFAULT); - return xfs_open_by_handle(filp, &hreq); + return xfs_open_by_handle(mp, &hreq, filp, inode); } case XFS_IOC_READLINK_BY_HANDLE_32: { struct xfs_fsop_handlereq hreq; if (xfs_compat_handlereq_copyin(&hreq, arg)) return -XFS_ERROR(EFAULT); - return xfs_readlink_by_handle(filp, &hreq); + return xfs_readlink_by_handle(mp, &hreq, inode); } case XFS_IOC_ATTRLIST_BY_HANDLE_32: - return xfs_compat_attrlist_by_handle(filp, arg); + return xfs_compat_attrlist_by_handle(mp, arg, inode); case XFS_IOC_ATTRMULTI_BY_HANDLE_32: - return xfs_compat_attrmulti_by_handle(filp, arg); + return xfs_compat_attrmulti_by_handle(mp, arg, inode); case XFS_IOC_FSSETDM_BY_HANDLE_32: - return xfs_compat_fssetdm_by_handle(filp, arg); + return xfs_compat_fssetdm_by_handle(mp, arg, inode); default: return -XFS_ERROR(ENOIOCTLCMD); } diff --git a/trunk/fs/xfs/linux-2.6/xfs_super.c b/trunk/fs/xfs/linux-2.6/xfs_super.c index c71e226da7f5..95a971080368 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_super.c +++ b/trunk/fs/xfs/linux-2.6/xfs_super.c @@ -1197,7 +1197,6 @@ xfs_fs_remount( struct xfs_mount *mp = XFS_M(sb); substring_t args[MAX_OPT_ARGS]; char *p; - int error; while ((p = strsep(&options, ",")) != NULL) { int token; @@ -1248,25 +1247,11 @@ xfs_fs_remount( } } - /* ro -> rw */ + /* rw/ro -> rw */ if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) { mp->m_flags &= ~XFS_MOUNT_RDONLY; if (mp->m_flags & XFS_MOUNT_BARRIER) xfs_mountfs_check_barriers(mp); - - /* - * If this is the first remount to writeable state we - * might have some superblock changes to update. - */ - if (mp->m_update_flags) { - error = xfs_mount_log_sb(mp, mp->m_update_flags); - if (error) { - cmn_err(CE_WARN, - "XFS: failed to write sb changes"); - return error; - } - mp->m_update_flags = 0; - } } /* rw -> ro */ diff --git a/trunk/fs/xfs/quota/xfs_dquot.c b/trunk/fs/xfs/quota/xfs_dquot.c index 6543c0b29753..591ca6602bfb 100644 --- a/trunk/fs/xfs/quota/xfs_dquot.c +++ b/trunk/fs/xfs/quota/xfs_dquot.c @@ -73,8 +73,6 @@ int xfs_dqreq_num; int xfs_dqerror_mod = 33; #endif -static struct lock_class_key xfs_dquot_other_class; - /* * Allocate and initialize a dquot. 
We don't always allocate fresh memory; * we try to reclaim a free dquot if the number of incore dquots are above @@ -141,15 +139,7 @@ xfs_qm_dqinit( ASSERT(dqp->q_trace); xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT"); #endif - } - - /* - * In either case we need to make sure group quotas have a different - * lock class than user quotas, to make sure lockdep knows we can - * locks of one of each at the same time. - */ - if (!(type & XFS_DQ_USER)) - lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class); + } /* * log item gets initialized later @@ -431,7 +421,7 @@ xfs_qm_dqalloc( /* * Initialize the bmap freelist prior to calling bmapi code. */ - xfs_bmap_init(&flist, &firstblock); + XFS_BMAP_INIT(&flist, &firstblock); xfs_ilock(quotip, XFS_ILOCK_EXCL); /* * Return if this type of quotas is turned off while we didn't @@ -1393,12 +1383,6 @@ xfs_dqunlock_nonotify( mutex_unlock(&(dqp->q_qlock)); } -/* - * Lock two xfs_dquot structures. - * - * To avoid deadlocks we always lock the quota structure with - * the lowerd id first. - */ void xfs_dqlock2( xfs_dquot_t *d1, @@ -1408,16 +1392,18 @@ xfs_dqlock2( ASSERT(d1 != d2); if (be32_to_cpu(d1->q_core.d_id) > be32_to_cpu(d2->q_core.d_id)) { - mutex_lock(&d2->q_qlock); - mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED); + xfs_dqlock(d2); + xfs_dqlock(d1); } else { - mutex_lock(&d1->q_qlock); - mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED); + xfs_dqlock(d1); + xfs_dqlock(d2); + } + } else { + if (d1) { + xfs_dqlock(d1); + } else if (d2) { + xfs_dqlock(d2); } - } else if (d1) { - mutex_lock(&d1->q_qlock); - } else if (d2) { - mutex_lock(&d2->q_qlock); } } diff --git a/trunk/fs/xfs/quota/xfs_dquot.h b/trunk/fs/xfs/quota/xfs_dquot.h index d443e93b4331..7e455337e2ba 100644 --- a/trunk/fs/xfs/quota/xfs_dquot.h +++ b/trunk/fs/xfs/quota/xfs_dquot.h @@ -97,16 +97,6 @@ typedef struct xfs_dquot { #define dq_hashlist q_lists.dqm_hashlist #define dq_flags q_lists.dqm_flags -/* - * Lock hierachy for q_qlock: - * XFS_QLOCK_NORMAL is the implicit default, - * XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2 - */ -enum { - XFS_QLOCK_NORMAL = 0, - XFS_QLOCK_NESTED, -}; - #define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++) #ifdef DEBUG diff --git a/trunk/fs/xfs/quota/xfs_qm.c b/trunk/fs/xfs/quota/xfs_qm.c index 7a2beb64314f..6b13960cf318 100644 --- a/trunk/fs/xfs/quota/xfs_qm.c +++ b/trunk/fs/xfs/quota/xfs_qm.c @@ -1070,13 +1070,6 @@ xfs_qm_sync( return 0; } -/* - * The hash chains and the mplist use the same xfs_dqhash structure as - * their list head, but we can take the mplist qh_lock and one of the - * hash qh_locks at the same time without any problem as they aren't - * related. - */ -static struct lock_class_key xfs_quota_mplist_class; /* * This initializes all the quota information that's kept in the @@ -1112,8 +1105,6 @@ xfs_qm_init_quotainfo( } xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0); - lockdep_set_class(&qinf->qi_dqlist.qh_lock, &xfs_quota_mplist_class); - qinf->qi_dqreclaims = 0; /* mutex used to serialize quotaoffs */ diff --git a/trunk/fs/xfs/xfs_ag.h b/trunk/fs/xfs/xfs_ag.h index 143d63ecb20a..d3b3cf742999 100644 --- a/trunk/fs/xfs/xfs_ag.h +++ b/trunk/fs/xfs/xfs_ag.h @@ -244,8 +244,8 @@ typedef struct xfs_perag #define XFS_AG_CHECK_DADDR(mp,d,len) \ ((len) == 1 ? 
\ ASSERT((d) == XFS_SB_DADDR || \ - xfs_daddr_to_agbno(mp, d) != XFS_SB_DADDR) : \ - ASSERT(xfs_daddr_to_agno(mp, d) == \ - xfs_daddr_to_agno(mp, (d) + (len) - 1))) + XFS_DADDR_TO_AGBNO(mp, d) != XFS_SB_DADDR) : \ + ASSERT(XFS_DADDR_TO_AGNO(mp, d) == \ + XFS_DADDR_TO_AGNO(mp, (d) + (len) - 1))) #endif /* __XFS_AG_H__ */ diff --git a/trunk/fs/xfs/xfs_alloc_btree.c b/trunk/fs/xfs/xfs_alloc_btree.c index c10c3a292d30..733cb75a8c5d 100644 --- a/trunk/fs/xfs/xfs_alloc_btree.c +++ b/trunk/fs/xfs/xfs_alloc_btree.c @@ -115,7 +115,7 @@ xfs_allocbt_free_block( xfs_agblock_t bno; int error; - bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp)); + bno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(bp)); error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1); if (error) return error; diff --git a/trunk/fs/xfs/xfs_attr.c b/trunk/fs/xfs/xfs_attr.c index 5fde1654b430..f7cdc28aff41 100644 --- a/trunk/fs/xfs/xfs_attr.c +++ b/trunk/fs/xfs/xfs_attr.c @@ -374,7 +374,7 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, * It won't fit in the shortform, transform to a leaf block. * GROT: another possible req'mt for a double-split btree op. */ - xfs_bmap_init(args.flist, args.firstblock); + XFS_BMAP_INIT(args.flist, args.firstblock); error = xfs_attr_shortform_to_leaf(&args); if (!error) { error = xfs_bmap_finish(&args.trans, args.flist, @@ -956,7 +956,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) * Commit that transaction so that the node_addname() call * can manage its own transactions. */ - xfs_bmap_init(args->flist, args->firstblock); + XFS_BMAP_INIT(args->flist, args->firstblock); error = xfs_attr_leaf_to_node(args); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, @@ -1057,7 +1057,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) * If the result is small enough, shrink it all into the inode. */ if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { - xfs_bmap_init(args->flist, args->firstblock); + XFS_BMAP_INIT(args->flist, args->firstblock); error = xfs_attr_leaf_to_shortform(bp, args, forkoff); /* bp is gone due to xfs_da_shrink_inode */ if (!error) { @@ -1135,7 +1135,7 @@ xfs_attr_leaf_removename(xfs_da_args_t *args) * If the result is small enough, shrink it all into the inode. */ if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { - xfs_bmap_init(args->flist, args->firstblock); + XFS_BMAP_INIT(args->flist, args->firstblock); error = xfs_attr_leaf_to_shortform(bp, args, forkoff); /* bp is gone due to xfs_da_shrink_inode */ if (!error) { @@ -1290,7 +1290,7 @@ xfs_attr_node_addname(xfs_da_args_t *args) * have been a b-tree. */ xfs_da_state_free(state); - xfs_bmap_init(args->flist, args->firstblock); + XFS_BMAP_INIT(args->flist, args->firstblock); error = xfs_attr_leaf_to_node(args); if (!error) { error = xfs_bmap_finish(&args->trans, @@ -1331,7 +1331,7 @@ xfs_attr_node_addname(xfs_da_args_t *args) * in the index/blkno/rmtblkno/rmtblkcnt fields and * in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields. */ - xfs_bmap_init(args->flist, args->firstblock); + XFS_BMAP_INIT(args->flist, args->firstblock); error = xfs_da_split(state); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, @@ -1443,7 +1443,7 @@ xfs_attr_node_addname(xfs_da_args_t *args) * Check to see if the tree needs to be collapsed. 
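Back in the xfs_dquot.c hunks, xfs_dqlock2() returns to plain ordered locking and the lockdep nesting annotations go away. The deadlock-avoidance rule it relies on is worth spelling out: when two dquots must be held at once, the one with the lower id is always taken first, so every caller nests the pair identically. A stand-alone sketch of that discipline, with pthreads standing in for the kernel mutexes and all names illustrative (the kernel version must additionally tolerate a NULL in either slot):

#include <stdint.h>
#include <pthread.h>

struct demo_dquot {
	uint32_t	id;		/* quota id, compared in cpu order */
	pthread_mutex_t	lock;
};

/* Lock two distinct dquots without deadlock: lower id first, always. */
static void demo_dqlock2(struct demo_dquot *d1, struct demo_dquot *d2)
{
	if (d1->id > d2->id) {
		pthread_mutex_lock(&d2->lock);	/* d2 has the lower id */
		pthread_mutex_lock(&d1->lock);
	} else {
		pthread_mutex_lock(&d1->lock);	/* d1 has the lower id */
		pthread_mutex_lock(&d2->lock);
	}
}

int main(void)
{
	struct demo_dquot a = { 7, PTHREAD_MUTEX_INITIALIZER };
	struct demo_dquot b = { 3, PTHREAD_MUTEX_INITIALIZER };

	demo_dqlock2(&a, &b);	/* takes b (id 3) first, then a */
	pthread_mutex_unlock(&a.lock);
	pthread_mutex_unlock(&b.lock);
	return 0;
}

Any two threads locking the same pair, in either argument order, acquire the mutexes in one global order, which is what makes a circular wait impossible.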
*/ if (retval && (state->path.active > 1)) { - xfs_bmap_init(args->flist, args->firstblock); + XFS_BMAP_INIT(args->flist, args->firstblock); error = xfs_da_join(state); if (!error) { error = xfs_bmap_finish(&args->trans, @@ -1579,7 +1579,7 @@ xfs_attr_node_removename(xfs_da_args_t *args) * Check to see if the tree needs to be collapsed. */ if (retval && (state->path.active > 1)) { - xfs_bmap_init(args->flist, args->firstblock); + XFS_BMAP_INIT(args->flist, args->firstblock); error = xfs_da_join(state); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, @@ -1630,7 +1630,7 @@ xfs_attr_node_removename(xfs_da_args_t *args) == XFS_ATTR_LEAF_MAGIC); if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { - xfs_bmap_init(args->flist, args->firstblock); + XFS_BMAP_INIT(args->flist, args->firstblock); error = xfs_attr_leaf_to_shortform(bp, args, forkoff); /* bp is gone due to xfs_da_shrink_inode */ if (!error) { @@ -2069,7 +2069,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) /* * Allocate a single extent, up to the size of the value. */ - xfs_bmap_init(args->flist, args->firstblock); + XFS_BMAP_INIT(args->flist, args->firstblock); nmap = 1; error = xfs_bmapi(args->trans, dp, (xfs_fileoff_t)lblkno, blkcnt, @@ -2123,7 +2123,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) /* * Try to remember where we decided to put the value. */ - xfs_bmap_init(args->flist, args->firstblock); + XFS_BMAP_INIT(args->flist, args->firstblock); nmap = 1; error = xfs_bmapi(NULL, dp, (xfs_fileoff_t)lblkno, args->rmtblkcnt, @@ -2188,7 +2188,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args) /* * Try to remember where we decided to put the value. */ - xfs_bmap_init(args->flist, args->firstblock); + XFS_BMAP_INIT(args->flist, args->firstblock); nmap = 1; error = xfs_bmapi(NULL, args->dp, (xfs_fileoff_t)lblkno, args->rmtblkcnt, @@ -2229,7 +2229,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args) blkcnt = args->rmtblkcnt; done = 0; while (!done) { - xfs_bmap_init(args->flist, args->firstblock); + XFS_BMAP_INIT(args->flist, args->firstblock); error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt, XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, 1, args->firstblock, args->flist, diff --git a/trunk/fs/xfs/xfs_bmap.c b/trunk/fs/xfs/xfs_bmap.c index c852cd65aaea..138308e70d14 100644 --- a/trunk/fs/xfs/xfs_bmap.c +++ b/trunk/fs/xfs/xfs_bmap.c @@ -595,9 +595,9 @@ xfs_bmap_add_extent( xfs_iext_insert(ifp, 0, 1, new); ASSERT(cur == NULL); ifp->if_lastex = 0; - if (!isnullstartblock(new->br_startblock)) { + if (!ISNULLSTARTBLOCK(new->br_startblock)) { XFS_IFORK_NEXT_SET(ip, whichfork, 1); - logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); + logflags = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork); } else logflags = 0; /* DELTA: single new extent */ @@ -613,7 +613,7 @@ xfs_bmap_add_extent( /* * Any kind of new delayed allocation goes here. */ - else if (isnullstartblock(new->br_startblock)) { + else if (ISNULLSTARTBLOCK(new->br_startblock)) { if (cur) ASSERT((cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL) == 0); @@ -644,11 +644,11 @@ xfs_bmap_add_extent( * in a delayed or unwritten allocation with a real one, or * converting real back to unwritten. 
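Nearly every xfs_attr.c hunk above, and many more later in this patch, follows the transactional shape that the restored XFS_BMAP_INIT() macro opens: arm an empty extent-free list plus a NULLFSBLOCK firstblock, let the bmap or attr operation queue extents to free as it works, then hand the list to xfs_bmap_finish() to commit. A toy accumulate-then-commit model of that pattern (plain user-space C; nothing below is kernel API):

#include <stdio.h>
#include <stdlib.h>

struct demo_free_item {			/* one extent queued for freeing */
	unsigned long		bno;
	unsigned long		len;
	struct demo_free_item	*next;
};

struct demo_free_list {			/* what XFS_BMAP_INIT() resets */
	struct demo_free_item	*first;
	int			count;
};

static void demo_bmap_init(struct demo_free_list *fl)
{
	fl->first = NULL;
	fl->count = 0;
}

static void demo_add_free(struct demo_free_list *fl,
			  unsigned long bno, unsigned long len)
{
	struct demo_free_item *it = malloc(sizeof(*it));

	if (!it)
		abort();
	it->bno = bno;
	it->len = len;
	it->next = fl->first;
	fl->first = it;
	fl->count++;
}

static void demo_bmap_finish(struct demo_free_list *fl)	/* drain + "commit" */
{
	while (fl->first) {
		struct demo_free_item *it = fl->first;

		printf("free blocks %lu..%lu\n", it->bno,
		       it->bno + it->len - 1);
		fl->first = it->next;
		free(it);
		fl->count--;
	}
}

int main(void)
{
	struct demo_free_list fl;

	demo_bmap_init(&fl);		/* the XFS_BMAP_INIT() step */
	demo_add_free(&fl, 100, 8);	/* queued during the operation */
	demo_add_free(&fl, 300, 2);
	demo_bmap_finish(&fl);		/* committed at the end */
	return 0;
}

The real list is likewise caller-owned stack state; as the inline xfs_bmap_init() later in this patch shows, the macro only resets the list head and seeds firstblock.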
*/ - if (!isnullstartblock(new->br_startblock) && + if (!ISNULLSTARTBLOCK(new->br_startblock) && new->br_startoff + new->br_blockcount > prev.br_startoff) { if (prev.br_state != XFS_EXT_UNWRITTEN && - isnullstartblock(prev.br_startblock)) { - da_old = startblockval(prev.br_startblock); + ISNULLSTARTBLOCK(prev.br_startblock)) { + da_old = STARTBLOCKVAL(prev.br_startblock); if (cur) ASSERT(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL); @@ -803,7 +803,7 @@ xfs_bmap_add_extent_delay_real( */ if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); - STATE_SET(LEFT_DELAY, isnullstartblock(LEFT.br_startblock)); + STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock)); } STATE_SET(LEFT_CONTIG, STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && @@ -820,7 +820,7 @@ xfs_bmap_add_extent_delay_real( idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); - STATE_SET(RIGHT_DELAY, isnullstartblock(RIGHT.br_startblock)); + STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock)); } STATE_SET(RIGHT_CONTIG, STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && @@ -1019,8 +1019,8 @@ xfs_bmap_add_extent_delay_real( goto done; } temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), - startblockval(PREV.br_startblock)); - xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); + STARTBLOCKVAL(PREV.br_startblock)); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK); *dnew = temp; /* DELTA: The boundary between two in-core extents moved. */ @@ -1067,10 +1067,10 @@ xfs_bmap_add_extent_delay_real( goto done; } temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), - startblockval(PREV.br_startblock) - + STARTBLOCKVAL(PREV.br_startblock) - (cur ? cur->bc_private.b.allocated : 0)); ep = xfs_iext_get_ext(ifp, idx + 1); - xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx + 1, XFS_DATA_FORK); *dnew = temp; /* DELTA: One in-core extent is split in two. */ @@ -1110,8 +1110,8 @@ xfs_bmap_add_extent_delay_real( goto done; } temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), - startblockval(PREV.br_startblock)); - xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); + STARTBLOCKVAL(PREV.br_startblock)); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK); *dnew = temp; /* DELTA: The boundary between two in-core extents moved. */ @@ -1157,10 +1157,10 @@ xfs_bmap_add_extent_delay_real( goto done; } temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), - startblockval(PREV.br_startblock) - + STARTBLOCKVAL(PREV.br_startblock) - (cur ? cur->bc_private.b.allocated : 0)); ep = xfs_iext_get_ext(ifp, idx); - xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK); *dnew = temp; /* DELTA: One in-core extent is split in two. */ @@ -1213,7 +1213,7 @@ xfs_bmap_add_extent_delay_real( } temp = xfs_bmap_worst_indlen(ip, temp); temp2 = xfs_bmap_worst_indlen(ip, temp2); - diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) - + diff = (int)(temp + temp2 - STARTBLOCKVAL(PREV.br_startblock) - (cur ? 
cur->bc_private.b.allocated : 0)); if (diff > 0 && xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) { @@ -1241,11 +1241,11 @@ xfs_bmap_add_extent_delay_real( } } ep = xfs_iext_get_ext(ifp, idx); - xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK); XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx + 2, XFS_DATA_FORK); xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2), - nullstartblock((int)temp2)); + NULLSTARTBLOCK((int)temp2)); XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx + 2, XFS_DATA_FORK); *dnew = temp + temp2; /* DELTA: One in-core extent is split in three. */ @@ -1365,7 +1365,7 @@ xfs_bmap_add_extent_unwritten_real( */ if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); - STATE_SET(LEFT_DELAY, isnullstartblock(LEFT.br_startblock)); + STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock)); } STATE_SET(LEFT_CONTIG, STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && @@ -1382,7 +1382,7 @@ xfs_bmap_add_extent_unwritten_real( idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); - STATE_SET(RIGHT_DELAY, isnullstartblock(RIGHT.br_startblock)); + STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock)); } STATE_SET(RIGHT_CONTIG, STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && @@ -1889,13 +1889,13 @@ xfs_bmap_add_extent_hole_delay( ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); ep = xfs_iext_get_ext(ifp, idx); state = 0; - ASSERT(isnullstartblock(new->br_startblock)); + ASSERT(ISNULLSTARTBLOCK(new->br_startblock)); /* * Check and set flags if this segment has a left neighbor */ if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); - STATE_SET(LEFT_DELAY, isnullstartblock(left.br_startblock)); + STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock)); } /* * Check and set flags if the current (right) segment exists. @@ -1905,7 +1905,7 @@ xfs_bmap_add_extent_hole_delay( idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) { xfs_bmbt_get_all(ep, &right); - STATE_SET(RIGHT_DELAY, isnullstartblock(right.br_startblock)); + STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock)); } /* * Set contiguity flags on the left and right neighbors. 
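All the ISNULLSTARTBLOCK()/STARTBLOCKVAL()/NULLSTARTBLOCK() churn in these hunks manipulates one encoding: a delayed-allocation extent has no real disk block yet, so its startblock field carries an all-ones tag in the high bits and the reserved worst-case count of indirect (bmap btree) blocks in the low bits. A self-contained model of the encoding; the bit split is illustrative, not the kernel's exact STARTBLOCKVALBITS:

#include <assert.h>
#include <stdint.h>

#define DEMO_VALBITS	17				/* low "value" bits */
#define DEMO_MASK	(~(uint64_t)0 << DEMO_VALBITS)	/* high tag bits */

static uint64_t demo_nullstartblock(unsigned int k)
{
	assert(k < (1u << DEMO_VALBITS));
	return DEMO_MASK | k;		/* tag + reserved indirect blocks */
}

static int demo_isnullstartblock(uint64_t x)
{
	return (x & DEMO_MASK) == DEMO_MASK;
}

static uint64_t demo_startblockval(uint64_t x)
{
	return x & ~DEMO_MASK;
}

int main(void)
{
	uint64_t sb = demo_nullstartblock(42);	/* 42 blocks reserved */

	assert(demo_isnullstartblock(sb));
	assert(demo_startblockval(sb) == 42);
	assert(!demo_isnullstartblock(12345));	/* a real start block */
	return 0;
}

With the tag in place, a single mask test distinguishes delayed extents from real ones, and the reservation rides along in the same 64-bit field.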
@@ -1938,12 +1938,12 @@ xfs_bmap_add_extent_hole_delay( XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1, XFS_DATA_FORK); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); - oldlen = startblockval(left.br_startblock) + - startblockval(new->br_startblock) + - startblockval(right.br_startblock); + oldlen = STARTBLOCKVAL(left.br_startblock) + + STARTBLOCKVAL(new->br_startblock) + + STARTBLOCKVAL(right.br_startblock); newlen = xfs_bmap_worst_indlen(ip, temp); xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), - nullstartblock((int)newlen)); + NULLSTARTBLOCK((int)newlen)); XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1, XFS_DATA_FORK); XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, XFS_DATA_FORK); @@ -1964,11 +1964,11 @@ xfs_bmap_add_extent_hole_delay( XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1, XFS_DATA_FORK); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); - oldlen = startblockval(left.br_startblock) + - startblockval(new->br_startblock); + oldlen = STARTBLOCKVAL(left.br_startblock) + + STARTBLOCKVAL(new->br_startblock); newlen = xfs_bmap_worst_indlen(ip, temp); xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), - nullstartblock((int)newlen)); + NULLSTARTBLOCK((int)newlen)); XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, XFS_DATA_FORK); ip->i_df.if_lastex = idx - 1; @@ -1985,11 +1985,11 @@ xfs_bmap_add_extent_hole_delay( */ XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, XFS_DATA_FORK); temp = new->br_blockcount + right.br_blockcount; - oldlen = startblockval(new->br_startblock) + - startblockval(right.br_startblock); + oldlen = STARTBLOCKVAL(new->br_startblock) + + STARTBLOCKVAL(right.br_startblock); newlen = xfs_bmap_worst_indlen(ip, temp); xfs_bmbt_set_allf(ep, new->br_startoff, - nullstartblock((int)newlen), temp, right.br_state); + NULLSTARTBLOCK((int)newlen), temp, right.br_state); XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, XFS_DATA_FORK); ip->i_df.if_lastex = idx; /* DELTA: One in-core extent grew into a hole. */ @@ -2085,7 +2085,7 @@ xfs_bmap_add_extent_hole_real( */ if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); - STATE_SET(LEFT_DELAY, isnullstartblock(left.br_startblock)); + STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock)); } /* * Check and set flags if this segment has a current value. @@ -2095,7 +2095,7 @@ xfs_bmap_add_extent_hole_real( idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) { xfs_bmbt_get_all(ep, &right); - STATE_SET(RIGHT_DELAY, isnullstartblock(right.br_startblock)); + STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock)); } /* * We're inserting a real allocation between "left" and "right". 
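The oldlen/newlen arithmetic in the hole_delay merge cases that follow is pure reservation bookkeeping: when adjacent delayed extents fuse, a fresh worst-case indirect-block estimate is computed for the combined length, and because the estimate is sub-additive the merged extent typically needs fewer reserved blocks than the pieces held, with the surplus returned to the free-block count. A worked example with a hypothetical estimator (the real xfs_bmap_worst_indlen() depends on bmap btree geometry):

#include <stdio.h>

/* Hypothetical stand-in: one indirect block per 256 data blocks,
 * rounded up. The real estimator walks the btree fan-out. */
static unsigned long demo_worst_indlen(unsigned long len)
{
	return (len + 255) / 256;
}

int main(void)
{
	unsigned long left = 300, middle = 100, right = 200;
	unsigned long oldlen, newlen;

	/* Sum of per-piece reservations (the old STARTBLOCKVALs)... */
	oldlen = demo_worst_indlen(left) + demo_worst_indlen(middle) +
		 demo_worst_indlen(right);
	/* ...versus one reservation for the merged extent. */
	newlen = demo_worst_indlen(left + middle + right);

	printf("old %lu, new %lu, returned to free space: %lu\n",
	       oldlen, newlen, oldlen - newlen);	/* 4, 3, 1 here */
	return 0;
}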
@@ -2143,7 +2143,7 @@ xfs_bmap_add_extent_hole_real( XFS_IFORK_NEXT_SET(ip, whichfork, XFS_IFORK_NEXTENTS(ip, whichfork) - 1); if (cur == NULL) { - rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); + rval = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork); } else { rval = XFS_ILOG_CORE; if ((error = xfs_bmbt_lookup_eq(cur, @@ -2185,7 +2185,7 @@ xfs_bmap_add_extent_hole_real( XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, whichfork); ifp->if_lastex = idx - 1; if (cur == NULL) { - rval = xfs_ilog_fext(whichfork); + rval = XFS_ILOG_FEXT(whichfork); } else { rval = 0; if ((error = xfs_bmbt_lookup_eq(cur, @@ -2220,7 +2220,7 @@ xfs_bmap_add_extent_hole_real( XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, whichfork); ifp->if_lastex = idx; if (cur == NULL) { - rval = xfs_ilog_fext(whichfork); + rval = XFS_ILOG_FEXT(whichfork); } else { rval = 0; if ((error = xfs_bmbt_lookup_eq(cur, @@ -2254,7 +2254,7 @@ xfs_bmap_add_extent_hole_real( XFS_IFORK_NEXT_SET(ip, whichfork, XFS_IFORK_NEXTENTS(ip, whichfork) + 1); if (cur == NULL) { - rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); + rval = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork); } else { rval = XFS_ILOG_CORE; if ((error = xfs_bmbt_lookup_eq(cur, @@ -2482,7 +2482,7 @@ xfs_bmap_adjacent( * try to use it's last block as our starting point. */ if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF && - !isnullstartblock(ap->prevp->br_startblock) && + !ISNULLSTARTBLOCK(ap->prevp->br_startblock) && ISVALID(ap->prevp->br_startblock + ap->prevp->br_blockcount, ap->prevp->br_startblock)) { ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount; @@ -2511,7 +2511,7 @@ xfs_bmap_adjacent( * start block based on it. */ if (ap->prevp->br_startoff != NULLFILEOFF && - !isnullstartblock(ap->prevp->br_startblock) && + !ISNULLSTARTBLOCK(ap->prevp->br_startblock) && (prevbno = ap->prevp->br_startblock + ap->prevp->br_blockcount) && ISVALID(prevbno, ap->prevp->br_startblock)) { @@ -2552,7 +2552,7 @@ xfs_bmap_adjacent( * If there's a following (right) block, select a requested * start block based on it. */ - if (!isnullstartblock(ap->gotp->br_startblock)) { + if (!ISNULLSTARTBLOCK(ap->gotp->br_startblock)) { /* * Calculate gap to start of next block. 
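The xfs_bmap_adjacent() hunks just above implement a placement heuristic: when the extent before the one being allocated already has a real start block, bid for the disk block just past its end, adjusted by any file-offset gap, so logically nearby data stays physically nearby. A stripped-down sketch of the previous-extent case (illustrative types; the real code also weighs the following extent and AG boundaries):

#include <stdio.h>
#include <stdint.h>

#define DEMO_NULLBLOCK	((uint64_t)-1)	/* delayed: no real block yet */

struct demo_extent {
	uint64_t startoff;	/* file offset, in fs blocks */
	uint64_t startblock;	/* disk block, or DEMO_NULLBLOCK */
	uint64_t blockcount;
};

/* Suggest a disk block for an allocation at file offset 'off'. */
static uint64_t demo_adjacent_hint(const struct demo_extent *prev,
				   uint64_t off)
{
	uint64_t gap;

	if (prev->startblock == DEMO_NULLBLOCK)
		return DEMO_NULLBLOCK;		/* no hint available */
	/* gap between the end of prev and the block being allocated */
	gap = off - (prev->startoff + prev->blockcount);
	return prev->startblock + prev->blockcount + gap;
}

int main(void)
{
	struct demo_extent prev = { 0, 1000, 16 };	/* disk 1000..1015 */

	/* allocating at file offset 20 -> aim at disk block 1020 */
	printf("hint: %llu\n",
	       (unsigned long long)demo_adjacent_hint(&prev, 20));
	return 0;
}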
*/ @@ -3082,7 +3082,7 @@ xfs_bmap_btree_to_extents( ASSERT(ifp->if_broot == NULL); ASSERT((ifp->if_flags & XFS_IFBROOT) == 0); XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); - *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); + *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork); return 0; } @@ -3136,8 +3136,8 @@ xfs_bmap_del_extent( del_endoff = del->br_startoff + del->br_blockcount; got_endoff = got.br_startoff + got.br_blockcount; ASSERT(got_endoff >= del_endoff); - delay = isnullstartblock(got.br_startblock); - ASSERT(isnullstartblock(del->br_startblock) == delay); + delay = ISNULLSTARTBLOCK(got.br_startblock); + ASSERT(ISNULLSTARTBLOCK(del->br_startblock) == delay); flags = 0; qfield = 0; error = 0; @@ -3189,7 +3189,7 @@ xfs_bmap_del_extent( } da_old = da_new = 0; } else { - da_old = startblockval(got.br_startblock); + da_old = STARTBLOCKVAL(got.br_startblock); da_new = 0; nblks = 0; do_fx = 0; @@ -3213,7 +3213,7 @@ xfs_bmap_del_extent( XFS_IFORK_NEXTENTS(ip, whichfork) - 1); flags |= XFS_ILOG_CORE; if (!cur) { - flags |= xfs_ilog_fext(whichfork); + flags |= XFS_ILOG_FEXT(whichfork); break; } if ((error = xfs_btree_delete(cur, &i))) @@ -3233,7 +3233,7 @@ xfs_bmap_del_extent( if (delay) { temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), da_old); - xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, whichfork); da_new = temp; @@ -3242,7 +3242,7 @@ xfs_bmap_del_extent( xfs_bmbt_set_startblock(ep, del_endblock); XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, whichfork); if (!cur) { - flags |= xfs_ilog_fext(whichfork); + flags |= XFS_ILOG_FEXT(whichfork); break; } if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock, @@ -3262,7 +3262,7 @@ xfs_bmap_del_extent( if (delay) { temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), da_old); - xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, whichfork); da_new = temp; @@ -3270,7 +3270,7 @@ xfs_bmap_del_extent( } XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, whichfork); if (!cur) { - flags |= xfs_ilog_fext(whichfork); + flags |= XFS_ILOG_FEXT(whichfork); break; } if ((error = xfs_bmbt_update(cur, got.br_startoff, @@ -3345,22 +3345,22 @@ xfs_bmap_del_extent( } XFS_WANT_CORRUPTED_GOTO(i == 1, done); } else - flags |= xfs_ilog_fext(whichfork); + flags |= XFS_ILOG_FEXT(whichfork); XFS_IFORK_NEXT_SET(ip, whichfork, XFS_IFORK_NEXTENTS(ip, whichfork) + 1); } else { ASSERT(whichfork == XFS_DATA_FORK); temp = xfs_bmap_worst_indlen(ip, temp); - xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); temp2 = xfs_bmap_worst_indlen(ip, temp2); - new.br_startblock = nullstartblock((int)temp2); + new.br_startblock = NULLSTARTBLOCK((int)temp2); da_new = temp + temp2; while (da_new > da_old) { if (temp) { temp--; da_new--; xfs_bmbt_set_startblock(ep, - nullstartblock((int)temp)); + NULLSTARTBLOCK((int)temp)); } if (da_new == da_old) break; @@ -3368,7 +3368,7 @@ xfs_bmap_del_extent( temp2--; da_new--; new.br_startblock = - nullstartblock((int)temp2); + NULLSTARTBLOCK((int)temp2); } } } @@ -3545,7 +3545,7 @@ xfs_bmap_extents_to_btree( nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); for (cnt = i = 0; i < nextents; i++) { ep = xfs_iext_get_ext(ifp, i); - if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) { + if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep))) { arp->l0 = 
cpu_to_be64(ep->l0); arp->l1 = cpu_to_be64(ep->l1); arp++; cnt++; @@ -3572,7 +3572,7 @@ xfs_bmap_extents_to_btree( xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs)); ASSERT(*curp == NULL); *curp = cur; - *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork); + *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FBROOT(whichfork); return 0; } @@ -3676,7 +3676,7 @@ xfs_bmap_local_to_extents( ip->i_d.di_nblocks = 1; XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); - flags |= xfs_ilog_fext(whichfork); + flags |= XFS_ILOG_FEXT(whichfork); } else { ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0); xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork); @@ -4082,7 +4082,7 @@ xfs_bmap_add_attrfork( XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); ip->i_afp->if_flags = XFS_IFEXTENTS; logflags = 0; - xfs_bmap_init(&flist, &firstblock); + XFS_BMAP_INIT(&flist, &firstblock); switch (ip->i_d.di_format) { case XFS_DINODE_FMT_LOCAL: error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist, @@ -4162,7 +4162,7 @@ xfs_bmap_add_free( ASSERT(bno != NULLFSBLOCK); ASSERT(len > 0); ASSERT(len <= MAXEXTLEN); - ASSERT(!isnullstartblock(bno)); + ASSERT(!ISNULLSTARTBLOCK(bno)); agno = XFS_FSB_TO_AGNO(mp, bno); agbno = XFS_FSB_TO_AGBNO(mp, bno); ASSERT(agno < mp->m_sb.sb_agcount); @@ -4909,7 +4909,7 @@ xfs_bmapi( got.br_startoff = end; inhole = eof || got.br_startoff > bno; wasdelay = wr && !inhole && !(flags & XFS_BMAPI_DELAY) && - isnullstartblock(got.br_startblock); + ISNULLSTARTBLOCK(got.br_startblock); /* * First, deal with the hole before the allocated space * that we found, if any. @@ -5028,7 +5028,7 @@ xfs_bmapi( } ip->i_delayed_blks += alen; - abno = nullstartblock(indlen); + abno = NULLSTARTBLOCK(indlen); } else { /* * If first time, allocate and fill in @@ -5144,8 +5144,8 @@ xfs_bmapi( aoff + alen); #ifdef DEBUG if (flags & XFS_BMAPI_DELAY) { - ASSERT(isnullstartblock(got.br_startblock)); - ASSERT(startblockval(got.br_startblock) > 0); + ASSERT(ISNULLSTARTBLOCK(got.br_startblock)); + ASSERT(STARTBLOCKVAL(got.br_startblock) > 0); } ASSERT(got.br_state == XFS_EXT_NORM || got.br_state == XFS_EXT_UNWRITTEN); @@ -5179,7 +5179,7 @@ xfs_bmapi( ASSERT((bno >= obno) || (n == 0)); ASSERT(bno < end); mval->br_startoff = bno; - if (isnullstartblock(got.br_startblock)) { + if (ISNULLSTARTBLOCK(got.br_startblock)) { ASSERT(!wr || (flags & XFS_BMAPI_DELAY)); mval->br_startblock = DELAYSTARTBLOCK; } else @@ -5201,7 +5201,7 @@ xfs_bmapi( ASSERT(mval->br_blockcount <= len); } else { *mval = got; - if (isnullstartblock(mval->br_startblock)) { + if (ISNULLSTARTBLOCK(mval->br_startblock)) { ASSERT(!wr || (flags & XFS_BMAPI_DELAY)); mval->br_startblock = DELAYSTARTBLOCK; } @@ -5329,12 +5329,12 @@ xfs_bmapi( * Log everything. Do this after conversion, there's no point in * logging the extent records if we've converted to btree format. */ - if ((logflags & xfs_ilog_fext(whichfork)) && + if ((logflags & XFS_ILOG_FEXT(whichfork)) && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) - logflags &= ~xfs_ilog_fext(whichfork); - else if ((logflags & xfs_ilog_fbroot(whichfork)) && + logflags &= ~XFS_ILOG_FEXT(whichfork); + else if ((logflags & XFS_ILOG_FBROOT(whichfork)) && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) - logflags &= ~xfs_ilog_fbroot(whichfork); + logflags &= ~XFS_ILOG_FBROOT(whichfork); /* * Log whatever the flags say, even if error. 
Otherwise we might miss * detecting a case where the data is changed, there's an error, @@ -5411,7 +5411,7 @@ xfs_bmapi_single( *fsb = NULLFSBLOCK; return 0; } - ASSERT(!isnullstartblock(got.br_startblock)); + ASSERT(!ISNULLSTARTBLOCK(got.br_startblock)); ASSERT(bno < got.br_startoff + got.br_blockcount); *fsb = got.br_startblock + (bno - got.br_startoff); ifp->if_lastex = lastx; @@ -5543,7 +5543,7 @@ xfs_bunmapi( */ ASSERT(ep != NULL); del = got; - wasdel = isnullstartblock(del.br_startblock); + wasdel = ISNULLSTARTBLOCK(del.br_startblock); if (got.br_startoff < start) { del.br_startoff = start; del.br_blockcount -= start - got.br_startoff; @@ -5638,7 +5638,7 @@ xfs_bunmapi( xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), &prev); ASSERT(prev.br_state == XFS_EXT_NORM); - ASSERT(!isnullstartblock(prev.br_startblock)); + ASSERT(!ISNULLSTARTBLOCK(prev.br_startblock)); ASSERT(del.br_startblock == prev.br_startblock + prev.br_blockcount); if (prev.br_startoff < start) { @@ -5666,7 +5666,7 @@ xfs_bunmapi( } } if (wasdel) { - ASSERT(startblockval(del.br_startblock) > 0); + ASSERT(STARTBLOCKVAL(del.br_startblock) > 0); /* Update realtime/data freespace, unreserve quota */ if (isrt) { xfs_filblks_t rtexts; @@ -5782,12 +5782,12 @@ xfs_bunmapi( * Log everything. Do this after conversion, there's no point in * logging the extent records if we've converted to btree format. */ - if ((logflags & xfs_ilog_fext(whichfork)) && + if ((logflags & XFS_ILOG_FEXT(whichfork)) && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) - logflags &= ~xfs_ilog_fext(whichfork); - else if ((logflags & xfs_ilog_fbroot(whichfork)) && + logflags &= ~XFS_ILOG_FEXT(whichfork); + else if ((logflags & XFS_ILOG_FBROOT(whichfork)) && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) - logflags &= ~xfs_ilog_fbroot(whichfork); + logflags &= ~XFS_ILOG_FBROOT(whichfork); /* * Log inode even in the error case, if the transaction * is dirty we'll need to shut down the filesystem. @@ -5838,7 +5838,7 @@ xfs_getbmapx_fix_eof_hole( if (startblock == DELAYSTARTBLOCK) out->bmv_block = -2; else - out->bmv_block = xfs_fsb_to_db(ip, startblock); + out->bmv_block = XFS_FSB_TO_DB(ip, startblock); fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset); ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) && @@ -5979,7 +5979,7 @@ xfs_getbmap( if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1) nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1; - bmapi_flags = xfs_bmapi_aflag(whichfork) | + bmapi_flags = XFS_BMAPI_AFLAG(whichfork) | ((iflags & BMV_IF_PREALLOC) ? 0 : XFS_BMAPI_IGSTATE); /* @@ -6098,7 +6098,7 @@ xfs_bmap_isaeof( */ *aeof = (off >= s.br_startoff && off < s.br_startoff + s.br_blockcount && - isnullstartblock(s.br_startblock)) || + ISNULLSTARTBLOCK(s.br_startblock)) || off >= s.br_startoff + s.br_blockcount; return 0; } diff --git a/trunk/fs/xfs/xfs_bmap.h b/trunk/fs/xfs/xfs_bmap.h index be2979d88d32..284571c05ed0 100644 --- a/trunk/fs/xfs/xfs_bmap.h +++ b/trunk/fs/xfs/xfs_bmap.h @@ -95,6 +95,7 @@ typedef struct xfs_bmap_free /* need write cache flushing and no */ /* additional allocation alignments */ +#define XFS_BMAPI_AFLAG(w) xfs_bmapi_aflag(w) static inline int xfs_bmapi_aflag(int w) { return (w == XFS_ATTR_FORK ? 
XFS_BMAPI_ATTRFORK : 0); @@ -106,6 +107,7 @@ static inline int xfs_bmapi_aflag(int w) #define DELAYSTARTBLOCK ((xfs_fsblock_t)-1LL) #define HOLESTARTBLOCK ((xfs_fsblock_t)-2LL) +#define XFS_BMAP_INIT(flp,fbp) xfs_bmap_init(flp,fbp) static inline void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp) { ((flp)->xbf_first = NULL, (flp)->xbf_count = 0, \ diff --git a/trunk/fs/xfs/xfs_bmap_btree.c b/trunk/fs/xfs/xfs_bmap_btree.c index 0760d352586f..ba6b08c2fb02 100644 --- a/trunk/fs/xfs/xfs_bmap_btree.c +++ b/trunk/fs/xfs/xfs_bmap_btree.c @@ -121,7 +121,7 @@ __xfs_bmbt_get_all( b = (((xfs_dfsbno_t)l0 & xfs_mask64lo(9)) << 43) | (((xfs_dfsbno_t)l1) >> 21); - ASSERT((b >> 32) == 0 || isnulldstartblock(b)); + ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b)); s->br_startblock = (xfs_fsblock_t)b; } #else /* !DEBUG */ @@ -172,7 +172,7 @@ xfs_bmbt_get_startblock( b = (((xfs_dfsbno_t)r->l0 & xfs_mask64lo(9)) << 43) | (((xfs_dfsbno_t)r->l1) >> 21); - ASSERT((b >> 32) == 0 || isnulldstartblock(b)); + ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b)); return (xfs_fsblock_t)b; #else /* !DEBUG */ return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21); @@ -261,7 +261,7 @@ xfs_bmbt_set_allf( ((xfs_bmbt_rec_base_t)blockcount & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); #else /* !XFS_BIG_BLKNOS */ - if (isnullstartblock(startblock)) { + if (ISNULLSTARTBLOCK(startblock)) { r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | ((xfs_bmbt_rec_base_t)startoff << 9) | (xfs_bmbt_rec_base_t)xfs_mask64lo(9); @@ -321,7 +321,7 @@ xfs_bmbt_disk_set_allf( ((xfs_bmbt_rec_base_t)blockcount & (xfs_bmbt_rec_base_t)xfs_mask64lo(21))); #else /* !XFS_BIG_BLKNOS */ - if (isnullstartblock(startblock)) { + if (ISNULLSTARTBLOCK(startblock)) { r->l0 = cpu_to_be64( ((xfs_bmbt_rec_base_t)extent_flag << 63) | ((xfs_bmbt_rec_base_t)startoff << 9) | @@ -382,7 +382,7 @@ xfs_bmbt_set_startblock( r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) | (xfs_bmbt_rec_base_t)(v << 21); #else /* !XFS_BIG_BLKNOS */ - if (isnullstartblock(v)) { + if (ISNULLSTARTBLOCK(v)) { r->l0 |= (xfs_bmbt_rec_base_t)xfs_mask64lo(9); r->l1 = (xfs_bmbt_rec_base_t)xfs_mask64hi(11) | ((xfs_bmbt_rec_base_t)v << 21) | diff --git a/trunk/fs/xfs/xfs_bmap_btree.h b/trunk/fs/xfs/xfs_bmap_btree.h index 0e8df007615e..a4555abb6622 100644 --- a/trunk/fs/xfs/xfs_bmap_btree.h +++ b/trunk/fs/xfs/xfs_bmap_btree.h @@ -76,22 +76,26 @@ typedef struct xfs_bmbt_rec_host { #define DSTARTBLOCKMASK \ (((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS) +#define ISNULLSTARTBLOCK(x) isnullstartblock(x) static inline int isnullstartblock(xfs_fsblock_t x) { return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK; } +#define ISNULLDSTARTBLOCK(x) isnulldstartblock(x) static inline int isnulldstartblock(xfs_dfsbno_t x) { return ((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK; } +#define NULLSTARTBLOCK(k) nullstartblock(k) static inline xfs_fsblock_t nullstartblock(int k) { ASSERT(k < (1 << STARTBLOCKVALBITS)); return STARTBLOCKMASK | (k); } +#define STARTBLOCKVAL(x) startblockval(x) static inline xfs_filblks_t startblockval(xfs_fsblock_t x) { return (xfs_filblks_t)((x) & ~STARTBLOCKMASK); diff --git a/trunk/fs/xfs/xfs_btree.c b/trunk/fs/xfs/xfs_btree.c index e73c332eb23f..2c3ef20f8842 100644 --- a/trunk/fs/xfs/xfs_btree.c +++ b/trunk/fs/xfs/xfs_btree.c @@ -843,7 +843,7 @@ xfs_btree_ptr_is_null( union xfs_btree_ptr *ptr) { if (cur->bc_flags & XFS_BTREE_LONG_PTRS) - return be64_to_cpu(ptr->l) == NULLDFSBNO; + return be64_to_cpu(ptr->l) == NULLFSBLOCK; else return be32_to_cpu(ptr->s) == 
NULLAGBLOCK; } @@ -854,7 +854,7 @@ xfs_btree_set_ptr_null( union xfs_btree_ptr *ptr) { if (cur->bc_flags & XFS_BTREE_LONG_PTRS) - ptr->l = cpu_to_be64(NULLDFSBNO); + ptr->l = cpu_to_be64(NULLFSBLOCK); else ptr->s = cpu_to_be32(NULLAGBLOCK); } @@ -918,8 +918,8 @@ xfs_btree_init_block( new->bb_numrecs = cpu_to_be16(numrecs); if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { - new->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO); - new->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO); + new->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK); + new->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK); } else { new->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK); new->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK); @@ -960,7 +960,7 @@ xfs_btree_buf_to_ptr( ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp))); else { - ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp, + ptr->s = cpu_to_be32(XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(bp))); } } @@ -971,7 +971,7 @@ xfs_btree_ptr_to_daddr( union xfs_btree_ptr *ptr) { if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { - ASSERT(be64_to_cpu(ptr->l) != NULLDFSBNO); + ASSERT(be64_to_cpu(ptr->l) != NULLFSBLOCK); return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l)); } else { @@ -2454,7 +2454,7 @@ xfs_btree_new_iroot( xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs)); *logflags |= - XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork); + XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork); *stat = 1; XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); return 0; @@ -3048,7 +3048,7 @@ xfs_btree_kill_iroot( cur->bc_bufs[level - 1] = NULL; be16_add_cpu(&block->bb_level, -1); xfs_trans_log_inode(cur->bc_tp, ip, - XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork)); + XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); cur->bc_nlevels--; out0: XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); diff --git a/trunk/fs/xfs/xfs_da_btree.c b/trunk/fs/xfs/xfs_da_btree.c index c45f74ff1a5b..a11a8390bf6c 100644 --- a/trunk/fs/xfs/xfs_da_btree.c +++ b/trunk/fs/xfs/xfs_da_btree.c @@ -1597,7 +1597,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno) nmap = 1; ASSERT(args->firstblock != NULL); if ((error = xfs_bmapi(tp, dp, bno, count, - xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA| + XFS_BMAPI_AFLAG(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA| XFS_BMAPI_CONTIG, args->firstblock, args->total, &map, &nmap, args->flist, NULL))) { @@ -1618,7 +1618,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno) nmap = MIN(XFS_BMAP_MAX_NMAP, count); c = (int)(bno + count - b); if ((error = xfs_bmapi(tp, dp, b, c, - xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE| + XFS_BMAPI_AFLAG(w)|XFS_BMAPI_WRITE| XFS_BMAPI_METADATA, args->firstblock, args->total, &mapp[mapi], &nmap, args->flist, @@ -1882,7 +1882,7 @@ xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno, * the last block to the place we want to kill. 
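The NULLDFSBNO/NULLFSBLOCK swaps in the xfs_btree.c hunks above rest on the generic btree code's two pointer flavours: inode-rooted (bmap) btrees store 64-bit filesystem block numbers, per-AG btrees store 32-bit AG-relative ones, and each flavour has its own all-ones null sentinel. A compact model of the union-plus-flag arrangement (the sentinels mirror the kernel's (type)-1 values; the on-disk big-endian handling is omitted):

#include <assert.h>
#include <stdint.h>

#define DEMO_LONG_PTRS		0x1	/* inode-rooted btree: 64-bit ptrs */
#define DEMO_NULLFSBLOCK	((uint64_t)-1)
#define DEMO_NULLAGBLOCK	((uint32_t)-1)

union demo_btree_ptr {
	uint64_t l;	/* long form: filesystem block number */
	uint32_t s;	/* short form: AG-relative block number */
};

static int demo_ptr_is_null(unsigned int flags, const union demo_btree_ptr *p)
{
	if (flags & DEMO_LONG_PTRS)
		return p->l == DEMO_NULLFSBLOCK;
	return p->s == DEMO_NULLAGBLOCK;
}

int main(void)
{
	union demo_btree_ptr p;

	p.l = DEMO_NULLFSBLOCK;			/* bmap btree null sibling */
	assert(demo_ptr_is_null(DEMO_LONG_PTRS, &p));
	p.s = DEMO_NULLAGBLOCK;			/* alloc/inobt null sibling */
	assert(demo_ptr_is_null(0, &p));
	return 0;
}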
*/ if ((error = xfs_bunmapi(tp, dp, dead_blkno, count, - xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA, + XFS_BMAPI_AFLAG(w)|XFS_BMAPI_METADATA, 0, args->firstblock, args->flist, NULL, &done)) == ENOSPC) { if (w != XFS_DATA_FORK) @@ -1987,7 +1987,7 @@ xfs_da_do_buf( if ((error = xfs_bmapi(trans, dp, (xfs_fileoff_t)bno, nfsb, XFS_BMAPI_METADATA | - xfs_bmapi_aflag(whichfork), + XFS_BMAPI_AFLAG(whichfork), NULL, 0, mapp, &nmap, NULL, NULL))) goto exit0; } diff --git a/trunk/fs/xfs/xfs_ialloc.c b/trunk/fs/xfs/xfs_ialloc.c index ab016e5ae7be..e6ebbaeb4dc6 100644 --- a/trunk/fs/xfs/xfs_ialloc.c +++ b/trunk/fs/xfs/xfs_ialloc.c @@ -357,7 +357,7 @@ xfs_ialloc_ag_alloc( int ioffset = i << args.mp->m_sb.sb_inodelog; uint isize = sizeof(struct xfs_dinode); - free = xfs_make_iptr(args.mp, fbuf, i); + free = XFS_MAKE_IPTR(args.mp, fbuf, i); free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC); free->di_version = version; free->di_gen = cpu_to_be32(gen); @@ -937,7 +937,7 @@ xfs_dialloc( } } } - offset = xfs_ialloc_find_free(&rec.ir_free); + offset = XFS_IALLOC_FIND_FREE(&rec.ir_free); ASSERT(offset >= 0); ASSERT(offset < XFS_INODES_PER_CHUNK); ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % @@ -1279,7 +1279,7 @@ xfs_imap( offset = XFS_INO_TO_OFFSET(mp, ino); ASSERT(offset < mp->m_sb.sb_inopblock); - cluster_agbno = xfs_daddr_to_agbno(mp, imap->im_blkno); + cluster_agbno = XFS_DADDR_TO_AGBNO(mp, imap->im_blkno); offset += (agbno - cluster_agbno) * mp->m_sb.sb_inopblock; imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster); diff --git a/trunk/fs/xfs/xfs_ialloc.h b/trunk/fs/xfs/xfs_ialloc.h index aeee8278f92c..50f558a4e0a8 100644 --- a/trunk/fs/xfs/xfs_ialloc.h +++ b/trunk/fs/xfs/xfs_ialloc.h @@ -39,6 +39,7 @@ struct xfs_trans; /* * Make an inode pointer out of the buffer/offset. */ +#define XFS_MAKE_IPTR(mp,b,o) xfs_make_iptr(mp,b,o) static inline struct xfs_dinode * xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o) { @@ -49,6 +50,7 @@ xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o) /* * Find a free (set) bit in the inode bitmask. */ +#define XFS_IALLOC_FIND_FREE(fp) xfs_ialloc_find_free(fp) static inline int xfs_ialloc_find_free(xfs_inofree_t *fp) { return xfs_lowbit64(*fp); diff --git a/trunk/fs/xfs/xfs_ialloc_btree.h b/trunk/fs/xfs/xfs_ialloc_btree.h index 5580e255ff06..37e5dd01a577 100644 --- a/trunk/fs/xfs/xfs_ialloc_btree.h +++ b/trunk/fs/xfs/xfs_ialloc_btree.h @@ -36,6 +36,7 @@ typedef __uint64_t xfs_inofree_t; #define XFS_INODES_PER_CHUNK_LOG (XFS_NBBYLOG + 3) #define XFS_INOBT_ALL_FREE ((xfs_inofree_t)-1) +#define XFS_INOBT_MASKN(i,n) xfs_inobt_maskn(i,n) static inline xfs_inofree_t xfs_inobt_maskn(int i, int n) { return (((n) >= XFS_INODES_PER_CHUNK ? 
\ diff --git a/trunk/fs/xfs/xfs_inode.c b/trunk/fs/xfs/xfs_inode.c index e7ae08d1df48..5a5e035e5d38 100644 --- a/trunk/fs/xfs/xfs_inode.c +++ b/trunk/fs/xfs/xfs_inode.c @@ -424,19 +424,6 @@ xfs_iformat( case XFS_DINODE_FMT_LOCAL: atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip); size = be16_to_cpu(atp->hdr.totsize); - - if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) { - xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, - "corrupt inode %Lu " - "(bad attr fork size %Ld).", - (unsigned long long) ip->i_ino, - (long long) size); - XFS_CORRUPTION_ERROR("xfs_iformat(8)", - XFS_ERRLEVEL_LOW, - ip->i_mount, dip); - return XFS_ERROR(EFSCORRUPTED); - } - error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); break; case XFS_DINODE_FMT_EXTENTS: @@ -1614,10 +1601,10 @@ xfs_itruncate_finish( * in this file with garbage in them once recovery * runs. */ - xfs_bmap_init(&free_list, &first_block); + XFS_BMAP_INIT(&free_list, &first_block); error = xfs_bunmapi(ntp, ip, first_unmap_block, unmap_len, - xfs_bmapi_aflag(fork) | + XFS_BMAPI_AFLAG(fork) | (sync ? 0 : XFS_BMAPI_ASYNC), XFS_ITRUNC_MAX_EXTENTS, &first_block, &free_list, @@ -2570,7 +2557,7 @@ xfs_iextents_copy( for (i = 0; i < nrecs; i++) { xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); start_block = xfs_bmbt_get_startblock(ep); - if (isnullstartblock(start_block)) { + if (ISNULLSTARTBLOCK(start_block)) { /* * It's a delayed allocation extent, so skip it. */ diff --git a/trunk/fs/xfs/xfs_inode_item.h b/trunk/fs/xfs/xfs_inode_item.h index 9957d0602d54..1ff04cc323ad 100644 --- a/trunk/fs/xfs/xfs_inode_item.h +++ b/trunk/fs/xfs/xfs_inode_item.h @@ -111,16 +111,20 @@ typedef struct xfs_inode_log_format_64 { #define XFS_ILI_IOLOCKED_ANY (XFS_ILI_IOLOCKED_EXCL | XFS_ILI_IOLOCKED_SHARED) + +#define XFS_ILOG_FBROOT(w) xfs_ilog_fbroot(w) static inline int xfs_ilog_fbroot(int w) { return (w == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT); } +#define XFS_ILOG_FEXT(w) xfs_ilog_fext(w) static inline int xfs_ilog_fext(int w) { return (w == XFS_DATA_FORK ? XFS_ILOG_DEXT : XFS_ILOG_AEXT); } +#define XFS_ILOG_FDATA(w) xfs_ilog_fdata(w) static inline int xfs_ilog_fdata(int w) { return (w == XFS_DATA_FORK ? 
XFS_ILOG_DDATA : XFS_ILOG_ADATA); diff --git a/trunk/fs/xfs/xfs_iomap.c b/trunk/fs/xfs/xfs_iomap.c index 08ce72316bfe..911062cf73a6 100644 --- a/trunk/fs/xfs/xfs_iomap.c +++ b/trunk/fs/xfs/xfs_iomap.c @@ -155,7 +155,7 @@ xfs_imap_to_bmap( iomapp->iomap_bn = IOMAP_DADDR_NULL; iomapp->iomap_flags |= IOMAP_DELAY; } else { - iomapp->iomap_bn = xfs_fsb_to_db(ip, start_block); + iomapp->iomap_bn = XFS_FSB_TO_DB(ip, start_block); if (ISUNWRITTEN(imap)) iomapp->iomap_flags |= IOMAP_UNWRITTEN; } @@ -261,7 +261,7 @@ xfs_iomap( xfs_iunlock(ip, lockmode); lockmode = 0; - if (nimaps && !isnullstartblock(imap.br_startblock)) { + if (nimaps && !ISNULLSTARTBLOCK(imap.br_startblock)) { xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip, offset, count, iomapp, &imap, flags); break; @@ -491,7 +491,7 @@ xfs_iomap_write_direct( /* * Issue the xfs_bmapi() call to allocate the blocks */ - xfs_bmap_init(&free_list, &firstfsb); + XFS_BMAP_INIT(&free_list, &firstfsb); nimaps = 1; error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag, &firstfsb, 0, &imap, &nimaps, &free_list, NULL); @@ -751,7 +751,7 @@ xfs_iomap_write_allocate( xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_trans_ihold(tp, ip); - xfs_bmap_init(&free_list, &first_block); + XFS_BMAP_INIT(&free_list, &first_block); /* * it is possible that the extents have changed since @@ -911,7 +911,7 @@ xfs_iomap_write_unwritten( /* * Modify the unwritten extent state of the buffer. */ - xfs_bmap_init(&free_list, &firstfsb); + XFS_BMAP_INIT(&free_list, &firstfsb); nimaps = 1; error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb, diff --git a/trunk/fs/xfs/xfs_itable.c b/trunk/fs/xfs/xfs_itable.c index cf98a805ec90..e19d0a8d5618 100644 --- a/trunk/fs/xfs/xfs_itable.c +++ b/trunk/fs/xfs/xfs_itable.c @@ -453,7 +453,7 @@ xfs_bulkstat( (chunkidx = agino - gino + 1) < XFS_INODES_PER_CHUNK && /* there are some left allocated */ - xfs_inobt_maskn(chunkidx, + XFS_INOBT_MASKN(chunkidx, XFS_INODES_PER_CHUNK - chunkidx) & ~gfree) { /* * Grab the chunk record. 
Mark all the @@ -464,7 +464,7 @@ xfs_bulkstat( if (XFS_INOBT_MASK(i) & ~gfree) gcnt++; } - gfree |= xfs_inobt_maskn(0, chunkidx); + gfree |= XFS_INOBT_MASKN(0, chunkidx); irbp->ir_startino = gino; irbp->ir_freecount = gcnt; irbp->ir_free = gfree; @@ -535,7 +535,7 @@ xfs_bulkstat( chunkidx < XFS_INODES_PER_CHUNK; chunkidx += nicluster, agbno += nbcluster) { - if (xfs_inobt_maskn(chunkidx, + if (XFS_INOBT_MASKN(chunkidx, nicluster) & ~gfree) xfs_btree_reada_bufs(mp, agno, agbno, nbcluster); diff --git a/trunk/fs/xfs/xfs_mount.c b/trunk/fs/xfs/xfs_mount.c index 35300250e86d..3c97c6463a4e 100644 --- a/trunk/fs/xfs/xfs_mount.c +++ b/trunk/fs/xfs/xfs_mount.c @@ -45,6 +45,7 @@ #include "xfs_fsops.h" #include "xfs_utils.h" +STATIC int xfs_mount_log_sb(xfs_mount_t *, __int64_t); STATIC int xfs_uuid_mount(xfs_mount_t *); STATIC void xfs_unmountfs_wait(xfs_mount_t *); @@ -681,7 +682,7 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount) * Update alignment values based on mount options and sb values */ STATIC int -xfs_update_alignment(xfs_mount_t *mp) +xfs_update_alignment(xfs_mount_t *mp, __uint64_t *update_flags) { xfs_sb_t *sbp = &(mp->m_sb); @@ -735,11 +736,11 @@ xfs_update_alignment(xfs_mount_t *mp) if (xfs_sb_version_hasdalign(sbp)) { if (sbp->sb_unit != mp->m_dalign) { sbp->sb_unit = mp->m_dalign; - mp->m_update_flags |= XFS_SB_UNIT; + *update_flags |= XFS_SB_UNIT; } if (sbp->sb_width != mp->m_swidth) { sbp->sb_width = mp->m_swidth; - mp->m_update_flags |= XFS_SB_WIDTH; + *update_flags |= XFS_SB_WIDTH; } } } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && @@ -904,6 +905,7 @@ xfs_mountfs( xfs_sb_t *sbp = &(mp->m_sb); xfs_inode_t *rip; __uint64_t resblks; + __int64_t update_flags = 0LL; uint quotamount, quotaflags; int uuid_mounted = 0; int error = 0; @@ -931,7 +933,7 @@ xfs_mountfs( "XFS: correcting sb_features alignment problem"); sbp->sb_features2 |= sbp->sb_bad_features2; sbp->sb_bad_features2 = sbp->sb_features2; - mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2; + update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2; /* * Re-check for ATTR2 in case it was found in bad_features2 @@ -945,11 +947,11 @@ xfs_mountfs( if (xfs_sb_version_hasattr2(&mp->m_sb) && (mp->m_flags & XFS_MOUNT_NOATTR2)) { xfs_sb_version_removeattr2(&mp->m_sb); - mp->m_update_flags |= XFS_SB_FEATURES2; + update_flags |= XFS_SB_FEATURES2; /* update sb_versionnum for the clearing of the morebits */ if (!sbp->sb_features2) - mp->m_update_flags |= XFS_SB_VERSIONNUM; + update_flags |= XFS_SB_VERSIONNUM; } /* @@ -958,7 +960,7 @@ xfs_mountfs( * allocator alignment is within an ag, therefore ag has * to be aligned at stripe boundary. */ - error = xfs_update_alignment(mp); + error = xfs_update_alignment(mp, &update_flags); if (error) goto error1; @@ -1135,12 +1137,10 @@ xfs_mountfs( } /* - * If this is a read-only mount defer the superblock updates until - * the next remount into writeable mode. Otherwise we would never - * perform the update e.g. for the root filesystem. + * If fs is not mounted readonly, then update the superblock changes. */ - if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) { - error = xfs_mount_log_sb(mp, mp->m_update_flags); + if (update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) { + error = xfs_mount_log_sb(mp, update_flags); if (error) { cmn_err(CE_WARN, "XFS: failed to write sb changes"); goto error4; @@ -1820,7 +1820,7 @@ xfs_uuid_mount( * be altered by the mount options, as well as any potential sb_features2 * fixup. 
Only the first superblock is updated. */ -int +STATIC int xfs_mount_log_sb( xfs_mount_t *mp, __int64_t fields) diff --git a/trunk/fs/xfs/xfs_mount.h b/trunk/fs/xfs/xfs_mount.h index f5e9937f9bdb..c1e028467327 100644 --- a/trunk/fs/xfs/xfs_mount.h +++ b/trunk/fs/xfs/xfs_mount.h @@ -44,9 +44,9 @@ typedef struct xfs_trans_reservations { #ifndef __KERNEL__ -#define xfs_daddr_to_agno(mp,d) \ +#define XFS_DADDR_TO_AGNO(mp,d) \ ((xfs_agnumber_t)(XFS_BB_TO_FSBT(mp, d) / (mp)->m_sb.sb_agblocks)) -#define xfs_daddr_to_agbno(mp,d) \ +#define XFS_DADDR_TO_AGBNO(mp,d) \ ((xfs_agblock_t)(XFS_BB_TO_FSBT(mp, d) % (mp)->m_sb.sb_agblocks)) #else /* __KERNEL__ */ @@ -327,8 +327,6 @@ typedef struct xfs_mount { spinlock_t m_sync_lock; /* work item list lock */ int m_sync_seq; /* sync thread generation no. */ wait_queue_head_t m_wait_single_sync_task; - __int64_t m_update_flags; /* sb flags we need to update - on the next remount,rw */ } xfs_mount_t; /* @@ -441,6 +439,7 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname, */ #define XFS_MFSI_QUIET 0x40 /* Be silent if mount errors found */ +#define XFS_DADDR_TO_AGNO(mp,d) xfs_daddr_to_agno(mp,d) static inline xfs_agnumber_t xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d) { @@ -449,6 +448,7 @@ xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d) return (xfs_agnumber_t) ld; } +#define XFS_DADDR_TO_AGBNO(mp,d) xfs_daddr_to_agbno(mp,d) static inline xfs_agblock_t xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d) { @@ -514,7 +514,6 @@ extern int xfs_mod_incore_sb_unlocked(xfs_mount_t *, xfs_sb_field_t, int64_t, int); extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *, uint, int); -extern int xfs_mount_log_sb(xfs_mount_t *, __int64_t); extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); extern int xfs_readsb(xfs_mount_t *, int); extern void xfs_freesb(xfs_mount_t *); diff --git a/trunk/fs/xfs/xfs_rename.c b/trunk/fs/xfs/xfs_rename.c index 58f85e9cd11d..86471bb40fd4 100644 --- a/trunk/fs/xfs/xfs_rename.c +++ b/trunk/fs/xfs/xfs_rename.c @@ -147,7 +147,7 @@ xfs_rename( xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, inodes, &num_inodes); - xfs_bmap_init(&free_list, &first_block); + XFS_BMAP_INIT(&free_list, &first_block); tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME); cancel_flags = XFS_TRANS_RELEASE_LOG_RES; spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len); diff --git a/trunk/fs/xfs/xfs_rtalloc.c b/trunk/fs/xfs/xfs_rtalloc.c index c5bb86f3ec05..edf12c7b834c 100644 --- a/trunk/fs/xfs/xfs_rtalloc.c +++ b/trunk/fs/xfs/xfs_rtalloc.c @@ -120,7 +120,7 @@ xfs_growfs_rt_alloc( if ((error = xfs_trans_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip))) goto error_cancel; - xfs_bmap_init(&flist, &firstblock); + XFS_BMAP_INIT(&flist, &firstblock); /* * Allocate blocks to the bitmap file. */ diff --git a/trunk/fs/xfs/xfs_rw.h b/trunk/fs/xfs/xfs_rw.h index f76c003ec55d..f87db5344ce6 100644 --- a/trunk/fs/xfs/xfs_rw.h +++ b/trunk/fs/xfs/xfs_rw.h @@ -28,6 +28,7 @@ struct xfs_mount; * file is a real time file or not, because the bmap code * does. 
*/ +#define XFS_FSB_TO_DB(ip,fsb) xfs_fsb_to_db(ip,fsb) static inline xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb) { diff --git a/trunk/fs/xfs/xfs_sb.h b/trunk/fs/xfs/xfs_sb.h index 1b017c657494..1ed71916e4c9 100644 --- a/trunk/fs/xfs/xfs_sb.h +++ b/trunk/fs/xfs/xfs_sb.h @@ -505,7 +505,7 @@ static inline void xfs_sb_version_removeattr2(xfs_sb_t *sbp) #define XFS_HDR_BLOCK(mp,d) ((xfs_agblock_t)XFS_BB_TO_FSBT(mp,d)) #define XFS_DADDR_TO_FSB(mp,d) XFS_AGB_TO_FSB(mp, \ - xfs_daddr_to_agno(mp,d), xfs_daddr_to_agbno(mp,d)) + XFS_DADDR_TO_AGNO(mp,d), XFS_DADDR_TO_AGBNO(mp,d)) #define XFS_FSB_TO_DADDR(mp,fsbno) XFS_AGB_TO_DADDR(mp, \ XFS_FSB_TO_AGNO(mp,fsbno), XFS_FSB_TO_AGBNO(mp,fsbno)) diff --git a/trunk/fs/xfs/xfs_vnodeops.c b/trunk/fs/xfs/xfs_vnodeops.c index 0e55c5d7db5f..f07bf8768c3a 100644 --- a/trunk/fs/xfs/xfs_vnodeops.c +++ b/trunk/fs/xfs/xfs_vnodeops.c @@ -862,7 +862,7 @@ xfs_inactive_symlink_rmt( * Find the block(s) so we can inval and unmap them. */ done = 0; - xfs_bmap_init(&free_list, &first_block); + XFS_BMAP_INIT(&free_list, &first_block); nmaps = ARRAY_SIZE(mval); if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size), XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps, @@ -1288,7 +1288,7 @@ xfs_inactive( /* * Free the inode. */ - xfs_bmap_init(&free_list, &first_block); + XFS_BMAP_INIT(&free_list, &first_block); error = xfs_ifree(tp, ip, &free_list); if (error) { /* @@ -1461,7 +1461,7 @@ xfs_create( xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT); unlock_dp_on_error = B_TRUE; - xfs_bmap_init(&free_list, &first_block); + XFS_BMAP_INIT(&free_list, &first_block); ASSERT(ip == NULL); @@ -1879,7 +1879,7 @@ xfs_remove( } } - xfs_bmap_init(&free_list, &first_block); + XFS_BMAP_INIT(&free_list, &first_block); error = xfs_dir_removename(tp, dp, name, ip->i_ino, &first_block, &free_list, resblks); if (error) { @@ -2059,7 +2059,7 @@ xfs_link( if (error) goto error_return; - xfs_bmap_init(&free_list, &first_block); + XFS_BMAP_INIT(&free_list, &first_block); error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino, &first_block, &free_list, resblks); @@ -2231,7 +2231,7 @@ xfs_mkdir( xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); unlock_dp_on_error = B_FALSE; - xfs_bmap_init(&free_list, &first_block); + XFS_BMAP_INIT(&free_list, &first_block); error = xfs_dir_createname(tp, dp, dir_name, cdp->i_ino, &first_block, &free_list, resblks ? @@ -2438,7 +2438,7 @@ xfs_symlink( * Initialize the bmap freelist prior to calling either * bmapi or the directory create code. */ - xfs_bmap_init(&free_list, &first_block); + XFS_BMAP_INIT(&free_list, &first_block); /* * Allocate an inode for the symlink. 
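(The xfs_rw.h and xfs_mount.h hunks above restore the old all-caps names as one-line macros that simply forward to the newer static inlines, so both spellings stay usable while callers are converted. The shape of that compatibility shim, reduced to a self-contained userspace sketch — all names and the arithmetic here are made up for illustration, not taken from XFS:

#include <stdio.h>

/* The modern spelling: a static inline, which gives real type checking
 * and a symbol the debugger can see. */
static inline long fsb_to_db(long fsb)
{
        return fsb << 3;        /* stand-in for the real block arithmetic */
}

/* The legacy all-caps spelling, kept as a trivial forwarding macro so
 * existing call sites compile unchanged. */
#define FSB_TO_DB(fsb) fsb_to_db(fsb)

int main(void)
{
        /* Both spellings resolve to the same inline. */
        printf("%ld %ld\n", fsb_to_db(5), FSB_TO_DB(5));
        return 0;
}

The inline carries the behavior; the macro exists purely so the two naming conventions can coexist during the transition, which is exactly what the hunks above reinstate.)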
@@ -2860,7 +2860,7 @@ xfs_alloc_file_space( /* * Issue the xfs_bmapi() call to allocate the blocks */ - xfs_bmap_init(&free_list, &firstfsb); + XFS_BMAP_INIT(&free_list, &firstfsb); error = xfs_bmapi(tp, ip, startoffset_fsb, allocatesize_fsb, bmapi_flag, &firstfsb, 0, imapp, &nimaps, @@ -2980,7 +2980,7 @@ xfs_zero_remaining_bytes( XFS_BUF_UNDONE(bp); XFS_BUF_UNWRITE(bp); XFS_BUF_READ(bp); - XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); + XFS_BUF_SET_ADDR(bp, XFS_FSB_TO_DB(ip, imap.br_startblock)); xfsbdstrat(mp, bp); error = xfs_iowait(bp); if (error) { @@ -3186,7 +3186,7 @@ xfs_free_file_space( /* * issue the bunmapi() call to free the blocks */ - xfs_bmap_init(&free_list, &firstfsb); + XFS_BMAP_INIT(&free_list, &firstfsb); error = xfs_bunmapi(tp, ip, startoffset_fsb, endoffset_fsb - startoffset_fsb, 0, 2, &firstfsb, &free_list, NULL, &done); diff --git a/trunk/include/asm-generic/bitops/__ffs.h b/trunk/include/asm-generic/bitops/__ffs.h index 937d7c435575..9a3274aecf83 100644 --- a/trunk/include/asm-generic/bitops/__ffs.h +++ b/trunk/include/asm-generic/bitops/__ffs.h @@ -9,7 +9,7 @@ * * Undefined if no bit exists, so code should check against 0 first. */ -static __always_inline unsigned long __ffs(unsigned long word) +static inline unsigned long __ffs(unsigned long word) { int num = 0; diff --git a/trunk/include/asm-generic/bitops/__fls.h b/trunk/include/asm-generic/bitops/__fls.h index a60a7ccb6782..be24465403d6 100644 --- a/trunk/include/asm-generic/bitops/__fls.h +++ b/trunk/include/asm-generic/bitops/__fls.h @@ -9,7 +9,7 @@ * * Undefined if no set bit exists, so code should check against 0 first. */ -static __always_inline unsigned long __fls(unsigned long word) +static inline unsigned long __fls(unsigned long word) { int num = BITS_PER_LONG - 1; diff --git a/trunk/include/asm-generic/bitops/fls.h b/trunk/include/asm-generic/bitops/fls.h index 0576d1f42f43..850859bc5069 100644 --- a/trunk/include/asm-generic/bitops/fls.h +++ b/trunk/include/asm-generic/bitops/fls.h @@ -9,7 +9,7 @@ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */ -static __always_inline int fls(int x) +static inline int fls(int x) { int r = 32; diff --git a/trunk/include/asm-generic/bitops/fls64.h b/trunk/include/asm-generic/bitops/fls64.h index b097cf8444e3..86d403f8b256 100644 --- a/trunk/include/asm-generic/bitops/fls64.h +++ b/trunk/include/asm-generic/bitops/fls64.h @@ -15,7 +15,7 @@ * at position 64. 
*/ #if BITS_PER_LONG == 32 -static __always_inline int fls64(__u64 x) +static inline int fls64(__u64 x) { __u32 h = x >> 32; if (h) @@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x) return fls(x); } #elif BITS_PER_LONG == 64 -static __always_inline int fls64(__u64 x) +static inline int fls64(__u64 x) { if (x == 0) return 0; diff --git a/trunk/include/linux/init_task.h b/trunk/include/linux/init_task.h index ea0ea1a4c36f..2f3c2d4ef73b 100644 --- a/trunk/include/linux/init_task.h +++ b/trunk/include/linux/init_task.h @@ -48,12 +48,6 @@ extern struct fs_struct init_fs; .posix_timers = LIST_HEAD_INIT(sig.posix_timers), \ .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \ .rlim = INIT_RLIMITS, \ - .cputime = { .totals = { \ - .utime = cputime_zero, \ - .stime = cputime_zero, \ - .sum_exec_runtime = 0, \ - .lock = __SPIN_LOCK_UNLOCKED(sig.cputime.totals.lock), \ - }, }, \ } extern struct nsproxy init_nsproxy; diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h index 02e16d207304..4cae9b81a1f8 100644 --- a/trunk/include/linux/sched.h +++ b/trunk/include/linux/sched.h @@ -293,9 +293,6 @@ extern void sched_show_task(struct task_struct *p); extern void softlockup_tick(void); extern void touch_softlockup_watchdog(void); extern void touch_all_softlockup_watchdogs(void); -extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write, - struct file *filp, void __user *buffer, - size_t *lenp, loff_t *ppos); extern unsigned int softlockup_panic; extern unsigned long sysctl_hung_task_check_count; extern unsigned long sysctl_hung_task_timeout_secs; @@ -453,7 +450,6 @@ struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; - spinlock_t lock; }; /* Alternate field names when used to cache expirations. */ #define prof_exp stime @@ -469,7 +465,7 @@ struct task_cputime { * used for thread group CPU clock calculations. */ struct thread_group_cputime { - struct task_cputime totals; + struct task_cputime *totals; }; /* @@ -2184,30 +2180,24 @@ static inline int spin_needbreak(spinlock_t *lock) * Thread group CPU time accounting. 
*/ -static inline -void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) -{ - struct task_cputime *totals = &tsk->signal->cputime.totals; - unsigned long flags; - - spin_lock_irqsave(&totals->lock, flags); - *times = *totals; - spin_unlock_irqrestore(&totals->lock, flags); -} +extern int thread_group_cputime_alloc(struct task_struct *); +extern void thread_group_cputime(struct task_struct *, struct task_cputime *); static inline void thread_group_cputime_init(struct signal_struct *sig) { - sig->cputime.totals = (struct task_cputime){ - .utime = cputime_zero, - .stime = cputime_zero, - .sum_exec_runtime = 0, - }; + sig->cputime.totals = NULL; +} - spin_lock_init(&sig->cputime.totals.lock); +static inline int thread_group_cputime_clone_thread(struct task_struct *curr) +{ + if (curr->signal->cputime.totals) + return 0; + return thread_group_cputime_alloc(curr); } static inline void thread_group_cputime_free(struct signal_struct *sig) { + free_percpu(sig->cputime.totals); } /* diff --git a/trunk/include/linux/workqueue.h b/trunk/include/linux/workqueue.h index 3cd51e579ab1..b36291130f22 100644 --- a/trunk/include/linux/workqueue.h +++ b/trunk/include/linux/workqueue.h @@ -118,24 +118,12 @@ struct execute_work { init_timer(&(_work)->timer); \ } while (0) -#define INIT_DELAYED_WORK_ON_STACK(_work, _func) \ - do { \ - INIT_WORK(&(_work)->work, (_func)); \ - init_timer_on_stack(&(_work)->timer); \ - } while (0) - #define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \ do { \ INIT_WORK(&(_work)->work, (_func)); \ init_timer_deferrable(&(_work)->timer); \ } while (0) -#define INIT_DELAYED_WORK_ON_STACK(_work, _func) \ - do { \ - INIT_WORK(&(_work)->work, (_func)); \ - init_timer_on_stack(&(_work)->timer); \ - } while (0) - /** * work_pending - Find out whether a work item is currently pending * @work: The work item in question diff --git a/trunk/include/sound/soc-dapm.h b/trunk/include/sound/soc-dapm.h index dfa804958820..93a4edb148b5 100644 --- a/trunk/include/sound/soc-dapm.h +++ b/trunk/include/sound/soc-dapm.h @@ -108,7 +108,7 @@ #define SND_SOC_DAPM_SWITCH_E(wname, wreg, wshift, winvert, wcontrols, \ wevent, wflags) \ { .id = snd_soc_dapm_switch, .name = wname, .reg = wreg, .shift = wshift, \ - .invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1, \ + .invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1 \ .event = wevent, .event_flags = wflags} #define SND_SOC_DAPM_MUX_E(wname, wreg, wshift, winvert, wcontrols, \ wevent, wflags) \ diff --git a/trunk/init/Kconfig b/trunk/init/Kconfig index 3be35f3a001b..2af83825634e 100644 --- a/trunk/init/Kconfig +++ b/trunk/init/Kconfig @@ -238,98 +238,6 @@ config AUDIT_TREE def_bool y depends on AUDITSYSCALL && INOTIFY -menu "RCU Subsystem" - -choice - prompt "RCU Implementation" - default CLASSIC_RCU - -config CLASSIC_RCU - bool "Classic RCU" - help - This option selects the classic RCU implementation that is - designed for best read-side performance on non-realtime - systems. - - Select this option if you are unsure. - -config TREE_RCU - bool "Tree-based hierarchical RCU" - help - This option selects the RCU implementation that is - designed for very large SMP system with hundreds or - thousands of CPUs. - -config PREEMPT_RCU - bool "Preemptible RCU" - depends on PREEMPT - help - This option reduces the latency of the kernel by making certain - RCU sections preemptible. Normally RCU code is non-preemptible, if - this option is selected then read-only RCU sections become - preemptible. 
This helps latency, but may expose bugs due to - now-naive assumptions about each RCU read-side critical section - remaining on a given CPU through its execution. - -endchoice - -config RCU_TRACE - bool "Enable tracing for RCU" - depends on TREE_RCU || PREEMPT_RCU - help - This option provides tracing in RCU which presents stats - in debugfs for debugging RCU implementation. - - Say Y here if you want to enable RCU tracing - Say N if you are unsure. - -config RCU_FANOUT - int "Tree-based hierarchical RCU fanout value" - range 2 64 if 64BIT - range 2 32 if !64BIT - depends on TREE_RCU - default 64 if 64BIT - default 32 if !64BIT - help - This option controls the fanout of hierarchical implementations - of RCU, allowing RCU to work efficiently on machines with - large numbers of CPUs. This value must be at least the cube - root of NR_CPUS, which allows NR_CPUS up to 32,768 for 32-bit - systems and up to 262,144 for 64-bit systems. - - Select a specific number if testing RCU itself. - Take the default if unsure. - -config RCU_FANOUT_EXACT - bool "Disable tree-based hierarchical RCU auto-balancing" - depends on TREE_RCU - default n - help - This option forces use of the exact RCU_FANOUT value specified, - regardless of imbalances in the hierarchy. This is useful for - testing RCU itself, and might one day be useful on systems with - strong NUMA behavior. - - Without RCU_FANOUT_EXACT, the code will balance the hierarchy. - - Say N if unsure. - -config TREE_RCU_TRACE - def_bool RCU_TRACE && TREE_RCU - select DEBUG_FS - help - This option provides tracing for the TREE_RCU implementation, - permitting Makefile to trivially select kernel/rcutree_trace.c. - -config PREEMPT_RCU_TRACE - def_bool RCU_TRACE && PREEMPT_RCU - select DEBUG_FS - help - This option provides tracing for the PREEMPT_RCU implementation, - permitting Makefile to trivially select kernel/rcupreempt_trace.c. - -endmenu # "RCU Subsystem" - config IKCONFIG tristate "Kernel .config support" ---help--- @@ -1064,3 +972,90 @@ source "block/Kconfig" config PREEMPT_NOTIFIERS bool +choice + prompt "RCU Implementation" + default CLASSIC_RCU + +config CLASSIC_RCU + bool "Classic RCU" + help + This option selects the classic RCU implementation that is + designed for best read-side performance on non-realtime + systems. + + Select this option if you are unsure. + +config TREE_RCU + bool "Tree-based hierarchical RCU" + help + This option selects the RCU implementation that is + designed for very large SMP systems with hundreds or + thousands of CPUs. + +config PREEMPT_RCU + bool "Preemptible RCU" + depends on PREEMPT + help + This option reduces the latency of the kernel by making certain + RCU sections preemptible. Normally RCU code is non-preemptible; if + this option is selected, then read-only RCU sections become + preemptible. This helps latency, but may expose bugs due to + now-naive assumptions about each RCU read-side critical section + remaining on a given CPU through its execution. + +endchoice + +config RCU_TRACE + bool "Enable tracing for RCU" + depends on TREE_RCU || PREEMPT_RCU + help + This option provides tracing in RCU which presents stats + in debugfs for debugging the RCU implementation. + + Say Y here if you want to enable RCU tracing. + Say N if you are unsure. 
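(For readers unfamiliar with the terminology in the help texts above: an RCU read-side critical section is the region between rcu_read_lock() and rcu_read_unlock(), and PREEMPT_RCU's whole point is that this region may itself be preempted. A minimal kernel-style sketch of such a reader follows; the struct, the shared pointer, and the fallback value are hypothetical, not from this patch — only the RCU primitives are the real API:

#include <linux/rcupdate.h>

struct cfg {
        int threshold;
};
static struct cfg *active_cfg;  /* published elsewhere via rcu_assign_pointer() */

static int read_threshold(void)
{
        struct cfg *c;
        int val;

        rcu_read_lock();                /* begin read-side critical section */
        c = rcu_dereference(active_cfg);
        val = c ? c->threshold : -1;    /* hypothetical default when unset */
        rcu_read_unlock();              /* end read-side critical section */
        return val;
}

Under CLASSIC_RCU or TREE_RCU the section implicitly pins the task to a CPU; under PREEMPT_RCU it does not, which is the "now-naive assumptions" caveat in the help text.)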
+ +config RCU_FANOUT + int "Tree-based hierarchical RCU fanout value" + range 2 64 if 64BIT + range 2 32 if !64BIT + depends on TREE_RCU + default 64 if 64BIT + default 32 if !64BIT + help + This option controls the fanout of hierarchical implementations + of RCU, allowing RCU to work efficiently on machines with + large numbers of CPUs. This value must be at least the cube + root of NR_CPUS, which allows NR_CPUS up to 32,768 for 32-bit + systems and up to 262,144 for 64-bit systems. + + Select a specific number if testing RCU itself. + Take the default if unsure. + +config RCU_FANOUT_EXACT + bool "Disable tree-based hierarchical RCU auto-balancing" + depends on TREE_RCU + default n + help + This option forces use of the exact RCU_FANOUT value specified, + regardless of imbalances in the hierarchy. This is useful for + testing RCU itself, and might one day be useful on systems with + strong NUMA behavior. + + Without RCU_FANOUT_EXACT, the code will balance the hierarchy. + + Say N if unsure. + +config TREE_RCU_TRACE + def_bool RCU_TRACE && TREE_RCU + select DEBUG_FS + help + This option provides tracing for the TREE_RCU implementation, + permitting Makefile to trivially select kernel/rcutree_trace.c. + +config PREEMPT_RCU_TRACE + def_bool RCU_TRACE && PREEMPT_RCU + select DEBUG_FS + help + This option provides tracing for the PREEMPT_RCU implementation, + permitting Makefile to trivially select kernel/rcupreempt_trace.c. diff --git a/trunk/kernel/fork.c b/trunk/kernel/fork.c index 242a706e7721..bf0cef8bbdf2 100644 --- a/trunk/kernel/fork.c +++ b/trunk/kernel/fork.c @@ -817,17 +817,17 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig) static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) { struct signal_struct *sig; + int ret; if (clone_flags & CLONE_THREAD) { - atomic_inc(¤t->signal->count); - atomic_inc(¤t->signal->live); - return 0; + ret = thread_group_cputime_clone_thread(current); + if (likely(!ret)) { + atomic_inc(¤t->signal->count); + atomic_inc(¤t->signal->live); + } + return ret; } sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); - - if (sig) - posix_cpu_timers_init_group(sig); - tsk->signal = sig; if (!sig) return -ENOMEM; @@ -864,6 +864,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); task_unlock(current->group_leader); + posix_cpu_timers_init_group(sig); + acct_init_pacct(&sig->pacct); tty_audit_fork(sig); diff --git a/trunk/kernel/hrtimer.c b/trunk/kernel/hrtimer.c index f33afb0407bc..2dc30c59c5fd 100644 --- a/trunk/kernel/hrtimer.c +++ b/trunk/kernel/hrtimer.c @@ -614,9 +614,7 @@ void clock_was_set(void) */ void hres_timers_resume(void) { - WARN_ONCE(!irqs_disabled(), - KERN_INFO "hres_timers_resume() called with IRQs enabled!"); - + /* Retrigger the CPU local events: */ retrigger_next_event(NULL); } diff --git a/trunk/kernel/posix-cpu-timers.c b/trunk/kernel/posix-cpu-timers.c index fa07da94d7be..157de3a47832 100644 --- a/trunk/kernel/posix-cpu-timers.c +++ b/trunk/kernel/posix-cpu-timers.c @@ -9,6 +9,76 @@ #include #include +/* + * Allocate the thread_group_cputime structure appropriately and fill in the + * current values of the fields. Called from copy_signal() via + * thread_group_cputime_clone_thread() when adding a second or subsequent + * thread to a thread group. Assumes interrupts are enabled when called. 
+ */ +int thread_group_cputime_alloc(struct task_struct *tsk) +{ + struct signal_struct *sig = tsk->signal; + struct task_cputime *cputime; + + /* + * If we have multiple threads and we don't already have a + * per-CPU task_cputime struct (checked in the caller), allocate + * one and fill it in with the times accumulated so far. We may + * race with another thread so recheck after we pick up the sighand + * lock. + */ + cputime = alloc_percpu(struct task_cputime); + if (cputime == NULL) + return -ENOMEM; + spin_lock_irq(&tsk->sighand->siglock); + if (sig->cputime.totals) { + spin_unlock_irq(&tsk->sighand->siglock); + free_percpu(cputime); + return 0; + } + sig->cputime.totals = cputime; + cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id()); + cputime->utime = tsk->utime; + cputime->stime = tsk->stime; + cputime->sum_exec_runtime = tsk->se.sum_exec_runtime; + spin_unlock_irq(&tsk->sighand->siglock); + return 0; +} + +/** + * thread_group_cputime - Sum the thread group time fields across all CPUs. + * + * @tsk: The task we use to identify the thread group. + * @times: task_cputime structure in which we return the summed fields. + * + * Walk the list of CPUs to sum the per-CPU time fields in the thread group + * time structure. + */ +void thread_group_cputime( + struct task_struct *tsk, + struct task_cputime *times) +{ + struct task_cputime *totals, *tot; + int i; + + totals = tsk->signal->cputime.totals; + if (!totals) { + times->utime = tsk->utime; + times->stime = tsk->stime; + times->sum_exec_runtime = tsk->se.sum_exec_runtime; + return; + } + + times->stime = times->utime = cputime_zero; + times->sum_exec_runtime = 0; + for_each_possible_cpu(i) { + tot = per_cpu_ptr(totals, i); + times->utime = cputime_add(times->utime, tot->utime); + times->stime = cputime_add(times->stime, tot->stime); + times->sum_exec_runtime += tot->sum_exec_runtime; + } +} + /* * Called after updating RLIMIT_CPU to set timer expiration if necessary. */ diff --git a/trunk/kernel/rcuclassic.c b/trunk/kernel/rcuclassic.c index bd5a9003497c..490934fc7ac3 100644 --- a/trunk/kernel/rcuclassic.c +++ b/trunk/kernel/rcuclassic.c @@ -716,7 +716,7 @@ void rcu_check_callbacks(int cpu, int user) raise_rcu_softirq(); } -static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, +static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, struct rcu_data *rdp) { unsigned long flags; diff --git a/trunk/kernel/rcutree.c b/trunk/kernel/rcutree.c index b2fd602a6f6f..f2d8638e6c60 100644 --- a/trunk/kernel/rcutree.c +++ b/trunk/kernel/rcutree.c @@ -1314,7 +1314,7 @@ int rcu_needs_cpu(int cpu) * access due to the fact that this CPU cannot possibly have any RCU * callbacks in flight yet. */ -static void __cpuinit +static void rcu_init_percpu_data(int cpu, struct rcu_state *rsp) { unsigned long flags; diff --git a/trunk/kernel/relay.c b/trunk/kernel/relay.c index 9d79b7854fa6..09ac2008f77b 100644 --- a/trunk/kernel/relay.c +++ b/trunk/kernel/relay.c @@ -663,10 +663,8 @@ int relay_late_setup_files(struct rchan *chan, mutex_lock(&relay_channels_mutex); /* Is chan already set up? 
*/ - if (unlikely(chan->has_base_filename)) { - mutex_unlock(&relay_channels_mutex); + if (unlikely(chan->has_base_filename)) return -EEXIST; - } chan->has_base_filename = 1; chan->parent = parent; curr_cpu = get_cpu(); diff --git a/trunk/kernel/sched_stats.h b/trunk/kernel/sched_stats.h index 8ab0cef8ecab..f2773b5d1226 100644 --- a/trunk/kernel/sched_stats.h +++ b/trunk/kernel/sched_stats.h @@ -296,7 +296,6 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next) static inline void account_group_user_time(struct task_struct *tsk, cputime_t cputime) { - struct task_cputime *times; struct signal_struct *sig; /* tsk == current, ensure it is safe to use ->signal */ @@ -304,11 +303,13 @@ static inline void account_group_user_time(struct task_struct *tsk, return; sig = tsk->signal; - times = &sig->cputime.totals; + if (sig->cputime.totals) { + struct task_cputime *times; - spin_lock(×->lock); - times->utime = cputime_add(times->utime, cputime); - spin_unlock(×->lock); + times = per_cpu_ptr(sig->cputime.totals, get_cpu()); + times->utime = cputime_add(times->utime, cputime); + put_cpu_no_resched(); + } } /** @@ -324,7 +325,6 @@ static inline void account_group_user_time(struct task_struct *tsk, static inline void account_group_system_time(struct task_struct *tsk, cputime_t cputime) { - struct task_cputime *times; struct signal_struct *sig; /* tsk == current, ensure it is safe to use ->signal */ @@ -332,11 +332,13 @@ static inline void account_group_system_time(struct task_struct *tsk, return; sig = tsk->signal; - times = &sig->cputime.totals; + if (sig->cputime.totals) { + struct task_cputime *times; - spin_lock(×->lock); - times->stime = cputime_add(times->stime, cputime); - spin_unlock(×->lock); + times = per_cpu_ptr(sig->cputime.totals, get_cpu()); + times->stime = cputime_add(times->stime, cputime); + put_cpu_no_resched(); + } } /** @@ -352,7 +354,6 @@ static inline void account_group_system_time(struct task_struct *tsk, static inline void account_group_exec_runtime(struct task_struct *tsk, unsigned long long ns) { - struct task_cputime *times; struct signal_struct *sig; sig = tsk->signal; @@ -361,9 +362,11 @@ static inline void account_group_exec_runtime(struct task_struct *tsk, if (unlikely(!sig)) return; - times = &sig->cputime.totals; + if (sig->cputime.totals) { + struct task_cputime *times; - spin_lock(×->lock); - times->sum_exec_runtime += ns; - spin_unlock(×->lock); + times = per_cpu_ptr(sig->cputime.totals, get_cpu()); + times->sum_exec_runtime += ns; + put_cpu_no_resched(); + } } diff --git a/trunk/kernel/softlockup.c b/trunk/kernel/softlockup.c index 85d5a2455103..d9188c66278a 100644 --- a/trunk/kernel/softlockup.c +++ b/trunk/kernel/softlockup.c @@ -16,7 +16,6 @@ #include #include #include -#include #include @@ -89,14 +88,6 @@ void touch_all_softlockup_watchdogs(void) } EXPORT_SYMBOL(touch_all_softlockup_watchdogs); -int proc_dosoftlockup_thresh(struct ctl_table *table, int write, - struct file *filp, void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - touch_all_softlockup_watchdogs(); - return proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos); -} - /* * This callback runs from the timer interrupt, and checks * whether the watchdog thread has hung or not: diff --git a/trunk/kernel/sysctl.c b/trunk/kernel/sysctl.c index 790f9d785663..368d1638ee78 100644 --- a/trunk/kernel/sysctl.c +++ b/trunk/kernel/sysctl.c @@ -809,7 +809,7 @@ static struct ctl_table kern_table[] = { .data = &softlockup_thresh, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = 
&proc_dosoftlockup_thresh, + .proc_handler = &proc_dointvec_minmax, .strategy = &sysctl_intvec, .extra1 = &neg_one, .extra2 = &sixty, diff --git a/trunk/kernel/time/tick-sched.c b/trunk/kernel/time/tick-sched.c index d3f1ef4d5cbe..1b6c05bd0d0a 100644 --- a/trunk/kernel/time/tick-sched.c +++ b/trunk/kernel/time/tick-sched.c @@ -134,7 +134,7 @@ __setup("nohz=", setup_tick_nohz); * value. We do this unconditionally on any cpu, as we don't know whether the * cpu, which has the update task assigned is in a long sleep. */ -static void tick_nohz_update_jiffies(void) +void tick_nohz_update_jiffies(void) { int cpu = smp_processor_id(); struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); diff --git a/trunk/kernel/workqueue.c b/trunk/kernel/workqueue.c index 1f0c509b40d3..2f445833ae37 100644 --- a/trunk/kernel/workqueue.c +++ b/trunk/kernel/workqueue.c @@ -971,8 +971,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, } #ifdef CONFIG_SMP -static struct workqueue_struct *work_on_cpu_wq __read_mostly; - struct work_for_cpu { struct work_struct work; long (*fn)(void *); @@ -993,8 +991,8 @@ static void do_work_for_cpu(struct work_struct *w) * @fn: the function to run * @arg: the function arg * - * This will return the value @fn returns. - * It is up to the caller to ensure that the cpu doesn't go offline. + * This will return -EINVAL if the cpu is not online, or the return value + * of @fn otherwise. */ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) { @@ -1003,8 +1001,14 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) INIT_WORK(&wfc.work, do_work_for_cpu); wfc.fn = fn; wfc.arg = arg; - queue_work_on(cpu, work_on_cpu_wq, &wfc.work); - flush_work(&wfc.work); + get_online_cpus(); + if (unlikely(!cpu_online(cpu))) + wfc.ret = -EINVAL; + else { + schedule_work_on(cpu, &wfc.work); + flush_work(&wfc.work); + } + put_online_cpus(); return wfc.ret; } @@ -1021,8 +1025,4 @@ void __init init_workqueues(void) hotcpu_notifier(workqueue_cpu_callback, 0); keventd_wq = create_workqueue("events"); BUG_ON(!keventd_wq); -#ifdef CONFIG_SMP - work_on_cpu_wq = create_workqueue("work_on_cpu"); - BUG_ON(!work_on_cpu_wq); -#endif } diff --git a/trunk/lib/Kconfig.debug b/trunk/lib/Kconfig.debug index 29044f500269..4c9ae6085c75 100644 --- a/trunk/lib/Kconfig.debug +++ b/trunk/lib/Kconfig.debug @@ -570,15 +570,6 @@ config DEBUG_NOTIFIERS This is a relatively cheap check but if you care about maximum performance, say N. -# -# Select this config option from the architecture Kconfig, if it -# it is preferred to always offer frame pointers as a config -# option on the architecture (regardless of KERNEL_DEBUG): -# -config ARCH_WANT_FRAME_POINTERS - bool - help - config FRAME_POINTER bool "Compile the kernel with frame pointers" depends on DEBUG_KERNEL && \ @@ -640,6 +631,19 @@ config RCU_TORTURE_TEST_RUNNABLE Say N here if you want the RCU torture tests to start only after being manually enabled via /proc. +config RCU_CPU_STALL_DETECTOR + bool "Check for stalled CPUs delaying RCU grace periods" + depends on CLASSIC_RCU + default n + help + This option causes RCU to printk information on which + CPUs are delaying the current grace period, but only when + the grace period extends for excessive time periods. + + Say Y if you want RCU to perform such checks. + + Say N if you are unsure. 
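(The posix-cpu-timers.c and sched_stats.h hunks earlier in this patch replace a single spinlock-protected task_cputime with a per-CPU array: each writer bumps only its own CPU's slot without any lock, and readers sum every slot, accepting a slightly racy snapshot. A condensed sketch of that scheme — the counter itself is hypothetical, but the percpu calls are the same kernel API the hunks use:

#include <linux/percpu.h>
#include <linux/errno.h>

/* Hypothetical per-CPU counter illustrating the accumulate/sum pattern. */
struct hit_count {
        unsigned long long n;
};
static struct hit_count *hits;          /* percpu; see hits_init() */

static int hits_init(void)
{
        hits = alloc_percpu(struct hit_count);
        return hits ? 0 : -ENOMEM;
}

/* Writer: touches only the local CPU's slot, so no lock is needed;
 * get_cpu() pins us to that CPU for the update. */
static void hit(void)
{
        per_cpu_ptr(hits, get_cpu())->n++;
        put_cpu();
}

/* Reader: sums every CPU's slot. As in thread_group_cputime() above,
 * the result is a snapshot that may race with concurrent writers. */
static unsigned long long hits_sum(void)
{
        unsigned long long sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                sum += per_cpu_ptr(hits, cpu)->n;
        return sum;
}

Compared with the deleted spinlock variant, writers never contend; the cost moves to the readers, which is the right trade for accounting that is updated on every tick but read only occasionally.)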
+ config RCU_CPU_STALL_DETECTOR bool "Check for stalled CPUs delaying RCU grace periods" depends on CLASSIC_RCU || TREE_RCU diff --git a/trunk/sound/pci/Kconfig b/trunk/sound/pci/Kconfig index 82b9bddcdcd6..6e3a1848447c 100644 --- a/trunk/sound/pci/Kconfig +++ b/trunk/sound/pci/Kconfig @@ -744,8 +744,8 @@ config SND_VIRTUOSO select SND_OXYGEN_LIB help Say Y here to include support for sound cards based on the - Asus AV100/AV200 chips, i.e., Xonar D1, DX, D2 and D2X. - Support for the HDAV1.3 (Deluxe) is very experimental. + Asus AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X and + HDAV1.3 (Deluxe). To compile this driver as a module, choose M here: the module will be called snd-virtuoso. diff --git a/trunk/sound/pci/hda/patch_realtek.c b/trunk/sound/pci/hda/patch_realtek.c index 5d249a547fbf..82dd08431970 100644 --- a/trunk/sound/pci/hda/patch_realtek.c +++ b/trunk/sound/pci/hda/patch_realtek.c @@ -8478,7 +8478,6 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = { SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_ASUS_EEE1601), SND_PCI_QUIRK(0x105b, 0x0ce8, "Foxconn P35AX-S", ALC883_6ST_DIG), SND_PCI_QUIRK(0x105b, 0x6668, "Foxconn", ALC883_6ST_DIG), - SND_PCI_QUIRK(0x1071, 0x8227, "Mitac 82801H", ALC883_MITAC), SND_PCI_QUIRK(0x1071, 0x8253, "Mitac 8252d", ALC883_MITAC), SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC883_LAPTOP_EAPD), SND_PCI_QUIRK(0x10f1, 0x2350, "TYAN-S2350", ALC888_6ST_DELL), @@ -8527,7 +8526,6 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = { SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66), SND_PCI_QUIRK(0x8086, 0x0001, "DG33BUC", ALC883_3ST_6ch_INTEL), SND_PCI_QUIRK(0x8086, 0x0002, "DG33FBC", ALC883_3ST_6ch_INTEL), - SND_PCI_QUIRK(0x8086, 0x2503, "82801H", ALC883_MITAC), SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC883_3ST_6ch_INTEL), SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch), {} diff --git a/trunk/sound/pci/hda/patch_sigmatel.c b/trunk/sound/pci/hda/patch_sigmatel.c index 3dd4eee70b7c..c39deebb588f 100644 --- a/trunk/sound/pci/hda/patch_sigmatel.c +++ b/trunk/sound/pci/hda/patch_sigmatel.c @@ -81,7 +81,6 @@ enum { enum { STAC_92HD83XXX_REF, - STAC_92HD83XXX_PWR_REF, STAC_92HD83XXX_MODELS }; @@ -335,7 +334,7 @@ static hda_nid_t stac92hd83xxx_slave_dig_outs[2] = { }; static unsigned int stac92hd83xxx_pwr_mapping[4] = { - 0x03, 0x0c, 0x20, 0x40, + 0x03, 0x0c, 0x10, 0x40, }; static hda_nid_t stac92hd83xxx_amp_nids[1] = { @@ -842,6 +841,10 @@ static struct hda_verb stac92hd73xx_10ch_core_init[] = { }; static struct hda_verb stac92hd83xxx_core_init[] = { + /* start of config #1 */ + { 0xe, AC_VERB_SET_CONNECT_SEL, 0x3}, + + /* start of config #2 */ { 0xa, AC_VERB_SET_CONNECT_SEL, 0x0}, { 0xb, AC_VERB_SET_CONNECT_SEL, 0x0}, { 0xd, AC_VERB_SET_CONNECT_SEL, 0x1}, @@ -882,8 +885,8 @@ static struct hda_verb stac92hd71bxx_analog_core_init[] = { static struct hda_verb stac925x_core_init[] = { /* set dac0mux for dac converter */ { 0x06, AC_VERB_SET_CONNECT_SEL, 0x00}, - /* mute the master volume */ - { 0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE }, + /* unmute and set max the selector */ + { 0x0e, AC_VERB_SET_AMP_GAIN_MUTE, 0xb01f }, {} }; @@ -1135,8 +1138,6 @@ static struct snd_kcontrol_new stac92hd71bxx_mixer[] = { }; static struct snd_kcontrol_new stac925x_mixer[] = { - HDA_CODEC_VOLUME("Master Playback Volume", 0x0e, 0, HDA_OUTPUT), - HDA_CODEC_MUTE("Master Playback Switch", 0x0e, 0, HDA_OUTPUT), STAC_INPUT_SOURCE(1), HDA_CODEC_VOLUME("Capture Volume", 0x09, 0, HDA_OUTPUT), HDA_CODEC_MUTE("Capture Switch", 0x14, 0, HDA_OUTPUT), @@ 
-1735,12 +1736,10 @@ static unsigned int ref92hd83xxx_pin_configs[14] = { static unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = { [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs, - [STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs, }; static const char *stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = { [STAC_92HD83XXX_REF] = "ref", - [STAC_92HD83XXX_PWR_REF] = "mic-ref", }; static struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = { @@ -1800,8 +1799,6 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = { "HP dv5", STAC_HP_M4), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30f4, "HP dv7", STAC_HP_M4), - SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30f7, - "HP dv4", STAC_HP_DV5), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30fc, "HP dv7", STAC_HP_M4), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3603, @@ -3576,12 +3573,13 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out err = stac92xx_auto_fill_dac_nids(codec); if (err < 0) return err; - err = stac92xx_auto_create_multi_out_ctls(codec, - &spec->autocfg); - if (err < 0) - return err; } + err = stac92xx_auto_create_multi_out_ctls(codec, &spec->autocfg); + + if (err < 0) + return err; + /* setup analog beep controls */ if (spec->anabeep_nid > 0) { err = stac92xx_auto_create_beep_ctls(codec, @@ -4755,9 +4753,7 @@ static struct hda_input_mux stac92hd83xxx_dmux = { static int patch_stac92hd83xxx(struct hda_codec *codec) { struct sigmatel_spec *spec; - hda_nid_t conn[STAC92HD83_DAC_COUNT + 1]; int err; - int num_dacs; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) @@ -4776,16 +4772,15 @@ static int patch_stac92hd83xxx(struct hda_codec *codec) spec->num_pwrs = ARRAY_SIZE(stac92hd83xxx_pwr_nids); spec->multiout.dac_nids = spec->dac_nids; - - /* set port 0xe to select the last DAC - */ - num_dacs = snd_hda_get_connections(codec, 0x0e, - conn, STAC92HD83_DAC_COUNT + 1) - 1; - - snd_hda_codec_write_cache(codec, 0xe, 0, - AC_VERB_SET_CONNECT_SEL, num_dacs); - spec->init = stac92hd83xxx_core_init; + switch (codec->vendor_id) { + case 0x111d7605: + break; + default: + spec->num_pwrs--; + spec->init++; /* switch to config #2 */ + } + spec->mixer = stac92hd83xxx_mixer; spec->num_pins = ARRAY_SIZE(stac92hd83xxx_pin_nids); spec->num_dmuxes = ARRAY_SIZE(stac92hd83xxx_dmux_nids); @@ -4811,15 +4806,6 @@ static int patch_stac92hd83xxx(struct hda_codec *codec) return err; } - switch (codec->vendor_id) { - case 0x111d7604: - case 0x111d7605: - if (spec->board_config == STAC_92HD83XXX_PWR_REF) - break; - spec->num_pwrs = 0; - break; - } - err = stac92xx_parse_auto_config(codec, 0x1d, 0); if (!err) { if (spec->board_config < 0) { diff --git a/trunk/sound/pci/oxygen/virtuoso.c b/trunk/sound/pci/oxygen/virtuoso.c index 18c7c91786bc..e9e829e83d7a 100644 --- a/trunk/sound/pci/oxygen/virtuoso.c +++ b/trunk/sound/pci/oxygen/virtuoso.c @@ -683,7 +683,7 @@ static void xonar_hdav_uart_input(struct oxygen *chip) if (chip->uart_input_count >= 2 && chip->uart_input[chip->uart_input_count - 2] == 'O' && chip->uart_input[chip->uart_input_count - 1] == 'K') { - printk(KERN_DEBUG "message from Xonar HDAV HDMI chip received:\n"); + printk(KERN_DEBUG "message from Xonar HDAV HDMI chip received:"); print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, chip->uart_input, chip->uart_input_count); chip->uart_input_count = 0; @@ -908,7 +908,6 @@ static const struct oxygen_model model_xonar_hdav = { .dac_channels = 8, .dac_volume_min = 0x0f, .dac_volume_max = 0xff, - .misc_flags = OXYGEN_MISC_MIDI, .function_flags = OXYGEN_FUNCTION_2WIRE, .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, 
.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, diff --git a/trunk/sound/soc/atmel/atmel-pcm.c b/trunk/sound/soc/atmel/atmel-pcm.c index 3dcdc4e3cfa0..1fac5efd285b 100644 --- a/trunk/sound/soc/atmel/atmel-pcm.c +++ b/trunk/sound/soc/atmel/atmel-pcm.c @@ -44,6 +44,8 @@ #include #include +#include + #include "atmel-pcm.h" diff --git a/trunk/sound/soc/fsl/mpc8610_hpcd.c b/trunk/sound/soc/fsl/mpc8610_hpcd.c index acf39a646b2f..bcec3f60bad9 100644 --- a/trunk/sound/soc/fsl/mpc8610_hpcd.c +++ b/trunk/sound/soc/fsl/mpc8610_hpcd.c @@ -182,6 +182,16 @@ static struct snd_soc_ops mpc8610_hpcd_ops = { .startup = mpc8610_hpcd_startup, }; +/** + * mpc8610_hpcd_machine: ASoC machine data + */ +static struct snd_soc_card mpc8610_hpcd_machine = { + .probe = mpc8610_hpcd_machine_probe, + .remove = mpc8610_hpcd_machine_remove, + .name = "MPC8610 HPCD", + .num_links = 1, +}; + /** * mpc8610_hpcd_probe: OF probe function for the fabric driver * @@ -445,11 +455,7 @@ static int mpc8610_hpcd_probe(struct of_device *ofdev, machine_data->dai.codec_dai = &cs4270_dai; /* The codec_dai we want */ machine_data->dai.ops = &mpc8610_hpcd_ops; - machine_data->machine.probe = mpc8610_hpcd_machine_probe; - machine_data->machine.remove = mpc8610_hpcd_machine_remove; - machine_data->machine.name = "MPC8610 HPCD"; - machine_data->machine.num_links = 1; - machine_data->machine.dai_link = &machine_data->dai; + mpc8610_hpcd_machine.dai_link = &machine_data->dai; /* Allocate a new audio platform device structure */ sound_device = platform_device_alloc("soc-audio", -1); @@ -459,7 +465,7 @@ static int mpc8610_hpcd_probe(struct of_device *ofdev, goto error; } - machine_data->sound_devdata.card = &machine_data->machine; + machine_data->sound_devdata.card = &mpc8610_hpcd_machine; machine_data->sound_devdata.codec_dev = &soc_codec_device_cs4270; machine_data->machine.platform = &fsl_soc_platform;
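(The mpc8610_hpcd hunks above move the snd_soc_card out of the per-probe machine_data and into a file-static definition, with only the board-specific dai_link patched in at probe time. A reduced sketch of that shape follows; the names are illustrative, the struct members mirror the hunk, and the callback signatures follow the same era's ASoC API as used by the fabric driver:

#include <linux/platform_device.h>
#include <sound/soc.h>

/* Stand-ins for the fabric driver's callbacks (hypothetical bodies). */
static int demo_machine_probe(struct platform_device *sound_device)
{
        return 0;
}

static int demo_machine_remove(struct platform_device *sound_device)
{
        return 0;
}

/* One static card per fabric driver: its identity is fixed here... */
static struct snd_soc_card demo_machine = {
        .probe     = demo_machine_probe,
        .remove    = demo_machine_remove,
        .name      = "Demo Machine",
        .num_links = 1,
};

/* ...and only the per-board DAI link is wired up at probe time. */
static void demo_attach_link(struct snd_soc_dai_link *link)
{
        demo_machine.dai_link = link;
}

The trade-off of a file-static card is that the driver can bind at most one instance, which is acceptable for a board-specific fabric driver like this one.)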